Mirror of https://github.com/rclone/rclone.git, synced 2025-12-06 00:03:32 +00:00

Compare commits: fix-azure-...azure-pipe
586 Commits
| Author | SHA1 | Date |
|---|---|---|
(586 commit rows, SHA1s 85882fa2de first through d0eb8ddc30 last; the author, date, and message cells were empty in this mirror capture, so the per-commit rows are not reproduced here.)
@@ -2,7 +2,7 @@ version: "{build}"
 
 os: Windows Server 2012 R2
 
-clone_folder: c:\gopath\src\github.com\ncw\rclone
+clone_folder: c:\gopath\src\github.com\rclone\rclone
 
 cache:
 - '%LocalAppData%\go-build'
@@ -16,7 +16,7 @@ environment:
   PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
   PATH: '%PATHCC64%'
   RCLONE_CONFIG_PASS:
-    secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
+    secure: sq9CPBbwaeKJv+yd24U44neORYPQVy6jsjnQptC+5yk=
 
 install:
   - choco install winfsp -y
@@ -46,4 +46,4 @@ artifacts:
   - path: build/*-v*.zip
 
 deploy_script:
-  - IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
+  - IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make upload_beta
@@ -1,3 +1,4 @@
+---
 version: 2
 
 jobs:
@@ -5,7 +6,7 @@ jobs:
   build:
     machine: true
 
-    working_directory: ~/.go_workspace/src/github.com/ncw/rclone
+    working_directory: ~/.go_workspace/src/github.com/rclone/rclone
 
     steps:
       - checkout
@@ -13,10 +14,10 @@ jobs:
       - run:
          name: Cross-compile rclone
          command: |
-           docker pull billziss/xgo-cgofuse
+           docker pull rclone/xgo-cgofuse
            go get -v github.com/karalabe/xgo
            xgo \
-             --image=billziss/xgo-cgofuse \
+             --image=rclone/xgo-cgofuse \
              --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
              -tags cmount \
              .
@@ -29,6 +30,21 @@ jobs:
          command: |
-           mkdir -p /tmp/rclone.dist
-           cp -R rclone-* /tmp/rclone.dist
+           mkdir build
+           cp -R rclone-* build/
+
+      - run:
+         name: Build rclone
+         command: |
+           go version
+           go build
+
+      - run:
+         name: Upload artifacts
+         command: |
+           if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
+             make circleci_upload
+           fi
 
       - store_artifacts:
          path: /tmp/rclone.dist
7 .gitattributes vendored Normal file
@@ -0,0 +1,7 @@
+# Ignore generated files in GitHub language statistics and diffs
+/MANUAL.* linguist-generated=true
+/rclone.1 linguist-generated=true
+
+# Don't fiddle with the line endings of test data
+**/testdata/** -text
+**/test/** -text
2 .github/ISSUE_TEMPLATE.md vendored
@@ -10,7 +10,7 @@ instead of filing an issue for a quick response.
 
 If you are reporting a bug or asking for a new feature then please use one of the templates here:
 
-https://github.com/ncw/rclone/issues/new
+https://github.com/rclone/rclone/issues/new
 
 otherwise fill in the form below.
 
4 .github/PULL_REQUEST_TEMPLATE.md vendored
@@ -22,8 +22,8 @@ Link issues and relevant forum posts here.
 
 #### Checklist
 
-- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
+- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
 - [ ] I have added tests for all changes in this PR if appropriate.
 - [ ] I have added documentation for the changes if appropriate.
-- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
+- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
 - [ ] I'm done, this Pull Request is ready for review :-)
3 .gitignore vendored
@@ -5,3 +5,6 @@ build
 docs/public
 rclone.iml
 .idea
+.history
+*.test
+*.log
26 .golangci.yml Normal file
@@ -0,0 +1,26 @@
+# golangci-lint configuration options
+
+linters:
+  enable:
+    - deadcode
+    - errcheck
+    - goimports
+    - golint
+    - ineffassign
+    - structcheck
+    - varcheck
+    - govet
+    - unconvert
+    #- prealloc
+    #- maligned
+  disable-all: true
+
+issues:
+  # Enable some lints excluded by default
+  exclude-use-default: false
+
+  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
+  max-per-linter: 0
+
+  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
+  max-same-issues: 0
@@ -1,14 +0,0 @@
-{
-    "Enable": [
-        "deadcode",
-        "errcheck",
-        "goimports",
-        "golint",
-        "ineffassign",
-        "structcheck",
-        "varcheck",
-        "vet"
-    ],
-    "EnableGC": true,
-    "Vendor": true
-}
139 .travis.yml
@@ -1,58 +1,129 @@
 ---
 language: go
 sudo: required
-dist: trusty
+dist: xenial
 os:
-- linux
-go:
-- 1.8.x
-- 1.9.x
-- 1.10.x
-- 1.11.x
-- tip
-go_import_path: github.com/ncw/rclone
+  - linux
+go_import_path: github.com/rclone/rclone
 before_install:
-- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
-- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
+  - git fetch --unshallow --tags
+  - |
+    if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
+      sudo modprobe fuse
+      sudo chmod 666 /dev/fuse
+      sudo chown root:$USER /etc/fuse.conf
+    fi
+    if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
+      brew update
+      brew tap caskroom/cask
+      brew cask install osxfuse
+    fi
+    if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
+      choco install -y winfsp zip make
+      cd ../.. # fix crlf in git checkout
+      mv $TRAVIS_REPO_SLUG _old
+      git config --global core.autocrlf false
+      git clone _old $TRAVIS_REPO_SLUG
+      cd $TRAVIS_REPO_SLUG
+    fi
 install:
-- git fetch --unshallow --tags
-- make vars
-- make build_dep
-script:
-- make check
-- make quicktest
-- make compile_all
+  - make vars
 env:
   global:
     - GOTAGS=cmount
+    - GOMAXPROCS=8 # workaround for cmd/mount tests locking up - see #3154
+    - GO111MODULE=off
+    - GITHUB_USER=ncw
     - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
+    - secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
+    - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
 addons:
   apt:
     packages:
-    - fuse
-    - libfuse-dev
-    - rpm
-    - pkg-config
+      - fuse
+      - libfuse-dev
+      - rpm
+      - pkg-config
 cache:
   directories:
     - $HOME/.cache/go-build
 matrix:
   allow_failures:
-  - go: tip
+    - go: tip
  include:
-  - os: osx
-    go: 1.11.x
-    env: GOTAGS=""
-    cache:
-      directories:
-        - $HOME/Library/Caches/go-build
+    - go: 1.9.x
+      script:
+        - make quicktest
+    - go: 1.10.x
+      script:
+        - make quicktest
+    - go: 1.11.x
+      script:
+        - make quicktest
+    - go: 1.12.x
+      name: Linux
+      env:
+        - GOTAGS=cmount
+        - BUILD_FLAGS='-include "^linux/"'
+        - DEPLOY=true
+      script:
+        - make build_dep
+        - make check
+        - make quicktest
+    - go: 1.12.x
+      name: Go Modules / Race
+      env:
+        - GO111MODULE=on
+        - GOPROXY=https://proxy.golang.org
+      script:
+        - make quicktest
+        - make racequicktest
+    - go: 1.12.x
+      name: Other OS
+      env:
+        - DEPLOY=true
+        - BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
+      script:
+        - make
+    - go: 1.12.x
+      name: macOS
+      os: osx
+      env:
+        - GOTAGS= # cmount doesn't work on osx travis for some reason
+        - BUILD_FLAGS='-include "^darwin/" -cgo'
+        - DEPLOY=true
+      cache:
+        directories:
+          - $HOME/Library/Caches/go-build
+      script:
+        - make
+        - make quicktest
+        - make racequicktest
+    # - os: windows
+    #   name: Windows
+    #   go: 1.12.x
+    #   env:
+    #     - GOTAGS=cmount
+    #     - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
+    #     - BUILD_FLAGS='-include "^windows/amd64" -cgo' # 386 doesn't build yet
+    #   #filter_secrets: false # works around a problem with secrets under windows
+    #   cache:
+    #     directories:
+    #       - ${LocalAppData}/go-build
+    #   script:
+    #     - make
+    #     - make quicktest
+    #     - make racequicktest
+    - go: tip
+      script:
+        - make quicktest
 
 deploy:
   provider: script
-  script: make travis_beta
+  script:
+    - make beta
+    - [[ "$TRAVIS_PULL_REQUEST" == "false" ]] && make upload_beta
   skip_cleanup: true
   on:
-    repo: ncw/rclone
+    repo: rclone/rclone
     all_branches: true
-    go: 1.11.x
-    condition: $TRAVIS_PULL_REQUEST == false
+    condition: $DEPLOY == true
@@ -29,12 +29,12 @@ You'll need a Go environment set up with GOPATH set. See [the Go
 getting started docs](https://golang.org/doc/install) for more info.
 
 First in your web browser press the fork button on [rclone's GitHub
-page](https://github.com/ncw/rclone).
+page](https://github.com/rclone/rclone).
 
 Now in your terminal
 
-    go get -u github.com/ncw/rclone
-    cd $GOPATH/src/github.com/ncw/rclone
+    go get -u github.com/rclone/rclone
+    cd $GOPATH/src/github.com/rclone/rclone
     git remote rename origin upstream
     git remote add origin git@github.com:YOURUSER/rclone.git
 
@@ -127,7 +127,7 @@ If you want to use the integration test framework to run these tests
 all together with an HTML report and test retries then from the
 project root:
 
-    go install github.com/ncw/rclone/fstest/test_all
+    go install github.com/rclone/rclone/fstest/test_all
     test_all -backend drive
 
 If you want to run all the integration tests against all the remotes,
@@ -135,7 +135,7 @@ then change into the project root and run
 
     make test
 
-This command is run daily on the the integration test server. You can
+This command is run daily on the integration test server. You can
 find the results at https://pub.rclone.org/integration-tests/
 
 ## Code Organisation ##
@@ -351,6 +351,12 @@ Unit tests
 
 Integration tests
 
+* Add your backend to `fstest/test_all/config.yaml`
+* Once you've done that then you can use the integration test framework from the project root:
+    * go install ./...
+    * test_all -backend remote
+
+Or if you want to run the integration tests manually:
 
 * Make sure integration tests pass with
     * `cd fs/operations`
    * `go test -v -remote TestRemote:`
@@ -372,4 +378,3 @@ Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](
 * `docs/content/about.md` - front page of rclone.org
 * `docs/layouts/chrome/navbar.html` - add it to the website navigation
 * `bin/make_manual.py` - add the page to the `docs` constant
-* `cmd/cmd.go` - the main help for rclone
@@ -1,14 +1,17 @@
 # Maintainers guide for rclone #
 
-Current active maintainers of rclone are
-
-* Nick Craig-Wood @ncw
-* Stefan Breunig @breunigs
-* Ishuah Kariuki @ishuah
-* Remus Bunduc @remusb - cache subsystem maintainer
-* Fabian Möller @B4dM4n
-* Alex Chen @Cnly
-* Sandeep Ummadi @sandeepkru
+Current active maintainers of rclone are:
+
+| Name             | GitHub ID   | Specific Responsibilities    |
+| :--------------- | :---------- | :--------------------------- |
+| Nick Craig-Wood  | @ncw        | overall project health       |
+| Stefan Breunig   | @breunigs   |                              |
+| Ishuah Kariuki   | @ishuah     |                              |
+| Remus Bunduc     | @remusb     | cache backend                |
+| Fabian Möller    | @B4dM4n     |                              |
+| Alex Chen        | @Cnly       | onedrive backend             |
+| Sandeep Ummadi   | @sandeepkru | azureblob backend            |
+| Sebastian Bünger | @buengese   | jottacloud & yandex backends |
 
 **This is a work in progress Draft**
 
@@ -48,7 +51,7 @@ The milestones have these meanings:
 * Help wanted - blue sky stuff that might get moved up, or someone could help with
 * Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
 
-Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
+Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
 
 ## Closing Tickets ##
 
6127 MANUAL.html generated (file diff suppressed because it is too large)
6352 MANUAL.txt generated (file diff suppressed because it is too large)
87 Makefile
@@ -1,5 +1,5 @@
 SHELL = bash
-BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
+BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(shell git rev-parse --abbrev-ref HEAD))
 LAST_TAG := $(shell git describe --tags --abbrev=0)
 ifeq ($(BRANCH),$(LAST_TAG))
 	BRANCH := master
@@ -11,28 +11,30 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 	BRANCH_PATH :=
 endif
 TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
-NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
+NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
 ifneq ($(TAG),$(LAST_TAG))
 	TAG := $(TAG)-beta
 endif
 GO_VERSION := $(shell go version)
 GO_FILES := $(shell go list ./... | grep -v /vendor/ )
-# Run full tests if go >= go1.11
-FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
-BETA_PATH := $(BRANCH_PATH)$(TAG)
+ifdef BETA_SUBDIR
+	BETA_SUBDIR := /$(BETA_SUBDIR)
+endif
+BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
 BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
 BETA_UPLOAD_ROOT := memstore:beta-rclone-org
 BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
 # Pass in GOTAGS=xyz on the make command line to set build tags
 ifdef GOTAGS
 	BUILDTAGS=-tags "$(GOTAGS)"
+	LINTTAGS=--build-tags "$(GOTAGS)"
 endif
 
 .PHONY: rclone vars version
 
 rclone:
 	touch fs/version.go
-	go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	go install -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
 	cp -av `go env GOPATH`/bin/rclone .
 
 vars:
@@ -42,7 +44,6 @@ vars:
 	@echo LAST_TAG="'$(LAST_TAG)'"
 	@echo NEW_TAG="'$(NEW_TAG)'"
 	@echo GO_VERSION="'$(GO_VERSION)'"
-	@echo FULL_TESTS="'$(FULL_TESTS)'"
 	@echo BETA_URL="'$(BETA_URL)'"
 
 version:
@@ -50,45 +51,26 @@ version:
 
 # Full suite of integration tests
 test: rclone
-	go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
+	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
 	-test_all 2>&1 | tee test_all.log
 	@echo "Written logs in test_all.log"
 
 # Quick test
 quicktest:
 	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
-ifdef FULL_TESTS
 
 racequicktest:
 	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
-endif
 
 # Do source code quality checks
 check: rclone
-ifdef FULL_TESTS
-	go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
-	errcheck $(BUILDTAGS) ./...
-	find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
-	go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
-else
-	@echo Skipping source quality tests as version of go too old
-endif
-
-gometalinter_install:
-	go get -u github.com/alecthomas/gometalinter
-	gometalinter --install --update
-
-# We aren't using gometalinter as the default linter yet because
-# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
-# 2. can't get -printfuncs working with the vet linter
-gometalinter:
-	gometalinter ./...
+	@echo "-- START CODE QUALITY REPORT -------------------------------"
+	@golangci-lint run $(LINTTAGS) ./...
+	@echo "-- END CODE QUALITY REPORT ---------------------------------"
 
 # Get the build dependencies
 build_dep:
-ifdef FULL_TESTS
-	go get -u github.com/kisielk/errcheck
-	go get -u golang.org/x/tools/cmd/goimports
-	go get -u golang.org/x/lint/golint
-endif
+	go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
 
 # Get the release dependencies
 release_dep:
@@ -116,10 +98,10 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
 
 commanddocs: rclone
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
 
 backenddocs: rclone bin/make_backend_docs.py
-	./bin/make_backend_docs.py
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
 
 rcdocs: rclone
 	bin/make_rc_docs.sh
@@ -163,7 +145,7 @@ upload_github:
 cross: doc
 	go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
 
-beta:
+test_beta:
 	go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
 	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
 	@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -172,39 +154,32 @@ log_since_last_release:
 	git log $(LAST_TAG)..
 
 compile_all:
-ifdef FULL_TESTS
-	go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
-else
-	@echo Skipping compile all as version of go too old
-endif
+	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
 
-appveyor_upload:
-	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
+circleci_upload:
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
 ifndef BRANCH_PATH
-	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
 endif
-	@echo Beta release ready at $(BETA_URL)
+	@echo Beta release ready at $(BETA_URL)/testbuilds
 
-BUILD_FLAGS := -exclude "^(windows|darwin)/"
-ifeq ($(TRAVIS_OS_NAME),osx)
-	BUILD_FLAGS := -include "^darwin/" -cgo
-endif
-
-travis_beta:
-ifeq ($(TRAVIS_OS_NAME),linux)
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
+beta:
+ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
+	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
 endif
 	git log $(LAST_TAG).. > /tmp/git-log.txt
-	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
 
+upload_beta: rclone
+	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
 ifndef BRANCH_PATH
-	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
+	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
 endif
 	@echo Beta release ready at $(BETA_URL)
 
 # Fetch the binary builds from travis and appveyor
 fetch_binaries:
-	rclone -P sync $(BETA_UPLOAD) build/
+	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
 
 serve: website
 	cd docs && hugo server -v -w
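One small but easy-to-miss change in the Makefile above: the `NEW_TAG` recipe now formats the bumped version as `v%.2f.0` instead of `v%.2f`, so the next tag carries an explicit patch component. A minimal Go sketch of what that perl one-liner computes; this is not part of the Makefile, and the `nextTag` helper is hypothetical:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// nextTag mirrors the Makefile's NEW_TAG recipe: strip the leading "v",
// add 0.01 to the version number, and format it with a trailing ".0".
func nextTag(lastTag string) string {
	v, err := strconv.ParseFloat(strings.TrimPrefix(lastTag, "v"), 64)
	if err != nil {
		return lastTag // fall back to the input on a malformed tag
	}
	return fmt.Sprintf("v%.2f.0", v+0.01)
}

func main() {
	fmt.Println(nextTag("v1.47")) // prints "v1.48.0"
}
```

This matches the `git commit -a -v -m "Version v1.XX.0"` change in the release notes diff further down: releases move from two-part to three-part version numbers.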
26 README.md
@@ -7,12 +7,13 @@
 [Changelog](https://rclone.org/changelog/) |
 [Installation](https://rclone.org/install/) |
 [Forum](https://forum.rclone.org/) |
 [G+](https://google.com/+RcloneOrg)
 
-[](https://travis-ci.org/ncw/rclone)
-[](https://ci.appveyor.com/project/ncw/rclone)
-[](https://circleci.com/gh/ncw/rclone/tree/master)
-[](https://godoc.org/github.com/ncw/rclone)
+[](https://travis-ci.org/rclone/rclone)
+[](https://ci.appveyor.com/project/rclone/rclone)
+[](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
+[](https://circleci.com/gh/rclone/rclone/tree/master)
+[](https://goreportcard.com/report/github.com/rclone/rclone)
+[](https://godoc.org/github.com/rclone/rclone)
 
 # Rclone
 
@@ -20,6 +21,8 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 
 ## Storage providers
 
+* 1Fichier [:page_facing_up:](https://rclone.org/ficher/)
 * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
 * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
@@ -31,10 +34,12 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * FTP [:page_facing_up:](https://rclone.org/ftp/)
 * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
 * Google Drive [:page_facing_up:](https://rclone.org/drive/)
+* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Hubic [:page_facing_up:](https://rclone.org/hubic/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
+* Koofr [:page_facing_up:](https://rclone.org/koofr/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
 * Mega [:page_facing_up:](https://rclone.org/mega/)
 * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
@@ -43,13 +48,14 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
 * OVH [:page_facing_up:](https://rclone.org/swift/)
 * OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
-* Openstack Swift [:page_facing_up:](https://rclone.org/swift/)
+* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
 * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
 * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
 * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
+* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
 * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
+* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * SFTP [:page_facing_up:](https://rclone.org/sftp/)
 * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
 * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
@@ -60,16 +66,18 @@ Please see [the full list of all storage providers and their features](https://r
 
 ## Features
 
-* MD5/SHA1 hashes checked at all times for file integrity
+* MD5/SHA-1 hashes checked at all times for file integrity
 * Timestamps preserved on files
 * Partial syncs supported on a whole file basis
 * [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
 * [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
 * [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
-* Can sync to and from network, eg two different cloud accounts
+* Can sync to and from network, e.g. two different cloud accounts
 * Optional encryption ([Crypt](https://rclone.org/crypt/))
 * Optional cache ([Cache](https://rclone.org/cache/))
 * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
+* Multi-threaded downloads to local disk
+* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
 
 ## Installation & documentation
 
@@ -91,4 +99,4 @@ License
 -------
 
 This is free software under the terms of MIT the license (check the
-[COPYING file](/rclone/COPYING) included in this package).
+[COPYING file](/COPYING) included in this package).
@@ -11,7 +11,7 @@ Making a release
   * edit docs/content/changelog.md
   * make doc
   * git status - to check for new man pages - git add them
-  * git commit -a -v -m "Version v1.XX"
+  * git commit -a -v -m "Version v1.XX.0"
   * make retag
   * git push --tags origin master
   * # Wait for the appveyor and travis builds to complete then...
@@ -27,6 +27,7 @@ Making a release
 
 Early in the next release cycle update the vendored dependencies
   * Review any pinned packages in go.mod and remove if possible
+  * GO111MODULE=on go get -u github.com/spf13/cobra@master
   * make update
   * git status
   * git add new files
234 azure-pipelines.yml Normal file
@@ -0,0 +1,234 @@
+---
+# Azure pipelines build for rclone
+# Parts stolen shamelessly from all round the Internet, especially Caddy
+# -*- compile-command: "yamllint -f parsable azure-pipelines.yml" -*-
+
+trigger:
+  branches:
+    include:
+      - '*'
+  tags:
+    include:
+      - '*'
+
+variables:
+  GOROOT: $(gorootDir)/go
+  GOPATH: $(system.defaultWorkingDirectory)/gopath
+  GOCACHE: $(system.defaultWorkingDirectory)/gocache
+  GOBIN: $(GOPATH)/bin
+  GOMAXPROCS: 8 # workaround for cmd/mount tests locking up - see #3154
+  modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
+  GO111MODULE: 'off'
+  GOTAGS: cmount
+  GO_LATEST: false
+  CPATH: ''
+  GO_INSTALL_ARCH: amd64
+
+strategy:
+  matrix:
+    linux:
+      imageName: ubuntu-latest
+      gorootDir: /usr/local
+      GO_VERSION: latest
+      GOTAGS: cmount
+      BUILD_FLAGS: '-include "^linux/"'
+      MAKE_CHECK: true
+      MAKE_QUICKTEST: true
+      DEPLOY: true
+    mac:
+      imageName: macos-latest
+      gorootDir: /usr/local
+      GO_VERSION: latest
+      GOTAGS: "" # cmount doesn't work on osx travis for some reason
+      BUILD_FLAGS: '-include "^darwin/" -cgo'
+      MAKE_QUICKTEST: true
+      MAKE_RACEQUICKTEST: true
+      DEPLOY: true
+    windows_amd64:
+      imageName: windows-latest
+      gorootDir: C:\
+      GO_VERSION: latest
+      BUILD_FLAGS: '-include "^windows/amd64" -cgo'
+      MAKE_QUICKTEST: true
+      DEPLOY: true
+    windows_386:
+      imageName: windows-latest
+      gorootDir: C:\
+      GO_VERSION: latest
+      GO_INSTALL_ARCH: 386
+      BUILD_FLAGS: '-include "^windows/386" -cgo'
+      MAKE_QUICKTEST: true
+      DEPLOY: true
+    other_os:
+      imageName: ubuntu-latest
+      gorootDir: /usr/local
+      GO_VERSION: latest
+      BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
+      DEPLOY: true
+    modules_race:
+      imageName: ubuntu-latest
+      gorootDir: /usr/local
+      GO_VERSION: latest
+      GO111MODULE: on
+      GOPROXY: https://proxy.golang.org
+      MAKE_QUICKTEST: true
+      MAKE_RACEQUICKTEST: true
+    go1.9:
+      imageName: ubuntu-latest
+      gorootDir: /usr/local
+      GOCACHE: '' # build caching only came in go1.10
+      GO_VERSION: go1.9.7
+      MAKE_QUICKTEST: true
+    go1.10:
+      imageName: ubuntu-latest
+      gorootDir: /usr/local
+      GO_VERSION: go1.10.8
+      MAKE_QUICKTEST: true
+    go1.11:
+      imageName: ubuntu-latest
+      gorootDir: /usr/local
+      GO_VERSION: go1.11.12
+      MAKE_QUICKTEST: true
+
+pool:
+  vmImage: $(imageName)
+
+steps:
+  - bash: |
+      latestGo=$(curl "https://golang.org/VERSION?m=text")
+      echo "##vso[task.setvariable variable=GO_VERSION]$latestGo"
+      echo "##vso[task.setvariable variable=GO_LATEST]true"
+      echo "Latest Go version: $latestGo"
+    condition: eq( variables['GO_VERSION'], 'latest' )
+    displayName: "Get latest Go version"
+
+  - bash: |
+      sudo rm -f $(which go)
+      echo '##vso[task.prependpath]$(GOBIN)'
+      echo '##vso[task.prependpath]$(GOROOT)/bin'
+      mkdir -p '$(modulePath)'
+      shopt -s extglob
+      shopt -s dotglob
+      mv !(gopath) '$(modulePath)'
+    displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH
+
+  - task: CacheBeta@0
+    continueOnError: true
+    inputs:
+      key: go-build-cache | "$(Agent.JobName)"
+      path: $(GOCACHE)
+    displayName: Cache go build
+    condition: ne( variables['GOCACHE'], '' )
+
+  # Install Libraries (varies by platform)
+
+  - bash: |
+      sudo modprobe fuse
+      sudo chmod 666 /dev/fuse
+      sudo chown root:$USER /etc/fuse.conf
+      sudo apt-get install fuse libfuse-dev rpm pkg-config
+    condition: eq( variables['Agent.OS'], 'Linux' )
+    displayName: Install Libraries on Linux
+
+  - bash: |
+      brew update
+      brew tap caskroom/cask
+      brew cask install osxfuse
+    condition: eq( variables['Agent.OS'], 'Darwin' )
+    displayName: Install Libraries on macOS
+
+  - powershell: |
+      $ProgressPreference = 'SilentlyContinue'
+      choco install -y winfsp zip
+      Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
+      if ($env:GO_INSTALL_ARCH -eq "386") {
+        choco install -y mingw --forcex86 --force
+        Write-Host "##vso[task.prependpath]C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
+      }
+      # Copy mingw32-make.exe to make.exe so the same command line
+      # can be used on Windows as on macOS and Linux
+      $path = (get-command mingw32-make.exe).Path
+      Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
+    condition: eq( variables['Agent.OS'], 'Windows_NT' )
+    displayName: Install Libraries on Windows
+
+  # Install Go (this varies by platform)
+
+  - bash: |
+      wget "https://dl.google.com/go/$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
+      sudo mkdir $(gorootDir)
+      sudo chown ${USER}:${USER} $(gorootDir)
+      tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
+    condition: eq( variables['Agent.OS'], 'Linux' )
+    displayName: Install Go on Linux
+
+  - bash: |
+      wget "https://dl.google.com/go/$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
+      sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
+    condition: eq( variables['Agent.OS'], 'Darwin' )
+    displayName: Install Go on macOS
+
+  - powershell: |
+      $ProgressPreference = 'SilentlyContinue'
+      Write-Host "Downloading Go $(GO_VERSION) for $(GO_INSTALL_ARCH)"
+      (New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip", "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip")
+      Write-Host "Extracting Go"
+      Expand-Archive "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip" -DestinationPath "$(gorootDir)"
+    condition: eq( variables['Agent.OS'], 'Windows_NT' )
+    displayName: Install Go on Windows
+
+  # Display environment for debugging
+
+  - bash: |
+      printf "Using go at: $(which go)\n"
+      printf "Go version: $(go version)\n"
+      printf "\n\nGo environment:\n\n"
+      go env
+      printf "\n\nRclone environment:\n\n"
+      make vars
+      printf "\n\nSystem environment:\n\n"
+      env
+    workingDirectory: '$(modulePath)'
+    displayName: Print Go version and environment
+
+  # Run Tests
+
+  - bash: |
+      make quicktest
+    workingDirectory: '$(modulePath)'
+    displayName: Run tests
+    condition: eq( variables['MAKE_QUICKTEST'], 'true' )
+
+  - bash: |
+      make racequicktest
+    workingDirectory: '$(modulePath)'
+    displayName: Race test
+    condition: eq( variables['MAKE_RACEQUICKTEST'], 'true' )
+
+  - bash: |
+      make build_dep
+      make check
+    workingDirectory: '$(modulePath)'
+    displayName: Code quality test
+    condition: eq( variables['MAKE_CHECK'], 'true' )
+
+  - bash: |
+      make beta
+    workingDirectory: '$(modulePath)'
+    displayName: Do release build
+    condition: eq( variables['DEPLOY'], 'true' )
+
+  - bash: |
+      make upload_beta
+    env:
+      RCLONE_CONFIG_PASS: $(RCLONE_CONFIG_PASS)
+      BETA_SUBDIR: 'azure_pipelines' # FIXME remove when removing travis/appveyor
+    workingDirectory: '$(modulePath)'
+    displayName: Upload built binaries
+    condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
+
+  - publish: $(modulePath)/build
+    artifact: "rclone-build-$(Agent.JobName)"
+    displayName: Publish built binaries
+    condition: eq( variables['DEPLOY'], 'true' )
@@ -4,17 +4,17 @@ import (
 	"errors"
 	"strings"
 
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/fspath"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/fspath"
 )
 
 // Register with Fs
 func init() {
 	fsi := &fs.RegInfo{
 		Name:        "alias",
-		Description: "Alias for a existing remote",
+		Description: "Alias for an existing remote",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "remote",
@@ -30,7 +30,7 @@ type Options struct {
 	Remote string `config:"remote"`
 }
 
-// NewFs contstructs an Fs from the path.
+// NewFs constructs an Fs from the path.
 //
 // The returned Fs is the actual Fs, referenced by remote in the config
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -1,15 +1,16 @@
 package alias
 
 import (
+	"context"
 	"fmt"
 	"path"
 	"path/filepath"
 	"sort"
 	"testing"
 
-	_ "github.com/ncw/rclone/backend/local" // pull in test backend
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
+	_ "github.com/rclone/rclone/backend/local" // pull in test backend
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
 	"github.com/stretchr/testify/require"
 )
 
@@ -69,7 +70,7 @@ func TestNewFS(t *testing.T) {
 			prepare(t, remoteRoot)
 			f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
 			require.NoError(t, err, what)
-			gotEntries, err := f.List(test.fsList)
+			gotEntries, err := f.List(context.Background(), test.fsList)
 			require.NoError(t, err, what)
 
 			sort.Sort(gotEntries)
@@ -80,7 +81,7 @@ func TestNewFS(t *testing.T) {
 				wantEntry := test.entries[i]
 
 				require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
-				require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
+				require.Equal(t, wantEntry.size, gotEntry.Size(), what)
 				_, isDir := gotEntry.(fs.Directory)
 				require.Equal(t, wantEntry.isDir, isDir, what)
 			}
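The test change above tracks this compare's largest API migration: backend methods such as `Fs.List` now take a `context.Context` as their first argument. A minimal caller-side sketch of the new call shape; it assumes only what the diffs show (the single-argument `fs.NewFs` of this era, and that a plain directory path is handled by the local backend, which the blank import registers):

```go
package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/local" // register the local backend
	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()
	// A plain path is treated as a local-backend remote.
	f, err := fs.NewFs("/tmp")
	if err != nil {
		log.Fatal(err)
	}
	// List now takes a context as its first argument.
	entries, err := f.List(ctx, "")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Remote())
	}
}
```

Threading the context through every backend method is what lets callers cancel or time out in-flight cloud operations instead of abandoning them.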
@@ -2,30 +2,33 @@ package all
 
 import (
 	// Active file systems
-	_ "github.com/ncw/rclone/backend/alias"
-	_ "github.com/ncw/rclone/backend/amazonclouddrive"
-	_ "github.com/ncw/rclone/backend/azureblob"
-	_ "github.com/ncw/rclone/backend/b2"
-	_ "github.com/ncw/rclone/backend/box"
-	_ "github.com/ncw/rclone/backend/cache"
-	_ "github.com/ncw/rclone/backend/crypt"
-	_ "github.com/ncw/rclone/backend/drive"
-	_ "github.com/ncw/rclone/backend/dropbox"
-	_ "github.com/ncw/rclone/backend/ftp"
-	_ "github.com/ncw/rclone/backend/googlecloudstorage"
-	_ "github.com/ncw/rclone/backend/http"
-	_ "github.com/ncw/rclone/backend/hubic"
-	_ "github.com/ncw/rclone/backend/jottacloud"
-	_ "github.com/ncw/rclone/backend/local"
-	_ "github.com/ncw/rclone/backend/mega"
-	_ "github.com/ncw/rclone/backend/onedrive"
-	_ "github.com/ncw/rclone/backend/opendrive"
-	_ "github.com/ncw/rclone/backend/pcloud"
-	_ "github.com/ncw/rclone/backend/qingstor"
-	_ "github.com/ncw/rclone/backend/s3"
-	_ "github.com/ncw/rclone/backend/sftp"
-	_ "github.com/ncw/rclone/backend/swift"
-	_ "github.com/ncw/rclone/backend/union"
-	_ "github.com/ncw/rclone/backend/webdav"
-	_ "github.com/ncw/rclone/backend/yandex"
+	_ "github.com/rclone/rclone/backend/alias"
+	_ "github.com/rclone/rclone/backend/amazonclouddrive"
+	_ "github.com/rclone/rclone/backend/azureblob"
+	_ "github.com/rclone/rclone/backend/b2"
+	_ "github.com/rclone/rclone/backend/box"
+	_ "github.com/rclone/rclone/backend/cache"
+	_ "github.com/rclone/rclone/backend/crypt"
+	_ "github.com/rclone/rclone/backend/drive"
+	_ "github.com/rclone/rclone/backend/dropbox"
+	_ "github.com/rclone/rclone/backend/fichier"
+	_ "github.com/rclone/rclone/backend/ftp"
+	_ "github.com/rclone/rclone/backend/googlecloudstorage"
+	_ "github.com/rclone/rclone/backend/googlephotos"
+	_ "github.com/rclone/rclone/backend/http"
+	_ "github.com/rclone/rclone/backend/hubic"
+	_ "github.com/rclone/rclone/backend/jottacloud"
+	_ "github.com/rclone/rclone/backend/koofr"
+	_ "github.com/rclone/rclone/backend/local"
+	_ "github.com/rclone/rclone/backend/mega"
+	_ "github.com/rclone/rclone/backend/onedrive"
+	_ "github.com/rclone/rclone/backend/opendrive"
+	_ "github.com/rclone/rclone/backend/pcloud"
+	_ "github.com/rclone/rclone/backend/qingstor"
+	_ "github.com/rclone/rclone/backend/s3"
+	_ "github.com/rclone/rclone/backend/sftp"
+	_ "github.com/rclone/rclone/backend/swift"
+	_ "github.com/rclone/rclone/backend/union"
+	_ "github.com/rclone/rclone/backend/webdav"
+	_ "github.com/rclone/rclone/backend/yandex"
 )
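Each blank import above exists only for its side effect: the backend package's `init` function adds a `fs.RegInfo` to the registry, which is how the new `fichier`, `googlephotos`, and `koofr` backends become visible to `rclone config`. A sketch of that pattern for a hypothetical backend, modelled on the `alias` registration shown earlier; the `Help` field and the `fs.Register` call are assumed from the registry API, and `NewFs` is stubbed:

```go
// Package examplebackend is a hypothetical sketch (not a real rclone
// backend) of the registration pattern the blank imports rely on.
package examplebackend

import (
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// init runs when the package is imported, even with a blank import,
// making the backend discoverable by name.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "examplebackend",
		Description: "Hypothetical example backend",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "remote",
			Help: "Remote to point at.", // Help field assumed from the fs.Option API
		}},
	})
}

// NewFs would construct an Fs from the path; stubbed for illustration.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("examplebackend: not implemented")
}
```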
@@ -12,6 +12,7 @@ we ignore assets completely!
 */
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -21,19 +22,18 @@ import (
 	"strings"
 	"time"
 
-	"github.com/ncw/go-acd"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/fserrors"
-	"github.com/ncw/rclone/fs/fshttp"
-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/lib/dircache"
-	"github.com/ncw/rclone/lib/oauthutil"
-	"github.com/ncw/rclone/lib/pacer"
-	"github.com/ncw/rclone/lib/rest"
+	acd "github.com/ncw/go-acd"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/fs/fshttp"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/dircache"
+	"github.com/rclone/rclone/lib/oauthutil"
+	"github.com/rclone/rclone/lib/pacer"
 	"golang.org/x/oauth2"
 )
@@ -155,7 +155,7 @@ type Fs struct {
 	noAuthClient *http.Client       // unauthenticated http client
 	root         string             // the path we are working on
 	dirCache     *dircache.DirCache // Map of directory path to directory id
-	pacer        *pacer.Pacer       // pacer for API calls
+	pacer        *fs.Pacer          // pacer for API calls
 	trueRootID   string             // ID of true root directory
 	tokenRenewer *oauthutil.Renew   // renew the token on expiry
 }
@@ -247,6 +247,7 @@ func filterRequest(req *http.Request) {
 
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ctx := context.Background()
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -273,7 +274,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		root: root,
 		opt:  *opt,
 		c:    c,
-		pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
+		pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
 		noAuthClient: fshttp.NewClient(fs.Config),
 	}
 	f.features = (&fs.Features{
@@ -308,7 +309,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.dirCache = dircache.New(root, f.trueRootID, f)
 
 	// Find the current root
-	err = f.dirCache.FindRoot(false)
+	err = f.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		// Assume it is a file
 		newRoot, remote := dircache.SplitPath(root)
@@ -316,12 +317,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
 		tempF.root = newRoot
 		// Make new Fs which is the parent
-		err = tempF.dirCache.FindRoot(false)
+		err = tempF.dirCache.FindRoot(ctx, false)
 		if err != nil {
 			// No root so return old f
 			return f, nil
 		}
-		_, err := tempF.newObjectWithInfo(remote, nil)
+		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound {
 				// File doesn't exist so return old f
@@ -331,7 +332,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 		// XXX: update the old f here instead of returning tempF, since
 		// `features` were already filled with functions having *f as a receiver.
-		// See https://github.com/ncw/rclone/issues/2182
+		// See https://github.com/rclone/rclone/issues/2182
 		f.dirCache = tempF.dirCache
 		f.root = tempF.root
 		// return an error with an fs which points to the parent
@@ -353,7 +354,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -362,7 +363,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error)
 		// Set info but not meta
 		o.info = info
 	} else {
-		err := o.readMetaData() // reads info and meta, returning an error
+		err := o.readMetaData(ctx) // reads info and meta, returning an error
 		if err != nil {
 			return nil, err
 		}
@@ -372,12 +373,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error)
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(remote, nil)
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	return f.newObjectWithInfo(ctx, remote, nil)
 }
 
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
 	folder := acd.FolderFromId(pathID, f.c.Nodes)
 	var resp *http.Response
@@ -404,7 +405,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
 }
 
 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
 	//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
 	folder := acd.FolderFromId(pathID, f.c.Nodes)
 	var resp *http.Response
@@ -502,12 +503,12 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	err = f.dirCache.FindRoot(false)
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	err = f.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		return nil, err
 	}
-	directoryID, err := f.dirCache.FindDir(dir, false)
+	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
 		return nil, err
 	}
@@ -525,7 +526,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 		d := fs.NewDir(remote, when).SetID(*node.Id)
entries = append(entries, d)
|
||||
case fileKind:
|
||||
o, err := f.newObjectWithInfo(remote, node)
|
||||
o, err := f.newObjectWithInfo(ctx, remote, node)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
@@ -569,7 +570,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
// At the end of large uploads. The speculation is that the timeout
|
||||
// is waiting for the sha1 hashing to complete and the file may well
|
||||
// be properly uploaded.
|
||||
func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
|
||||
func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
|
||||
// Return if no error - all is well
|
||||
if inErr == nil {
|
||||
return false, inInfo, inErr
|
||||
@@ -609,7 +610,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
|
||||
fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
|
||||
remote := src.Remote()
|
||||
for i := 1; i <= retries; i++ {
|
||||
o, err := f.NewObject(remote)
|
||||
o, err := f.NewObject(ctx, remote)
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
|
||||
} else if err != nil {
|
||||
@@ -635,7 +636,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
remote := src.Remote()
|
||||
size := src.Size()
|
||||
// Temporary Object under construction
|
||||
@@ -644,17 +645,17 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
|
||||
remote: remote,
|
||||
}
|
||||
// Check if object already exists
|
||||
err := o.readMetaData()
|
||||
err := o.readMetaData(ctx)
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(in, src, options...)
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
// If not create it
|
||||
leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
|
||||
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -670,7 +671,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
|
||||
info, resp, err = folder.Put(in, leaf)
|
||||
f.tokenRenewer.Stop()
|
||||
var ok bool
|
||||
ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
|
||||
ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
|
||||
if ok {
|
||||
return false, nil
|
||||
}
|
||||
@@ -684,13 +685,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
err := f.dirCache.FindRoot(true)
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
err := f.dirCache.FindRoot(ctx, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dir != "" {
|
||||
_, err = f.dirCache.FindDir(dir, true)
|
||||
_, err = f.dirCache.FindDir(ctx, dir, true)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -704,7 +705,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
@@ -713,15 +714,15 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
// create the destination directory if necessary
|
||||
err := f.dirCache.FindRoot(true)
|
||||
err := f.dirCache.FindRoot(ctx, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
|
||||
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
|
||||
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -737,12 +738,12 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcErr, dstErr error
|
||||
)
|
||||
for i := 1; i <= fs.Config.LowLevelRetries; i++ {
|
||||
_, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
|
||||
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
|
||||
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
|
||||
// exit if error on source
|
||||
return nil, srcErr
|
||||
}
|
||||
dstObj, dstErr = f.NewObject(remote)
|
||||
dstObj, dstErr = f.NewObject(ctx, remote)
|
||||
if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
|
||||
// exit if error on dst
|
||||
return nil, dstErr
|
||||
@@ -771,7 +772,7 @@ func (f *Fs) DirCacheFlush() {
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(src, "DirMove error: not same remote type")
|
||||
@@ -787,14 +788,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
}
|
||||
|
||||
// find the root src directory
|
||||
err = srcFs.dirCache.FindRoot(false)
|
||||
err = srcFs.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// find the root dst directory
|
||||
if dstRemote != "" {
|
||||
err = f.dirCache.FindRoot(true)
|
||||
err = f.dirCache.FindRoot(ctx, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -809,14 +810,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
if dstRemote == "" {
|
||||
findPath = f.root
|
||||
}
|
||||
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
|
||||
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check destination does not exist
|
||||
if dstRemote != "" {
|
||||
_, err = f.dirCache.FindDir(dstRemote, false)
|
||||
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
|
||||
if err == fs.ErrorDirNotFound {
|
||||
// OK
|
||||
} else if err != nil {
|
||||
@@ -832,7 +833,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
if srcRemote == "" {
|
||||
srcDirectoryID, err = srcFs.dirCache.RootParentID()
|
||||
} else {
|
||||
_, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
|
||||
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -840,7 +841,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
srcLeaf, _ := dircache.SplitPath(srcPath)
|
||||
|
||||
// Find ID of src
|
||||
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
|
||||
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -873,17 +874,17 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
|
||||
// purgeCheck remotes the root directory, if check is set then it
|
||||
// refuses to do so if it has anything in
|
||||
func (f *Fs) purgeCheck(dir string, check bool) error {
|
||||
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
root := path.Join(f.root, dir)
|
||||
if root == "" {
|
||||
return errors.New("can't purge root directory")
|
||||
}
|
||||
dc := f.dirCache
|
||||
err := dc.FindRoot(false)
|
||||
err := dc.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rootID, err := dc.FindDir(dir, false)
|
||||
rootID, err := dc.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -932,8 +933,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
|
||||
// Rmdir deletes the root folder
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
return f.purgeCheck(dir, true)
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return f.purgeCheck(ctx, dir, true)
|
||||
}
|
||||
|
||||
// Precision return the precision of this Fs
|
||||
@@ -955,7 +956,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
// srcObj, ok := src.(*Object)
|
||||
// if !ok {
|
||||
// fs.Debugf(src, "Can't copy - not same remote type")
|
||||
@@ -966,7 +967,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// return f.NewObject(remote), nil
|
||||
// return f.NewObject(ctx, remote), nil
|
||||
//}
|
||||
|
||||
// Purge deletes all the files and the container
|
||||
@@ -974,8 +975,8 @@ func (f *Fs) Hashes() hash.Set {
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *Fs) Purge() error {
|
||||
return f.purgeCheck("", false)
|
||||
func (f *Fs) Purge(ctx context.Context) error {
|
||||
return f.purgeCheck(ctx, "", false)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -999,7 +1000,7 @@ func (o *Object) Remote() string {
|
||||
}
|
||||
|
||||
// Hash returns the Md5sum of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
@@ -1022,11 +1023,11 @@ func (o *Object) Size() int64 {
|
||||
// it also sets the info
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
if o.info != nil {
|
||||
return nil
|
||||
}
|
||||
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
|
||||
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
@@ -1055,8 +1056,8 @@ func (o *Object) readMetaData() (err error) {
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime() time.Time {
|
||||
err := o.readMetaData()
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to read metadata: %v", err)
|
||||
return time.Now()
|
||||
@@ -1070,7 +1071,7 @@ func (o *Object) ModTime() time.Time {
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
// FIXME not implemented
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
@@ -1081,7 +1082,7 @@ func (o *Object) Storable() bool {
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
|
||||
if bigObject {
|
||||
fs.Debugf(o, "Downloading large object via tempLink")
|
||||
@@ -1093,7 +1094,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
if !bigObject {
|
||||
in, resp, err = file.OpenHeaders(headers)
|
||||
} else {
|
||||
in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
|
||||
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
|
||||
}
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
})
|
||||
@@ -1103,7 +1104,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
file := acd.File{Node: o.info}
|
||||
var info *acd.File
|
||||
var resp *http.Response
|
||||
@@ -1114,7 +1115,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
info, resp, err = file.Overwrite(in)
|
||||
o.fs.tokenRenewer.Stop()
|
||||
var ok bool
|
||||
ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
|
||||
ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
|
||||
if ok {
|
||||
return false, nil
|
||||
}
|
||||
@@ -1139,7 +1140,7 @@ func (f *Fs) removeNode(info *acd.Node) error {
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove() error {
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return o.fs.removeNode(o.info)
|
||||
}
|
||||
|
||||
@@ -1261,7 +1262,7 @@ OnConflict:
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType() string {
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
|
||||
return *o.info.ContentProperties.ContentType
|
||||
}
|
||||
@@ -1274,7 +1275,7 @@ func (o *Object) MimeType() string {
|
||||
// Automatically restarts itself in case of unexpected behaviour of the remote.
|
||||
//
|
||||
// Close the returned channel to stop being notified.
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||
checkpoint := f.opt.Checkpoint
|
||||
|
||||
go func() {
|
||||
|
||||
@@ -7,9 +7,9 @@ package amazonclouddrive_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/amazonclouddrive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/backend/amazonclouddrive"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
// +build !plan9,!solaris
|
||||
|
||||
package azureblob
|
||||
|
||||
@@ -22,16 +22,18 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -51,6 +53,11 @@ const (
|
||||
maxUploadCutoff = 256 * fs.MebiByte
|
||||
defaultAccessTier = azblob.AccessTierNone
|
||||
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
||||
// Default storage account, key and blob endpoint for emulator support,
|
||||
// though it is a base64 key checked in here, it is publicly available secret.
|
||||
emulatorAccount = "devstoreaccount1"
|
||||
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
|
||||
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -61,13 +68,17 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "account",
|
||||
Help: "Storage Account Name (leave blank to use connection string or SAS URL)",
|
||||
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
|
||||
}, {
|
||||
Name: "key",
|
||||
Help: "Storage Account Key (leave blank to use connection string or SAS URL)",
|
||||
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
|
||||
}, {
|
||||
Name: "sas_url",
|
||||
Help: "SAS URL for container level access only\n(leave blank if using account/key or connection string)",
|
||||
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
|
||||
}, {
|
||||
Name: "use_emulator",
|
||||
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service\nLeave blank normally.",
|
||||
@@ -75,7 +86,7 @@ func init() {
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload (<= 256MB).",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
@@ -83,7 +94,7 @@ func init() {
|
||||
|
||||
Note that this is stored in memory and there may be up to
|
||||
"--transfers" chunks stored at once in memory.`,
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "list_chunk",
|
||||
@@ -127,6 +138,7 @@ type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
ListChunkSize uint `config:"list_chunk"`
|
||||
AccessTier string `config:"access_tier"`
|
||||
UseEmulator bool `config:"use_emulator"`
|
||||
}
|
||||
|
||||
// Fs represents a remote azure server
|
||||
@@ -135,13 +147,14 @@ type Fs struct {
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
client *http.Client // http client we are using
|
||||
svcURL *azblob.ServiceURL // reference to serviceURL
|
||||
cntURL *azblob.ContainerURL // reference to containerURL
|
||||
container string // the container we are working on
|
||||
containerOKMu sync.Mutex // mutex to protect container OK
|
||||
containerOK bool // true if we have created the container
|
||||
containerDeleted bool // true if we have deleted the container
|
||||
pacer *pacer.Pacer // To pace and retry the API calls
|
||||
pacer *fs.Pacer // To pace and retry the API calls
|
||||
uploadToken *pacer.TokenDispenser // control concurrency
|
||||
}
|
||||
|
||||
@@ -272,8 +285,41 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// httpClientFactory creates a Factory object that sends HTTP requests
|
||||
// to a rclone's http.Client.
|
||||
//
|
||||
// copied from azblob.newDefaultHTTPClientFactory
|
||||
func httpClientFactory(client *http.Client) pipeline.Factory {
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
||||
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
r, err := client.Do(request.WithContext(ctx))
|
||||
if err != nil {
|
||||
err = pipeline.NewError(err, "HTTP request failed")
|
||||
}
|
||||
return pipeline.NewHTTPResponse(r), err
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// newPipeline creates a Pipeline using the specified credentials and options.
|
||||
//
|
||||
// this code was copied from azblob.NewPipeline
|
||||
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
|
||||
// Closest to API goes first; closest to the wire goes last
|
||||
factories := []pipeline.Factory{
|
||||
azblob.NewTelemetryPolicyFactory(o.Telemetry),
|
||||
azblob.NewUniqueRequestIDPolicyFactory(),
|
||||
azblob.NewRetryPolicyFactory(o.Retry),
|
||||
c,
|
||||
pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
|
||||
azblob.NewRequestLogPolicyFactory(o.RequestLog),
|
||||
}
|
||||
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
ctx := context.Background()
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
@@ -307,12 +353,41 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
container: container,
|
||||
root: directory,
|
||||
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||
client: fshttp.NewClient(fs.Config),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
}).Fill(f)
|
||||
|
||||
var (
|
||||
u *url.URL
|
||||
serviceURL azblob.ServiceURL
|
||||
containerURL azblob.ContainerURL
|
||||
)
|
||||
switch {
|
||||
case opt.UseEmulator:
|
||||
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Failed to parse credentials")
|
||||
}
|
||||
u, err = url.Parse(emulatorBlobEndpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||
}
|
||||
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
containerURL = serviceURL.NewContainerURL(container)
|
||||
case opt.Account != "" && opt.Key != "":
|
||||
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||
if err != nil {
|
||||
@@ -323,7 +398,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||
}
|
||||
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
containerURL = serviceURL.NewContainerURL(container)
|
||||
case opt.SASURL != "":
|
||||
@@ -332,7 +407,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.Wrapf(err, "failed to parse SAS URL")
|
||||
}
|
||||
// use anonymous credentials in case of sas url
|
||||
pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||
// Check if we have container level SAS or account level sas
|
||||
parts := azblob.NewBlobURLParts(*u)
|
||||
if parts.ContainerName != "" {
|
||||
@@ -340,7 +415,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
|
||||
}
|
||||
|
||||
container = parts.ContainerName
|
||||
f.container = parts.ContainerName
|
||||
containerURL = azblob.NewContainerURL(*u, pipeline)
|
||||
} else {
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
@@ -349,24 +424,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
default:
|
||||
return nil, errors.New("Need account+key or connectionString or sasURL")
|
||||
}
|
||||
f.svcURL = &serviceURL
|
||||
f.cntURL = &containerURL
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
container: container,
|
||||
root: directory,
|
||||
svcURL: &serviceURL,
|
||||
cntURL: &containerURL,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
}).Fill(f)
|
||||
if f.root != "" {
|
||||
f.root += "/"
|
||||
// Check to see if the (container,directory) is actually an existing file
|
||||
@@ -378,10 +438,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
} else {
|
||||
f.root += "/"
|
||||
}
|
||||
_, err := f.NewObject(remote)
|
||||
_, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
|
||||
// File doesn't exist or is a directory so return old f
|
||||
f.root = oldRoot
|
||||
return f, nil
|
||||
}
|
||||
@@ -417,7 +477,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object,
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
@@ -437,6 +497,21 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
|
||||
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
||||
}
|
||||
|
||||
// Returns whether file is a directory marker or not
|
||||
func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool {
|
||||
// Directory markers are 0 length
|
||||
if size == 0 {
|
||||
// Note that metadata with hdi_isfolder = true seems to be a
|
||||
// defacto standard for marking blobs as directories.
|
||||
endsWithSlash := strings.HasSuffix(remote, "/")
|
||||
if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" {
|
||||
return true
|
||||
}
|
||||
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// listFn is called from list to handle an object
|
||||
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
|
||||
|
||||
@@ -444,7 +519,7 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
|
||||
// the container and root supplied
|
||||
//
|
||||
// dir is the starting directory, "" for root
|
||||
func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
f.containerOKMu.Lock()
|
||||
deleted := f.containerDeleted
|
||||
f.containerOKMu.Unlock()
|
||||
@@ -471,7 +546,7 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
Prefix: root,
|
||||
MaxResults: int32(maxResults),
|
||||
}
|
||||
ctx := context.Background()
|
||||
directoryMarkers := map[string]struct{}{}
|
||||
for marker := (azblob.Marker{}); marker.NotDone(); {
|
||||
var response *azblob.ListBlobsHierarchySegmentResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
@@ -501,13 +576,23 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
continue
|
||||
}
|
||||
remote := file.Name[len(f.root):]
|
||||
// Check for directory
|
||||
isDirectory := strings.HasSuffix(remote, "/")
|
||||
if isDirectory {
|
||||
remote = remote[:len(remote)-1]
|
||||
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
|
||||
if strings.HasSuffix(remote, "/") {
|
||||
remote = remote[:len(remote)-1]
|
||||
}
|
||||
err = fn(remote, file, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Keep track of directory markers. If recursing then
|
||||
// there will be no Prefixes so no need to keep track
|
||||
if !recurse {
|
||||
directoryMarkers[remote] = struct{}{}
|
||||
}
|
||||
continue // skip directory marker
|
||||
}
|
||||
// Send object
|
||||
err = fn(remote, file, isDirectory)
|
||||
err = fn(remote, file, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -520,6 +605,10 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
continue
|
||||
}
|
||||
remote = remote[len(f.root):]
|
||||
// Don't send if already sent as a directory marker
|
||||
if _, found := directoryMarkers[remote]; found {
|
||||
continue
|
||||
}
|
||||
// Send object
|
||||
err = fn(remote, nil, true)
|
||||
if err != nil {
|
||||
@@ -554,8 +643,8 @@ func (f *Fs) markContainerOK() {
|
||||
}
|
||||
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
||||
err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
err = f.list(ctx, dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -598,11 +687,11 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
if f.container == "" {
|
||||
return f.listContainers(dir)
|
||||
}
|
||||
return f.listDir(dir)
|
||||
return f.listDir(ctx, dir)
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
@@ -621,12 +710,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
//
|
||||
// Don't implement this unless you have a more efficient way
|
||||
// of listing recursively that doing a directory traversal.
|
||||
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
if f.container == "" {
|
||||
return fs.ErrorListBucketRequired
|
||||
}
|
||||
list := walk.NewListRHelper(callback)
|
||||
err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
err = f.list(ctx, dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -678,22 +767,60 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
// Temporary Object under construction
|
||||
fs := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
return fs, fs.Update(in, src, options...)
|
||||
return fs, fs.Update(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// Check if the container exists
|
||||
//
|
||||
// NB this can return incorrect results if called immediately after container deletion
|
||||
func (f *Fs) dirExists() (bool, error) {
|
||||
options := azblob.ListBlobsSegmentOptions{
|
||||
Details: azblob.BlobListingDetails{
|
||||
Copy: false,
|
||||
Metadata: false,
|
||||
Snapshots: false,
|
||||
UncommittedBlobs: false,
|
||||
Deleted: false,
|
||||
},
|
||||
MaxResults: 1,
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
ctx := context.Background()
|
||||
_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
f.containerOKMu.Lock()
|
||||
defer f.containerOKMu.Unlock()
|
||||
if f.containerOK {
|
||||
return nil
|
||||
}
|
||||
if !f.containerDeleted {
|
||||
exists, err := f.dirExists()
|
||||
if err == nil {
|
||||
f.containerOK = exists
|
||||
}
|
||||
if err != nil || exists {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// now try to create the container
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
@@ -726,9 +853,9 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
}
|
||||
|
||||
// isEmpty checks to see if a given directory is empty and returns an error if not
|
||||
func (f *Fs) isEmpty(dir string) (err error) {
|
||||
func (f *Fs) isEmpty(ctx context.Context, dir string) (err error) {
|
||||
empty := true
|
||||
err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
err = f.list(ctx, dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
empty = false
|
||||
return nil
|
||||
})
|
||||
@@ -775,8 +902,8 @@ func (f *Fs) deleteContainer() error {
|
||||
// Rmdir deletes the container if the fs is at the root
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
err := f.isEmpty(dir)
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
err := f.isEmpty(ctx, dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -797,7 +924,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
}
|
||||
|
||||
// Purge deletes all the files and directories including the old versions.
|
||||
func (f *Fs) Purge() error {
|
||||
func (f *Fs) Purge(ctx context.Context) error {
|
||||
dir := "" // forward compat!
|
||||
if f.root != "" || dir != "" {
|
||||
// Delegate to caller if not root container
|
||||
@@ -815,8 +942,8 @@ func (f *Fs) Purge() error {
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
err := f.Mkdir("")
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
err := f.Mkdir(ctx, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -834,7 +961,6 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
options := azblob.BlobAccessConditions{}
|
||||
ctx := context.Background()
|
||||
var startCopy *azblob.BlobStartCopyFromURLResponse
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -855,7 +981,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
copyStatus = getMetadata.CopyStatus()
|
||||
}
|
||||
|
||||
return f.NewObject(remote)
|
||||
return f.NewObject(ctx, remote)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -879,7 +1005,7 @@ func (o *Object) Remote() string {
|
||||
}
|
||||
|
||||
// Hash returns the MD5 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
@@ -923,27 +1049,37 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
|
||||
// o.md5
|
||||
// o.meta
|
||||
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
|
||||
metadata := info.NewMetadata()
|
||||
size := info.ContentLength()
|
||||
if isDirectoryMarker(size, metadata, o.remote) {
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
|
||||
// this as base64 encoded string.
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
|
||||
o.mimeType = info.ContentType()
|
||||
o.size = info.ContentLength()
|
||||
o.modTime = time.Time(info.LastModified())
|
||||
o.size = size
|
||||
o.modTime = info.LastModified()
|
||||
o.accessTier = azblob.AccessTierType(info.AccessTier())
|
||||
o.setMetadata(info.NewMetadata())
|
||||
o.setMetadata(metadata)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
|
||||
metadata := info.Metadata
|
||||
size := *info.Properties.ContentLength
|
||||
if isDirectoryMarker(size, metadata, o.remote) {
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
|
||||
// this as base64 encoded string.
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
|
||||
o.mimeType = *info.Properties.ContentType
|
||||
o.size = *info.Properties.ContentLength
|
||||
o.size = size
|
||||
o.modTime = info.Properties.LastModified
|
||||
o.accessTier = info.Properties.AccessTier
|
||||
o.setMetadata(info.Metadata)
|
||||
o.setMetadata(metadata)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -989,12 +1125,6 @@ func (o *Object) readMetaData() (err error) {
|
||||
return o.decodeMetaDataFromPropertiesResponse(blobProperties)
|
||||
}
|
||||
|
||||
// timeString returns modTime as the number of milliseconds
|
||||
// elapsed since January 1, 1970 UTC as a decimal string.
|
||||
func timeString(modTime time.Time) string {
|
||||
return strconv.FormatInt(modTime.UnixNano()/1E6, 10)
|
||||
}
|
||||
|
||||
// parseTimeString converts a decimal string number of milliseconds
|
||||
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
|
||||
// the modTime variable.
|
||||
@@ -1015,14 +1145,14 @@ func (o *Object) parseTimeString(timeString string) (err error) {
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime() (result time.Time) {
|
||||
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
|
||||
// The error is logged in readMetaData
|
||||
_ = o.readMetaData()
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
// Make sure o.meta is not nil
|
||||
if o.meta == nil {
|
||||
o.meta = make(map[string]string, 1)
|
||||
@@ -1031,7 +1161,6 @@ func (o *Object) SetModTime(modTime time.Time) error {
|
||||
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
||||
|
||||
blob := o.getBlobReference()
|
||||
ctx := context.Background()
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
|
||||
return o.fs.shouldRetry(err)
|
||||
@@ -1049,7 +1178,7 @@ func (o *Object) Storable() bool {
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Offset and Count for range download
|
||||
var offset int64
|
||||
var count int64
|
||||
@@ -1073,7 +1202,6 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
}
|
||||
}
|
||||
blob := o.getBlobReference()
|
||||
ctx := context.Background()
|
||||
ac := azblob.BlobAccessConditions{}
|
||||
var dowloadResponse *azblob.DownloadResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
@@ -1262,31 +1390,31 @@ outer:
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
err = o.fs.Mkdir("")
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
err = o.fs.Mkdir(ctx, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
size := src.Size()
|
||||
// Update Mod time
|
||||
o.updateMetadataWithModTime(src.ModTime())
|
||||
o.updateMetadataWithModTime(src.ModTime(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blob := o.getBlobReference()
|
||||
httpHeaders := azblob.BlobHTTPHeaders{}
|
||||
httpHeaders.ContentType = fs.MimeType(o)
|
||||
// Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
|
||||
// MD5 only for PutBlob requests
|
||||
if size < int64(o.fs.opt.UploadCutoff) {
|
||||
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
|
||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||
if err == nil {
|
||||
httpHeaders.ContentMD5 = sourceMD5bytes
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
||||
}
|
||||
httpHeaders.ContentType = fs.MimeType(ctx, o)
|
||||
// Compute the Content-MD5 of the file, for multiparts uploads it
|
||||
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
|
||||
// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
|
||||
// in order to validate its integrity during transport
|
||||
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
|
||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||
if err == nil {
|
||||
httpHeaders.ContentMD5 = sourceMD5bytes
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1299,14 +1427,13 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
|
||||
// is merged the SDK can't upload a single blob of exactly the chunk
|
||||
// size, so upload with a multpart upload to work around.
|
||||
// See: https://github.com/ncw/rclone/issues/2653
|
||||
// See: https://github.com/rclone/rclone/issues/2653
|
||||
multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
|
||||
if size == int64(o.fs.opt.ChunkSize) {
|
||||
multipartUpload = true
|
||||
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
if multipartUpload {
|
||||
@@ -1339,11 +1466,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove() error {
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
blob := o.getBlobReference()
|
||||
snapShotOptions := azblob.DeleteSnapshotsOptionNone
|
||||
ac := azblob.BlobAccessConditions{}
|
||||
ctx := context.Background()
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blob.Delete(ctx, snapShotOptions, ac)
|
||||
return o.fs.shouldRetry(err)
|
||||
@@ -1351,7 +1477,7 @@ func (o *Object) Remove() error {
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType() string {
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
// +build !plan9,!solaris
|
||||
|
||||
package azureblob
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
// Test AzureBlob filesystem interface
|
||||
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
// +build !plan9,!solaris
|
||||
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Build for azureblob for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build freebsd netbsd openbsd plan9 solaris !go1.8
|
||||
// +build plan9 solaris
|
||||
|
||||
package azureblob
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
)
|
||||
|
||||
// Error describes a B2 error response
|
||||
@@ -17,12 +17,12 @@ type Error struct {
|
||||
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
|
||||
}
|
||||
|
||||
// Error statisfies the error interface
|
||||
// Error satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
|
||||
}
|
||||
|
||||
// Fatal statisfies the Fatal interface
|
||||
// Fatal satisfies the Fatal interface
|
||||
//
|
||||
// It indicates which errors should be treated as fatal
|
||||
func (e *Error) Fatal() bool {
|
||||
@@ -100,7 +100,7 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||
return Timestamp(newT), base[:versionStart] + ext
|
||||
}
|
||||
|
||||
// IsZero returns true if the timestamp is unitialised
|
||||
// IsZero returns true if the timestamp is uninitialized
|
||||
func (t Timestamp) IsZero() bool {
|
||||
return time.Time(t).IsZero()
|
||||
}
|
||||
@@ -136,6 +136,7 @@ type AuthorizeAccountResponse struct {
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
|
||||
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
|
||||
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
|
||||
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
|
||||
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
|
||||
} `json:"allowed"`
|
||||
@@ -188,6 +189,21 @@ type GetUploadURLResponse struct {
|
||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
|
||||
}
|
||||
|
||||
// GetDownloadAuthorizationRequest is passed to b2_get_download_authorization
|
||||
type GetDownloadAuthorizationRequest struct {
|
||||
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
|
||||
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
|
||||
ValidDurationInSeconds int64 `json:"validDurationInSeconds"` // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds.
|
||||
B2ContentDisposition string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition.
|
||||
}
|
||||
|
||||
// GetDownloadAuthorizationResponse is received from b2_get_download_authorization
|
||||
type GetDownloadAuthorizationResponse struct {
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
|
||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name.
|
||||
}
|
||||
|
||||
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
|
||||
type FileInfo struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
@@ -310,3 +326,14 @@ type CancelLargeFileResponse struct {
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
}
|
||||
|
||||
// CopyFileRequest is as passed to b2_copy_file
|
||||
type CopyFileRequest struct {
|
||||
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
|
||||
Name string `json:"fileName"` // The name of the new file being created.
|
||||
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
|
||||
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
|
||||
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
|
||||
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
|
||||
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
|
||||
}
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/backend/b2/api"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/rclone/rclone/backend/b2/api"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
526
backend/b2/b2.go
526
backend/b2/b2.go
@@ -7,6 +7,7 @@ package b2
import (
"bufio"
"bytes"
"context"
"crypto/sha1"
"fmt"
gohash "hash"
@@ -19,18 +20,18 @@ import (
"sync"
"time"

"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)

const (
@@ -108,7 +109,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Files above this size will be uploaded in chunks of "--b2-chunk-size".

This value should be set no larger than 4.657GiB (== 5GB).`,
Default: fs.SizeSuffix(defaultUploadCutoff),
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
@@ -117,8 +118,30 @@ This value should be set no larger than 4.657GiB (== 5GB).`,
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might be a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
|
||||
minimim size.`,
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
minimum size.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
Help: `Disable checksums for large (> upload cutoff) files`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "download_url",
|
||||
Help: `Custom endpoint for downloads.
|
||||
|
||||
This is usually set to a Cloudflare CDN URL as Backblaze offers
|
||||
free egress for data downloaded through the Cloudflare network.
|
||||
This is probably only useful for a public bucket.
|
||||
Leave blank if you want to use the endpoint provided by Backblaze.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "download_auth_duration",
|
||||
Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
|
||||
|
||||
The duration before the download authorization token will expire.
|
||||
The minimum value is 1 second. The maximum value is one week.`,
|
||||
Default: fs.Duration(7 * 24 * time.Hour),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
@@ -126,34 +149,39 @@ minimim size.`,
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Account string `config:"account"`
|
||||
Key string `config:"key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
TestMode string `config:"test_mode"`
|
||||
Versions bool `config:"versions"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Account string `config:"account"`
|
||||
Key string `config:"key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
TestMode string `config:"test_mode"`
|
||||
Versions bool `config:"versions"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DisableCheckSum bool `config:"disable_checksum"`
|
||||
DownloadURL string `config:"download_url"`
|
||||
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
|
||||
}
|
||||
|
||||
// Fs represents a remote b2 server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the b2 server
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
bucketIDMutex sync.Mutex // mutex to protect _bucketID
|
||||
_bucketID string // the ID of the bucket we are working on
|
||||
info api.AuthorizeAccountResponse // result of authorize call
|
||||
uploadMu sync.Mutex // lock for upload variable
|
||||
uploads []*api.GetUploadURLResponse // result of get upload URL calls
|
||||
authMu sync.Mutex // lock for authorizing the account
|
||||
pacer *pacer.Pacer // To pace and retry the API calls
|
||||
bufferTokens chan []byte // control concurrency of multipart uploads
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the b2 server
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
bucketIDMutex sync.Mutex // mutex to protect _bucketID
|
||||
_bucketID string // the ID of the bucket we are working on
|
||||
bucketTypeMutex sync.Mutex // mutex to protect _bucketType
|
||||
_bucketType string // the Type of the bucket we are working on
|
||||
info api.AuthorizeAccountResponse // result of authorize call
|
||||
uploadMu sync.Mutex // lock for upload variable
|
||||
uploads []*api.GetUploadURLResponse // result of get upload URL calls
|
||||
authMu sync.Mutex // lock for authorizing the account
|
||||
pacer *fs.Pacer // To pace and retry the API calls
|
||||
bufferTokens chan []byte // control concurrency of multipart uploads
|
||||
}
|
||||
|
||||
// Object describes a b2 object
|
||||
@@ -236,13 +264,7 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
|
||||
fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
|
||||
}
|
||||
}
|
||||
retryAfterDuration := time.Duration(retryAfter) * time.Second
|
||||
if f.pacer.GetSleep() < retryAfterDuration {
|
||||
fs.Debugf(f, "Setting sleep to %v after error: %v", retryAfterDuration, err)
|
||||
// We set 1/2 the value here because the pacer will double it immediately
|
||||
f.pacer.SetSleep(retryAfterDuration / 2)
|
||||
}
|
||||
return true, err
|
||||
return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
|
||||
}
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
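
The rewrite above drops the manual sleep bookkeeping in favour of pacer.RetryAfterError, which hands the server-advised delay to the retry machinery. A standalone sketch of the header parsing that feeds it (assumed helper, not rclone's code; imports net/http, strconv and time):

// retryAfterDuration extracts the delay from a Retry-After header.
// B2 sends it as whole seconds on 429/503 responses.
func retryAfterDuration(resp *http.Response) time.Duration {
	if resp == nil {
		return 0
	}
	if s := resp.Header.Get("Retry-After"); s != "" {
		if secs, err := strconv.Atoi(s); err == nil && secs > 0 {
			return time.Duration(secs) * time.Second
		}
	}
	return 0
}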
@@ -313,8 +335,9 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}

// NewFs contstructs an Fs from the path, bucket:path
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -348,7 +371,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
bucket: bucket,
root: directory,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -368,6 +391,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// If this is a key limited to a single bucket, it must exist already
if f.bucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := f.info.Allowed.BucketName
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.bucket {
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.markBucketOK()
f.setBucketID(f.info.Allowed.BucketID)
}
@@ -382,7 +412,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else {
f.root += "/"
}
_, err := f.NewObject(remote)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -421,6 +451,16 @@ func (f *Fs) authorizeAccount() error {
return nil
}

// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
for _, capability := range f.info.Allowed.Capabilities {
if capability == permission {
return true
}
}
return false
}

// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
@@ -500,7 +540,7 @@ func (f *Fs) putUploadBlock(buf []byte) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *api.File) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -511,7 +551,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.File) (fs.Object, error)
return nil, err
}
} else {
err := o.readMetaData() // reads info and headers, returning an error
err := o.readMetaData(ctx) // reads info and headers, returning an error
if err != nil {
return nil, err
}
@@ -521,8 +561,8 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.File) (fs.Object, error)

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is called from list to handle an object
@@ -546,7 +586,7 @@ var errEndList = errors.New("end list")
// than 1000)
//
// If hidden is set then it will list the hidden (deleted) files too.
func (f *Fs) list(dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
func (f *Fs) list(ctx context.Context, dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
root := f.root
if dir != "" {
root += dir + "/"
@@ -627,7 +667,7 @@ func (f *Fs) list(dir string, recurse bool, prefix string, limit int, hidden boo
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{})
return d, nil
@@ -641,7 +681,7 @@ func (f *Fs) itemToDirEntry(remote string, object *api.File, isDirectory bool, l
if object.Action == "hide" {
return nil, nil
}
o, err := f.newObjectWithInfo(remote, object)
o, err := f.newObjectWithInfo(ctx, remote, object)
if err != nil {
return nil, err
}
@@ -658,10 +698,10 @@ func (f *Fs) markBucketOK() {
}

// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
last := ""
err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
err = f.list(ctx, dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
if err != nil {
return err
}
@@ -703,11 +743,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(dir)
return f.listDir(ctx, dir)
}

// ListR lists the objects and directories of the Fs starting
@@ -726,14 +766,14 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
last := ""
err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
err = f.list(ctx, dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
if err != nil {
return err
}
@@ -778,6 +818,42 @@ func (f *Fs) listBucketsToFn(fn listBucketFn) error {
return nil
}

// getbucketType finds the bucketType for the current bucket name
// can be one of allPublic, allPrivate, or snapshot
func (f *Fs) getbucketType() (bucketType string, err error) {
f.bucketTypeMutex.Lock()
defer f.bucketTypeMutex.Unlock()
if f._bucketType != "" {
return f._bucketType, nil
}
err = f.listBucketsToFn(func(bucket *api.Bucket) error {
if bucket.Name == f.bucket {
bucketType = bucket.Type
}
return nil

})
if bucketType == "" {
err = fs.ErrorDirNotFound
}
f._bucketType = bucketType
return bucketType, err
}

// setBucketType sets the Type for the current bucket name
func (f *Fs) setBucketType(Type string) {
f.bucketTypeMutex.Lock()
f._bucketType = Type
f.bucketTypeMutex.Unlock()
}

// clearBucketType clears the Type for the current bucket name
func (f *Fs) clearBucketType() {
f.bucketTypeMutex.Lock()
f._bucketType = ""
f.bucketTypeMutex.Unlock()
}
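
getbucketType, setBucketType and clearBucketType follow the same mutex-guarded lazy-cache pattern already used for _bucketID. The generic shape, as a sketch (names are illustrative; imports sync):

// cached is a lazily fetched string guarded by its own mutex.
type cached struct {
	mu    sync.Mutex
	value string
}

func (c *cached) get(fetch func() (string, error)) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.value != "" {
		return c.value, nil // already known
	}
	v, err := fetch() // e.g. list the buckets and pick ours
	if err != nil {
		return "", err
	}
	c.value = v
	return v, nil
}

func (c *cached) clear() {
	c.mu.Lock()
	c.value = ""
	c.mu.Unlock()
}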

// getBucketID finds the ID for the current bucket name
func (f *Fs) getBucketID() (bucketID string, err error) {
f.bucketIDMutex.Lock()
@@ -818,22 +894,22 @@ func (f *Fs) clearBucketID() {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
}
return fs, fs.Update(in, src, options...)
return fs, fs.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
@@ -872,6 +948,7 @@ func (f *Fs) Mkdir(dir string) error {
return errors.Wrap(err, "failed to create bucket")
}
f.setBucketID(response.ID)
f.setBucketType(response.Type)
f.bucketOK = true
return nil
}
@@ -879,7 +956,7 @@ func (f *Fs) Mkdir(dir string) error {
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
@@ -907,6 +984,7 @@ func (f *Fs) Rmdir(dir string) error {
}
f.bucketOK = false
f.clearBucketID()
f.clearBucketType()
f.clearUploadURL()
return nil
}
@@ -936,6 +1014,13 @@ func (f *Fs) hide(Name string) error {
return f.shouldRetry(resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "already_hidden" {
// sometimes eventual consistency causes this, so
// ignore this error since it is harmless
return nil
}
}
return errors.Wrapf(err, "failed to hide %q", Name)
}
return nil
@@ -967,7 +1052,7 @@ func (f *Fs) deleteByID(ID, Name string) error {
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(oldOnly bool) error {
func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
var errReturn error
var checkErrMutex sync.Mutex
var checkErr = func(err error) {
@@ -980,6 +1065,12 @@ func (f *Fs) purge(oldOnly bool) error {
errReturn = err
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
if time.Since(time.Time(timestamp)).Hours() > 24 {
return true
}
return false
}

// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
@@ -989,20 +1080,33 @@ func (f *Fs) purge(oldOnly bool) error {
go func() {
defer wg.Done()
for object := range toBeDeleted {
accounting.Stats.Checking(object.Name)
checkErr(f.deleteByID(object.ID, object.Name))
accounting.Stats.DoneChecking(object.Name)
oi, err := f.newObjectWithInfo(ctx, object.Name, object)
if err != nil {
fs.Errorf(object.Name, "Can't create object %v", err)
continue
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(object.ID, object.Name)
checkErr(err)
tr.Done(err)
}
}()
}
last := ""
checkErr(f.list("", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
checkErr(f.list(ctx, "", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
accounting.Stats.Checking(remote)
oi, err := f.newObjectWithInfo(ctx, object.Name, object)
if err != nil {
fs.Errorf(object, "Can't create object %+v", err)
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
if oldOnly && last != remote {
if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
}
@@ -1011,7 +1115,7 @@ func (f *Fs) purge(oldOnly bool) error {
toBeDeleted <- object
}
last = remote
accounting.Stats.DoneChecking(remote)
tr.Done(nil)
}
return nil
}))
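
Stripped of the accounting and B2 specifics, the deletion pipeline above is a bounded channel fanning work out to a fixed pool of goroutines (fs.Config.Transfers of them in the real code; 4 below as a stand-in; imports sync):

const transfers = 4
toDelete := make(chan string, transfers)
var wg sync.WaitGroup
wg.Add(transfers)
for i := 0; i < transfers; i++ {
	go func() {
		defer wg.Done()
		for name := range toDelete {
			_ = name // delete the named object version here
		}
	}()
}
toDelete <- "example/object" // producer side: the listing loop
close(toDelete)              // signal no more work
wg.Wait()                    // drain before returning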
@@ -1019,19 +1123,71 @@ func (f *Fs) purge(oldOnly bool) error {
wg.Wait()

if !oldOnly {
checkErr(f.Rmdir(""))
checkErr(f.Rmdir(ctx, ""))
}
return errReturn
}

// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge() error {
return f.purge(false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purge(ctx, false)
}

// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp() error {
return f.purge(true)
func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, true)
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
destBucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: srcObj.id,
Name: f.root + remote,
MetadataDirective: "COPY",
DestBucketID: destBucketID,
}
var response api.FileInfo
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
o := &Object{
fs: f,
remote: remote,
}
err = o.decodeMetaDataFileInfo(&response)
if err != nil {
return nil, err
}
return o, nil
}

// Hashes returns the supported hash sets.
@@ -1039,6 +1195,77 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.SHA1)
}

// getDownloadAuthorization returns authorization token for downloading
// without account.
func (f *Fs) getDownloadAuthorization(remote string) (authorization string, err error) {
validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 {
return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")
}
if !f.hasPermission("shareFiles") {
return "", errors.New("sharing a file link requires the shareFiles permission")
}
bucketID, err := f.getBucketID()
if err != nil {
return "", err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_download_authorization",
}
var request = api.GetDownloadAuthorizationRequest{
BucketID: bucketID,
FileNamePrefix: path.Join(f.root, remote),
ValidDurationInSeconds: validDurationInSeconds,
}
var response api.GetDownloadAuthorizationResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return "", errors.Wrap(err, "failed to get download authorization")
}
return response.AuthorizationToken, nil
}

// PublicLink returns a link for downloading without account.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
var RootURL string
if f.opt.DownloadURL == "" {
RootURL = f.info.DownloadURL
} else {
RootURL = f.opt.DownloadURL
}
_, err = f.NewObject(ctx, remote)
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
err2 := f.list(ctx, remote, false, "", 1, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
err = nil
return nil
})
if err2 != nil {
return "", err2
}
}
if err != nil {
return "", err
}
absPath := "/" + path.Join(f.root, remote)
link = RootURL + "/file/" + urlEncode(f.bucket) + absPath
bucketType, err := f.getbucketType()
if err != nil {
return "", err
}
if bucketType == "allPrivate" || bucketType == "snapshot" {
AuthorizationToken, err := f.getDownloadAuthorization(remote)
if err != nil {
return "", err
}
link += "?Authorization=" + AuthorizationToken
}
return link, nil
}
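
A worked example of the link shapes PublicLink produces; the host, bucket, path and token below are all placeholder values:

rootURL := "https://f000.backblazeb2.com" // from authorize_account, or --b2-download-url
bucket, remote := "my-bucket", "photos/cat.jpg"
link := rootURL + "/file/" + bucket + "/" + remote
// Public bucket: the link is usable as-is.
bucketType, token := "allPrivate", "3_exampleDownloadToken"
if bucketType == "allPrivate" || bucketType == "snapshot" {
	link += "?Authorization=" + token // token from b2_get_download_authorization
}
// https://f000.backblazeb2.com/file/my-bucket/photos/cat.jpg?Authorization=3_exampleDownloadToken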

// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -1060,13 +1287,13 @@ func (o *Object) Remote() string {
}

// Hash returns the Sha-1 of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.SHA1 {
return "", hash.ErrUnsupported
}
if o.sha1 == "" {
// Error is logged in readMetaData
err := o.readMetaData()
err := o.readMetaData(ctx)
if err != nil {
return "", err
}
@@ -1122,17 +1349,8 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) readMetaData() (err error) {
if o.id != "" {
return nil
}
// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
maxSearched := 1
var timestamp api.Timestamp
baseRemote := o.remote
@@ -1140,8 +1358,8 @@ func (o *Object) readMetaData() (err error) {
timestamp, baseRemote = api.RemoveVersion(baseRemote)
maxSearched = maxVersions
}
var info *api.File
err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {

err = o.fs.list(ctx, "", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
if isDirectory {
return nil
}
@@ -1155,12 +1373,30 @@ func (o *Object) readMetaData() (err error) {
})
if err != nil {
if err == fs.ErrorDirNotFound {
return fs.ErrorObjectNotFound
return nil, fs.ErrorObjectNotFound
}
return err
return nil, err
}
if info == nil {
return fs.ErrorObjectNotFound
return nil, fs.ErrorObjectNotFound
}
return info, nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.id != "" {
return nil
}
info, err := o.getMetaData(ctx)
if err != nil {
return err
}
return o.decodeMetaData(info)
}
@@ -1181,7 +1417,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return err
return nil
}
o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
return nil
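
A worked example of the conversion above: B2 stores modification times as milliseconds since the Unix epoch (the src_last_modified_millis entry), so the seconds and the nanosecond remainder are split out. Assumes imports fmt and time:

ms := int64(1560000000123) // e.g. parsed from "1560000000123"
t := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()
fmt.Println(t) // 2019-06-08 13:20:00.123 +0000 UTC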
@@ -1193,16 +1429,39 @@ func (o *Object) parseTimeString(timeString string) (err error) {
// LastModified returned in the http headers
//
// SHA-1 will also be updated once the request has completed.
func (o *Object) ModTime() (result time.Time) {
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
// The error is logged in readMetaData
_ = o.readMetaData()
_ = o.readMetaData(ctx)
return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
// Not possible with B2
return fs.ErrorCantSetModTime
// SetModTime sets the modification time of the Object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
info, err := o.getMetaData(ctx)
if err != nil {
return err
}
info.Info[timeKey] = timeString(modTime)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: o.id,
Name: o.fs.root + o.remote, // copy to same name
MetadataDirective: "REPLACE",
ContentType: info.ContentType,
Info: info.Info,
}
var response api.FileInfo
err = o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(&opts, &request, &response)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return err
}
return o.decodeMetaDataFileInfo(&response)
}
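
In other words: B2 offers no in-place metadata update, so the new SetModTime server-side copies the file onto itself with metadataDirective REPLACE and a refreshed time entry. Roughly the request it builds (the ID, name and values below are placeholders):

request := api.CopyFileRequest{
	SourceID:          "4_zexampleFileID",
	Name:              "dir/file.txt", // same name: the copy replaces only metadata
	MetadataDirective: "REPLACE",
	ContentType:       "text/plain",
	Info: map[string]string{
		"src_last_modified_millis": "1560000000123",
	},
}
_ = request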

// Storable returns if this object is storable
@@ -1271,12 +1530,20 @@ func (file *openFile) Close() (err error) {
var _ io.ReadCloser = &openFile{}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: o.fs.info.DownloadURL,
Options: options,
}

// Use downloadUrl from backblaze if downloadUrl is not set
// otherwise use the custom downloadUrl
if o.fs.opt.DownloadURL == "" {
opts.RootURL = o.fs.info.DownloadURL
} else {
opts.RootURL = o.fs.opt.DownloadURL
}

// Download by id if set otherwise by name
if o.id != "" {
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
@@ -1354,11 +1621,11 @@ func urlEncode(in string) string {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.opt.Versions {
return errNotWithVersions
}
err = o.fs.Mkdir("")
err = o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
@@ -1376,7 +1643,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio

if err == nil {
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(o, in, src)
up, err := o.fs.newLargeUpload(ctx, o, in, src)
if err != nil {
o.fs.putUploadBlock(buf)
return err
@@ -1391,16 +1658,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return err
}
} else if size > int64(o.fs.opt.UploadCutoff) {
up, err := o.fs.newLargeUpload(o, in, src)
up, err := o.fs.newLargeUpload(ctx, o, in, src)
if err != nil {
return err
}
return up.Upload()
}

modTime := src.ModTime()
modTime := src.ModTime(ctx)

calculatedSha1, _ := src.Hash(hash.SHA1)
calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
if calculatedSha1 == "" {
calculatedSha1 = "hex_digits_at_end"
har := newHashAppendingReader(in, sha1.New())
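
The "hex_digits_at_end" value tells B2 that the SHA-1 arrives as 40 trailing hex characters after the payload, which lets an upload stream without knowing the hash up front. A minimal standalone sketch of such a wrapper (an assumed shape, not rclone's newHashAppendingReader; imports encoding/hex, hash, io, strings):

type hashAppender struct {
	in   io.Reader
	h    hash.Hash // e.g. sha1.New()
	tail io.Reader // set once the underlying reader is exhausted
}

func (r *hashAppender) Read(p []byte) (int, error) {
	if r.tail == nil {
		n, err := r.in.Read(p)
		r.h.Write(p[:n]) // hash everything that passes through
		if err == io.EOF {
			// switch to emitting the 40 hex digits of the digest
			r.tail = strings.NewReader(hex.EncodeToString(r.h.Sum(nil)))
			return n, nil
		}
		return n, err
	}
	return r.tail.Read(p)
}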
@@ -1437,7 +1704,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// Content-Type b2/x-auto to automatically set the stored Content-Type
// post upload. In the case where a file extension is absent or the
// lookup fails, the Content-Type is set to application/octet-stream. The
// Content-Type mappings can be purused here.
// Content-Type mappings can be pursued here.
//
// X-Bz-Content-Sha1
// required
@@ -1478,17 +1745,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-File-Name": urlEncode(o.fs.root + o.remote),
"Content-Type": fs.MimeType(src),
"Content-Type": fs.MimeType(ctx, src),
sha1Header: calculatedSha1,
timeHeader: timeString(modTime),
},
ContentLength: &size,
}
// for go1.8 (see release notes) we must nil the Body if we want a
// "Content-Length: 0" header which b2 requires for all files.
if size == 0 {
opts.Body = nil
}
var response api.FileInfo
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1508,7 +1770,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}

// Remove an object
func (o *Object) Remove() error {
func (o *Object) Remove(ctx context.Context) error {
if o.fs.opt.Versions {
return errNotWithVersions
}
@@ -1519,7 +1781,7 @@ func (o *Object) Remove() error {
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType
}

@@ -1530,12 +1792,14 @@ func (o *Object) ID() string {

// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)
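
The var block above is the standard Go compile-time assertion that *Fs and *Object still satisfy the optional interfaces; adding fs.Copier and fs.PublicLinker here makes the build fail if Copy or PublicLink ever drift from the expected signatures. The idiom in isolation, with illustrative names:

type Sizer interface{ Size() int64 }

type file struct{}

func (file) Size() int64 { return 0 }

// Assigning a typed zero value to a blank-identifier variable forces the
// compiler to verify the implementation; it compiles to nothing at runtime.
var _ Sizer = file{}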

@@ -4,7 +4,7 @@ import (
"testing"
"time"

"github.com/ncw/rclone/fstest"
"github.com/rclone/rclone/fstest"
)

// Test b2 string encoding

@@ -4,8 +4,8 @@ package b2
import (
"testing"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote

@@ -6,6 +6,7 @@ package b2

import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
"fmt"
@@ -14,12 +15,12 @@ import (
"strings"
"sync"

"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
)

type hashAppendingReader struct {
@@ -80,7 +81,7 @@ type largeUpload struct {
}

// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
@@ -98,7 +99,7 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
sha1SliceSize = parts
}

modTime := src.ModTime()
modTime := src.ModTime(ctx)
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
@@ -110,14 +111,16 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: o.fs.root + remote,
ContentType: fs.MimeType(src),
ContentType: fs.MimeType(ctx, src),
Info: map[string]string{
timeKey: timeString(modTime),
},
}
// Set the SHA1 if known
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {

@@ -45,7 +45,7 @@ type Error struct {
RequestID string `json:"request_id"`
}

// Error returns a string for the error and statistifes the error interface
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
if e.Message != "" {
@@ -57,7 +57,7 @@ func (e *Error) Error() string {
return out
}

// Check Error statisfies the error interface
// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// ItemFields are the fields needed for FileInfo

@@ -10,6 +10,7 @@ package box
// FIXME box can copy a directory

import (
"context"
"fmt"
"io"
"log"
@@ -20,19 +21,19 @@ import (
"strings"
"time"

"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)

@@ -111,7 +112,7 @@ type Fs struct {
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
}
@@ -171,13 +172,13 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
authRety := false
authRetry := false

if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
authRety = true
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// substitute reserved characters for box
@@ -193,9 +194,9 @@ func restoreReservedChars(x string) string {
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -238,6 +239,7 @@ func errorHandler(resp *http.Response) error {

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -260,7 +262,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
f.features = (&fs.Features{
@@ -271,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath("")
_, err := f.readMetaDataForPath(ctx, "")
return err
})

@@ -279,7 +281,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, rootID, f)

// Find the current root
err = f.dirCache.FindRoot(false)
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
@@ -287,12 +289,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false)
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(remote, nil)
_, err := tempF.newObjectWithInfo(ctx, remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -303,7 +305,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
@@ -323,7 +325,7 @@ func (f *Fs) rootSlash() string {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -333,7 +335,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData() // reads info and meta, returning an error
err = o.readMetaData(ctx) // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -343,12 +345,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
@@ -368,7 +370,7 @@ func fieldsValue() url.Values {
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
var info *api.Item
@@ -467,12 +469,12 @@ OUTER:
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(false)
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(dir, false)
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
@@ -486,7 +488,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// FIXME more info from dir?
entries = append(entries, d)
} else if info.Type == api.ItemTypeFile {
o, err := f.newObjectWithInfo(remote, info)
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
@@ -510,9 +512,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return
}
@@ -529,22 +531,22 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
switch err {
case nil:
return exisitingObj, exisitingObj.Update(in, src, options...)
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(in, src)
return f.PutUnchecked(ctx, in, src)
default:
return nil, err
}
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.Put(in, src, options...)
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.Put(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// PutUnchecked the object into the container
|
||||
@@ -554,26 +556,26 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
remote := src.Remote()
|
||||
size := src.Size()
|
||||
modTime := src.ModTime()
|
||||
modTime := src.ModTime(ctx)
|
||||
|
||||
o, _, _, err := f.createObject(remote, modTime, size)
|
||||
o, _, _, err := f.createObject(ctx, remote, modTime, size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, o.Update(in, src, options...)
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
err := f.dirCache.FindRoot(true)
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
err := f.dirCache.FindRoot(ctx, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dir != "" {
|
||||
_, err = f.dirCache.FindDir(dir, true)
|
||||
_, err = f.dirCache.FindDir(ctx, dir, true)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -593,17 +595,17 @@ func (f *Fs) deleteObject(id string) error {
 
 // purgeCheck removes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) error {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
     root := path.Join(f.root, dir)
     if root == "" {
         return errors.New("can't purge root directory")
     }
     dc := f.dirCache
-    err := dc.FindRoot(false)
+    err := dc.FindRoot(ctx, false)
     if err != nil {
         return err
     }
-    rootID, err := dc.FindDir(dir, false)
+    rootID, err := dc.FindDir(ctx, dir, false)
     if err != nil {
         return err
     }
@@ -633,8 +635,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
-    return f.purgeCheck(dir, true)
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+    return f.purgeCheck(ctx, dir, true)
 }
 
 // Precision return the precision of this Fs
@@ -651,13 +653,13 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
     srcObj, ok := src.(*Object)
     if !ok {
         fs.Debugf(src, "Can't copy - not same remote type")
         return nil, fs.ErrorCantCopy
     }
-    err := srcObj.readMetaData()
+    err := srcObj.readMetaData(ctx)
     if err != nil {
         return nil, err
     }
@@ -669,7 +671,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
     }
 
     // Create temporary object
-    dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+    dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
     if err != nil {
         return nil, err
     }
@@ -708,8 +710,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
-    return f.purgeCheck("", false)
+func (f *Fs) Purge(ctx context.Context) error {
+    return f.purgeCheck(ctx, "", false)
 }
 
 // move a file or folder
@@ -746,7 +748,7 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
     srcObj, ok := src.(*Object)
     if !ok {
         fs.Debugf(src, "Can't move - not same remote type")
@@ -754,7 +756,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
     }
 
     // Create temporary object
-    dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+    dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
     if err != nil {
         return nil, err
     }
@@ -780,7 +782,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
     srcFs, ok := src.(*Fs)
     if !ok {
         fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -796,14 +798,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
     }
 
     // find the root src directory
-    err := srcFs.dirCache.FindRoot(false)
+    err := srcFs.dirCache.FindRoot(ctx, false)
     if err != nil {
         return err
     }
 
     // find the root dst directory
     if dstRemote != "" {
-        err = f.dirCache.FindRoot(true)
+        err = f.dirCache.FindRoot(ctx, true)
         if err != nil {
             return err
         }
@@ -819,14 +821,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
     if dstRemote == "" {
         findPath = f.root
     }
-    leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
+    leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
     if err != nil {
         return err
     }
 
     // Check destination does not exist
     if dstRemote != "" {
-        _, err = f.dirCache.FindDir(dstRemote, false)
+        _, err = f.dirCache.FindDir(ctx, dstRemote, false)
         if err == fs.ErrorDirNotFound {
             // OK
         } else if err != nil {
@@ -837,7 +839,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
     }
 
     // Find ID of src
-    srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
+    srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
     if err != nil {
         return err
     }
@@ -852,8 +854,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }
 
 // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(remote string) (string, error) {
-    id, err := f.dirCache.FindDir(remote, false)
+func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
+    id, err := f.dirCache.FindDir(ctx, remote, false)
     var opts rest.Opts
     if err == nil {
         fs.Debugf(f, "attempting to share directory '%s'", remote)
@@ -865,7 +867,7 @@ func (f *Fs) PublicLink(remote string) (string, error) {
     }
     } else {
         fs.Debugf(f, "attempting to share single file '%s'", remote)
-        o, err := f.NewObject(remote)
+        o, err := f.NewObject(ctx, remote)
         if err != nil {
             return "", err
         }
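Every hunk above follows the same mechanical recipe: the method grows a context.Context as its first parameter and hands it to each call that can touch the network (directory-cache lookups, metadata reads, the REST client). A minimal sketch of the shape, with stand-in Fs and dirCache types rather than rclone's real ones:

package sketch

import "context"

// dirCache stands in for rclone's dircache.DirCache; only the FindDir
// signature matters here, since that is what the hunks thread ctx into.
type dirCache struct{}

func (dc *dirCache) FindDir(ctx context.Context, dir string, create bool) (string, error) {
    // The real method issues API requests and honours cancellation;
    // this stub only demonstrates the post-migration signature.
    return "id-" + dir, ctx.Err()
}

type Fs struct{ dirCache *dirCache }

// Before: func (f *Fs) Rmdir(dir string) error
// After: ctx arrives first and is forwarded down the call chain, so
// cancelling the caller's context aborts the pending lookups too.
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    _, err := f.dirCache.FindDir(ctx, dir, false)
    return err
}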
@@ -928,7 +930,7 @@ func (o *Object) srvPath() string {
 }
 
 // Hash returns the SHA-1 of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
     if t != hash.SHA1 {
         return "", hash.ErrUnsupported
     }
@@ -937,7 +939,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
 
 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-    err := o.readMetaData()
+    err := o.readMetaData(context.TODO())
     if err != nil {
         fs.Logf(o, "Failed to read metadata: %v", err)
         return 0
@@ -962,11 +964,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
     if o.hasMetaData {
         return nil
     }
-    info, err := o.fs.readMetaDataForPath(o.remote)
+    info, err := o.fs.readMetaDataForPath(ctx, o.remote)
     if err != nil {
         if apiErr, ok := err.(*api.Error); ok {
             if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
@@ -983,8 +985,8 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
-    err := o.readMetaData()
+func (o *Object) ModTime(ctx context.Context) time.Time {
+    err := o.readMetaData(ctx)
     if err != nil {
         fs.Logf(o, "Failed to read metadata: %v", err)
         return time.Now()
@@ -993,7 +995,7 @@ func (o *Object) ModTime() time.Time {
 }
 
 // setModTime sets the modification time of the local fs object
-func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
+func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
     opts := rest.Opts{
         Method: "PUT",
         Path:   "/files/" + o.id,
@@ -1011,8 +1013,8 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
-    info, err := o.setModTime(modTime)
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+    info, err := o.setModTime(ctx, modTime)
     if err != nil {
         return err
     }
@@ -1025,7 +1027,7 @@ func (o *Object) Storable() bool {
 }
 
 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
     if o.id == "" {
         return nil, errors.New("can't download - no id")
     }
@@ -1093,16 +1095,16 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
 // If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
     o.fs.tokenRenewer.Start()
     defer o.fs.tokenRenewer.Stop()
 
     size := src.Size()
-    modTime := src.ModTime()
+    modTime := src.ModTime(ctx)
     remote := o.Remote()
 
     // Create the directory for the object if it doesn't exist
-    leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
+    leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
     if err != nil {
         return err
     }
@@ -1117,7 +1119,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
     }
 
 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
     return o.fs.deleteObject(o.id)
 }
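One detail is worth flagging: Size() keeps its context-free signature, so it has to conjure a context on its own, and the diff reaches for context.TODO() rather than context.Background(). TODO is the conventional marker for plumbing that is not finished yet. A hedged sketch of that boundary (the types here are illustrative, not rclone's):

package sketch

import (
    "context"
    "log"
)

type object struct{ size int64 }

func (o *object) readMetaData(ctx context.Context) error {
    // network fetch in the real code; cancellation is honoured here
    return ctx.Err()
}

// Size cannot receive a context from its caller, so it uses
// context.TODO(), the documented marker for "plumbing not done yet",
// instead of context.Background(), which would claim a deliberate root.
func (o *object) Size() int64 {
    if err := o.readMetaData(context.TODO()); err != nil {
        log.Printf("failed to read metadata: %v", err)
        return 0
    }
    return o.size
}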
@@ -4,8 +4,8 @@ package box_test
 import (
     "testing"
 
-    "github.com/ncw/rclone/backend/box"
-    "github.com/ncw/rclone/fstest/fstests"
+    "github.com/rclone/rclone/backend/box"
+    "github.com/rclone/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote

@@ -14,11 +14,11 @@ import (
     "sync"
     "time"
 
-    "github.com/ncw/rclone/backend/box/api"
-    "github.com/ncw/rclone/fs"
-    "github.com/ncw/rclone/fs/accounting"
-    "github.com/ncw/rclone/lib/rest"
     "github.com/pkg/errors"
+    "github.com/rclone/rclone/backend/box/api"
+    "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/accounting"
+    "github.com/rclone/rclone/lib/rest"
 )
 
 // createUploadSession creates an upload session for the object
@@ -97,7 +97,7 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
     var body []byte
     var resp *http.Response
     // For discussion of this value see:
-    // https://github.com/ncw/rclone/issues/2054
+    // https://github.com/rclone/rclone/issues/2054
     maxTries := o.fs.opt.CommitRetries
     const defaultDelay = 10
     var tries int
@@ -112,7 +112,7 @@ outer:
         return shouldRetry(resp, err)
     })
     delay := defaultDelay
-    why := "unknown"
+    var why string
     if err != nil {
         // Sometimes we get 400 Error with
         // parts_mismatch immediately after uploading
@@ -211,8 +211,8 @@ outer:
     }
 
     reqSize := remaining
-    if reqSize >= int64(chunkSize) {
-        reqSize = int64(chunkSize)
+    if reqSize >= chunkSize {
+        reqSize = chunkSize
     }
 
     // Make a block of memory
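The final hunk drops two int64 conversions because chunkSize is already an int64 at that point; the logic itself is the standard clamp every chunked uploader uses. A minimal sketch, assuming a fixed chunk size:

package sketch

// nextChunk clamps the size of each upload part so the final part can
// be short while every other part is exactly chunkSize bytes.
func nextChunk(remaining, chunkSize int64) int64 {
    if remaining >= chunkSize {
        return chunkSize
    }
    return remaining
}

For a 10 MiB file with 4 MiB chunks this yields parts of 4, 4 and 2 MiB.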
backend/cache/cache.go (vendored): 173 changes
@@ -18,18 +19,19 @@ import (
     "syscall"
     "time"
 
-    "github.com/ncw/rclone/backend/crypt"
-    "github.com/ncw/rclone/fs"
-    "github.com/ncw/rclone/fs/config"
-    "github.com/ncw/rclone/fs/config/configmap"
-    "github.com/ncw/rclone/fs/config/configstruct"
-    "github.com/ncw/rclone/fs/config/obscure"
-    "github.com/ncw/rclone/fs/fspath"
-    "github.com/ncw/rclone/fs/hash"
-    "github.com/ncw/rclone/fs/rc"
-    "github.com/ncw/rclone/fs/walk"
-    "github.com/ncw/rclone/lib/atexit"
     "github.com/pkg/errors"
+    "github.com/rclone/rclone/backend/crypt"
+    "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/cache"
+    "github.com/rclone/rclone/fs/config"
+    "github.com/rclone/rclone/fs/config/configmap"
+    "github.com/rclone/rclone/fs/config/configstruct"
+    "github.com/rclone/rclone/fs/config/obscure"
+    "github.com/rclone/rclone/fs/fspath"
+    "github.com/rclone/rclone/fs/hash"
+    "github.com/rclone/rclone/fs/rc"
+    "github.com/rclone/rclone/fs/walk"
+    "github.com/rclone/rclone/lib/atexit"
     "golang.org/x/time/rate"
 )
 
@@ -481,7 +482,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
         return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
     }
     f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
-    f.tempFs, err = fs.NewFs(f.opt.TempWritePath)
+    f.tempFs, err = cache.Get(f.opt.TempWritePath)
     if err != nil {
         return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
     }
@@ -508,7 +509,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
     if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
         pollInterval := make(chan time.Duration, 1)
         pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
-        doChangeNotify(f.receiveChangeNotify, pollInterval)
+        doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
     }
 
     f.features = (&fs.Features{
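Two behavioural changes hide in these hunks: the temp filesystem now comes from the new fs/cache layer rather than being constructed fresh each time, and ChangeNotify is started with context.Background() since NewFs has no caller context to inherit. The point of an Fs cache is that repeated lookups of the same remote string share one instance. A minimal sketch of that idea (the real fs/cache package does more; this memoization is illustrative):

package sketch

import "sync"

// Fs stands in for fs.Fs; the cache is keyed by the remote string
// (e.g. "mydrive:path") so wrapping backends share one instance.
type Fs struct{ remote string }

var (
    mu    sync.Mutex
    cache = map[string]*Fs{}
)

// Get returns a cached Fs for remote, creating it on first use.
func Get(remote string) (*Fs, error) {
    mu.Lock()
    defer mu.Unlock()
    if f, ok := cache[remote]; ok {
        return f, nil
    }
    f := &Fs{remote: remote}
    cache[remote] = f
    return f, nil
}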
@@ -576,7 +577,7 @@ The slice indices are similar to Python slices: start[:end]
 
 start is the 0 based chunk number from the beginning of the file
 to fetch inclusive. end is 0 based chunk number from the beginning
-of the file to fetch exclisive.
+of the file to fetch exclusive.
 Both values can be negative, in which case they count from the back
 of the file. The value "-5:" represents the last 5 chunks of a file.
 
@@ -599,7 +600,7 @@ is used on top of the cache.
     return f, fsErr
 }
 
-func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
+func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
     out = make(rc.Params)
     m, err := f.Stats()
     if err != nil {
@@ -626,7 +627,7 @@ func (f *Fs) unwrapRemote(remote string) string {
     return remote
 }
 
-func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
+func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) {
     out = make(rc.Params)
     remoteInt, ok := in["remote"]
     if !ok {
@@ -671,7 +672,7 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
     return out, nil
 }
 
-func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
+func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
     type chunkRange struct {
         start, end int64
     }
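All of the remote-control handlers move to the same two-argument shape, which lets a long-running rc call be cancelled by the HTTP request's context. A hedged sketch of a handler in the new style (the Params alias and handler body are invented for illustration):

package sketch

import (
    "context"
    "errors"
)

// Params mirrors the shape of rclone's rc.Params (a string-keyed map).
type Params map[string]interface{}

// expireRemote shows the post-migration handler shape: ctx first,
// params in, params out.
func expireRemote(ctx context.Context, in Params) (Params, error) {
    remote, ok := in["remote"].(string)
    if !ok {
        return nil, errors.New("remote parameter is required")
    }
    // A real handler would expire cache entries here, checking
    // ctx between batches so cancellation is honoured.
    select {
    case <-ctx.Done():
        return nil, ctx.Err()
    default:
    }
    return Params{"status": "ok", "expired": remote}, nil
}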
@@ -776,18 +777,18 @@ func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
     for _, pair := range files {
         file, remote := pair[0], pair[1]
         var status fileStatus
-        o, err := f.NewObject(remote)
+        o, err := f.NewObject(ctx, remote)
         if err != nil {
             fetchedChunks[file] = fileStatus{Error: err.Error()}
             continue
         }
         co := o.(*Object)
-        err = co.refreshFromSource(true)
+        err = co.refreshFromSource(ctx, true)
         if err != nil {
             fetchedChunks[file] = fileStatus{Error: err.Error()}
             continue
         }
-        handle := NewObjectHandle(co, f)
+        handle := NewObjectHandle(ctx, co, f)
         handle.UseMemory = false
         handle.scaleWorkers(1)
         walkChunkRanges(crs, co.Size(), func(chunk int64) {
@@ -870,10 +871,10 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
     }
 }
 
-// ChangeNotify can subsribe multiple callers
+// ChangeNotify can subscribe multiple callers
 // this is coupled with the wrapped fs ChangeNotify (if it supports it)
 // and also notifies other caches (i.e VFS) to clear out whenever something changes
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
     f.parentsForgetMu.Lock()
     defer f.parentsForgetMu.Unlock()
     fs.Debugf(f, "subscribing to ChangeNotify")
@@ -920,7 +921,7 @@ func (f *Fs) TempUploadWaitTime() time.Duration {
 }
 
 // NewObject finds the Object at remote.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
     var err error
 
     fs.Debugf(f, "new object '%s'", remote)
@@ -939,16 +940,16 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
     // search for entry in source or temp fs
     var obj fs.Object
     if f.opt.TempWritePath != "" {
-        obj, err = f.tempFs.NewObject(remote)
+        obj, err = f.tempFs.NewObject(ctx, remote)
         // not found in temp fs
         if err != nil {
             fs.Debugf(remote, "find: not found in local cache fs")
-            obj, err = f.Fs.NewObject(remote)
+            obj, err = f.Fs.NewObject(ctx, remote)
         } else {
             fs.Debugf(obj, "find: found in local cache fs")
         }
     } else {
-        obj, err = f.Fs.NewObject(remote)
+        obj, err = f.Fs.NewObject(ctx, remote)
     }
 
     // not found in either fs
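NewObject spells out the cache backend's lookup order: when a temp write path is configured, the local temp fs is consulted first and the wrapped remote only on a miss, so queued-but-unuploaded files stay visible. A minimal sketch of that fallback with stand-in types:

package sketch

import "context"

// finder is the slice of fs.Fs behaviour this sketch needs; the result
// type is simplified to a string standing in for fs.Object.
type finder interface {
    NewObject(ctx context.Context, remote string) (string, error)
}

// lookup checks the local temp fs first (pending uploads live there),
// then falls back to the wrapped remote, mirroring the hunk above.
func lookup(ctx context.Context, tempFs, wrapped finder, remote string) (string, error) {
    if tempFs != nil {
        if obj, err := tempFs.NewObject(ctx, remote); err == nil {
            return obj, nil // queued for upload: serve the local copy
        }
    }
    return wrapped.NewObject(ctx, remote)
}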
@@ -958,13 +959,13 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
     }
 
     // cache the new entry
-    co = ObjectFromOriginal(f, obj).persist()
+    co = ObjectFromOriginal(ctx, f, obj).persist()
     fs.Debugf(co, "find: cached object")
     return co, nil
 }
 
 // List the objects and directories in dir into entries
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
     fs.Debugf(f, "list '%s'", dir)
     cd := ShallowDirectory(f, dir)
 
@@ -994,12 +995,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
     fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
 
     for _, queuedRemote := range queuedEntries {
-        queuedEntry, err := f.tempFs.NewObject(f.cleanRootFromPath(queuedRemote))
+        queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote))
         if err != nil {
             fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
             continue
         }
-        co := ObjectFromOriginal(f, queuedEntry).persist()
+        co := ObjectFromOriginal(ctx, f, queuedEntry).persist()
         fs.Debugf(co, "list: cached temp object")
         cachedEntries = append(cachedEntries, co)
     }
@@ -1007,7 +1008,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
     }
 
     // search from the source
-    sourceEntries, err := f.Fs.List(dir)
+    sourceEntries, err := f.Fs.List(ctx, dir)
     if err != nil {
         return nil, err
     }
@@ -1045,11 +1046,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
         if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
             continue
         }
-        co := ObjectFromOriginal(f, o).persist()
+        co := ObjectFromOriginal(ctx, f, o).persist()
         cachedEntries = append(cachedEntries, co)
         fs.Debugf(dir, "list: cached object: %v", co)
     case fs.Directory:
-        cdd := DirectoryFromOriginal(f, o)
+        cdd := DirectoryFromOriginal(ctx, f, o)
         // check if the dir isn't expired and add it in cache if it isn't
         if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
             batchDirectories = append(batchDirectories, cdd)
@@ -1079,8 +1080,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
     return cachedEntries, nil
 }
 
-func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
-    entries, err := f.List(dir)
+func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
+    entries, err := f.List(ctx, dir)
     if err != nil {
         return err
     }
@@ -1088,7 +1089,7 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
     for i := 0; i < len(entries); i++ {
         innerDir, ok := entries[i].(fs.Directory)
         if ok {
-            err := f.recurse(innerDir.Remote(), list)
+            err := f.recurse(ctx, innerDir.Remote(), list)
             if err != nil {
                 return err
             }
@@ -1105,21 +1106,21 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
 
 // ListR lists the objects and directories of the Fs starting
 // from dir recursively into out.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
     fs.Debugf(f, "list recursively from '%s'", dir)
 
     // we check if the source FS supports ListR
     // if it does, we'll use that to get all the entries, cache them and return
     do := f.Fs.Features().ListR
     if do != nil {
-        return do(dir, func(entries fs.DirEntries) error {
+        return do(ctx, dir, func(entries fs.DirEntries) error {
             // we got called back with a set of entries so let's cache them and call the original callback
             for _, entry := range entries {
                 switch o := entry.(type) {
                 case fs.Object:
-                    _ = f.cache.AddObject(ObjectFromOriginal(f, o))
+                    _ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o))
                 case fs.Directory:
-                    _ = f.cache.AddDir(DirectoryFromOriginal(f, o))
+                    _ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
                 default:
                     return errors.Errorf("Unknown object type %T", entry)
                 }
@@ -1132,7 +1133,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 
     // if we're here, we're gonna do a standard recursive traversal and cache everything
     list := walk.NewListRHelper(callback)
-    err = f.recurse(dir, list)
+    err = f.recurse(ctx, dir, list)
     if err != nil {
         return err
     }
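ListR is the usual optional-feature dispatch: delegate to the wrapped backend's recursive lister when it advertises one, otherwise fall back to List plus manual recursion through a helper. A compact sketch of the dispatch under stand-in names (the real fallback recurses into directories, which is elided here):

package sketch

import "context"

type Entry string

// ListRFunc is the recursive-list feature; a nil value means the
// wrapped backend does not support it.
type ListRFunc func(ctx context.Context, dir string, cb func([]Entry) error) error

type Fs struct {
    listR ListRFunc // optional fast path
    list  func(ctx context.Context, dir string) ([]Entry, error)
}

func (f *Fs) ListR(ctx context.Context, dir string, cb func([]Entry) error) error {
    if f.listR != nil {
        // fast path: entries arrive straight from the backend
        return f.listR(ctx, dir, cb)
    }
    // fallback: list one level and emit; a real implementation would
    // type-switch on directories and descend into them
    entries, err := f.list(ctx, dir)
    if err != nil {
        return err
    }
    return cb(entries)
}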
@@ -1141,9 +1142,9 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 }
 
 // Mkdir makes the directory (container, bucket)
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
     fs.Debugf(f, "mkdir '%s'", dir)
-    err := f.Fs.Mkdir(dir)
+    err := f.Fs.Mkdir(ctx, dir)
     if err != nil {
         return err
     }
@@ -1171,7 +1172,7 @@ func (f *Fs) Mkdir(dir string) error {
 }
 
 // Rmdir removes the directory (container, bucket) if empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
     fs.Debugf(f, "rmdir '%s'", dir)
 
     if f.opt.TempWritePath != "" {
@@ -1181,9 +1182,9 @@ func (f *Fs) Rmdir(dir string) error {
 
     // we check if the source exists on the remote and make the same move on it too if it does
     // otherwise, we skip this step
-    _, err := f.UnWrap().List(dir)
+    _, err := f.UnWrap().List(ctx, dir)
     if err == nil {
-        err := f.Fs.Rmdir(dir)
+        err := f.Fs.Rmdir(ctx, dir)
         if err != nil {
             return err
         }
@@ -1191,10 +1192,10 @@ func (f *Fs) Rmdir(dir string) error {
     }
 
     var queuedEntries []*Object
-    err = walk.Walk(f.tempFs, dir, true, -1, func(path string, entries fs.DirEntries, err error) error {
+    err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
         for _, o := range entries {
             if oo, ok := o.(fs.Object); ok {
-                co := ObjectFromOriginal(f, oo)
+                co := ObjectFromOriginal(ctx, f, oo)
                 queuedEntries = append(queuedEntries, co)
             }
         }
@@ -1211,7 +1212,7 @@ func (f *Fs) Rmdir(dir string) error {
         }
     } else {
-        err := f.Fs.Rmdir(dir)
+        err := f.Fs.Rmdir(ctx, dir)
         if err != nil {
             return err
         }
@@ -1242,7 +1243,7 @@ func (f *Fs) Rmdir(dir string) error {
 
 // DirMove moves src, srcRemote to this remote at dstRemote
 // using server side move operations.
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
     fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
 
     do := f.Fs.Features().DirMove
@@ -1264,8 +1265,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
     f.backgroundRunner.pause()
     defer f.backgroundRunner.play()
 
-    _, errInWrap := srcFs.UnWrap().List(srcRemote)
-    _, errInTemp := f.tempFs.List(srcRemote)
+    _, errInWrap := srcFs.UnWrap().List(ctx, srcRemote)
+    _, errInTemp := f.tempFs.List(ctx, srcRemote)
     // not found in either fs
     if errInWrap != nil && errInTemp != nil {
         return fs.ErrorDirNotFound
@@ -1274,7 +1275,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
     // we check if the source exists on the remote and make the same move on it too if it does
     // otherwise, we skip this step
     if errInWrap == nil {
-        err := do(srcFs.UnWrap(), srcRemote, dstRemote)
+        err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
         if err != nil {
             return err
         }
@@ -1287,10 +1288,10 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
     }
 
     var queuedEntries []*Object
-    err := walk.Walk(f.tempFs, srcRemote, true, -1, func(path string, entries fs.DirEntries, err error) error {
+    err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
         for _, o := range entries {
             if oo, ok := o.(fs.Object); ok {
-                co := ObjectFromOriginal(f, oo)
+                co := ObjectFromOriginal(ctx, f, oo)
                 queuedEntries = append(queuedEntries, co)
                 if co.tempFileStartedUpload() {
                     fs.Errorf(co, "can't move - upload has already started. need to finish that")
@@ -1311,16 +1312,16 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
         fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
         return fs.ErrorCantDirMove
     }
-    err = do(f.tempFs, srcRemote, dstRemote)
+    err = do(ctx, f.tempFs, srcRemote, dstRemote)
     if err != nil {
         return err
     }
-    err = f.cache.ReconcileTempUploads(f)
+    err = f.cache.ReconcileTempUploads(ctx, f)
     if err != nil {
         return err
     }
 } else {
-    err := do(srcFs.UnWrap(), srcRemote, dstRemote)
+    err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
     if err != nil {
         return err
     }
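Beyond the ctx plumbing, these hunks swap walk.Walk for walk.ListR, which changes the callback contract: the callback now receives only batches of entries, while error handling and filtering (walk.ListObjects drops directories) stay inside the walker. A toy walker in that style, offered as a hedged sketch rather than the real fs/walk API:

package sketch

import "context"

type Object struct{ remote string }

// listR drives the traversal itself and hands the callback
// ready-filtered batches of objects, mirroring walk.ListR's contract.
func listR(ctx context.Context, objs []Object, cb func([]Object) error) error {
    const batch = 2
    for i := 0; i < len(objs); i += batch {
        if err := ctx.Err(); err != nil {
            return err // the walker, not the callback, owns cancellation
        }
        end := i + batch
        if end > len(objs) {
            end = len(objs)
        }
        if err := cb(objs[i:end]); err != nil {
            return err
        }
    }
    return nil
}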
@@ -1426,10 +1427,10 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
     }
 }
 
-type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
+type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
 
 // put in to the remote path
-func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
     var err error
     var obj fs.Object
 
@@ -1440,7 +1441,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
     _ = f.cache.ExpireDir(parentCd)
     f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
 
-    obj, err = f.tempFs.Put(in, src, options...)
+    obj, err = f.tempFs.Put(ctx, in, src, options...)
     if err != nil {
         fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
         return nil, err
@@ -1455,14 +1456,14 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
     // if cache writes is enabled write it first through cache
     } else if f.opt.StoreWrites {
         f.cacheReader(in, src, func(inn io.Reader) {
-            obj, err = put(inn, src, options...)
+            obj, err = put(ctx, inn, src, options...)
         })
         if err == nil {
             fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
         }
     // last option: save it directly in remote fs
     } else {
-        obj, err = put(in, src, options...)
+        obj, err = put(ctx, in, src, options...)
         if err == nil {
             fs.Debugf(obj, "put: uploaded to remote fs")
         }
@@ -1474,7 +1475,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
     }
 
     // cache the new file
-    cachedObj := ObjectFromOriginal(f, obj)
+    cachedObj := ObjectFromOriginal(ctx, f, obj)
 
     // deleting cached chunks and info to be replaced with new ones
     _ = f.cache.RemoveObject(cachedObj.abs())
@@ -1497,33 +1498,33 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 }
 
 // Put in to the remote path with the modTime given of the given size
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
     fs.Debugf(f, "put data at '%s'", src.Remote())
-    return f.put(in, src, options, f.Fs.Put)
+    return f.put(ctx, in, src, options, f.Fs.Put)
 }
 
 // PutUnchecked uploads the object
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
     do := f.Fs.Features().PutUnchecked
     if do == nil {
         return nil, errors.New("can't PutUnchecked")
     }
     fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
-    return f.put(in, src, options, do)
+    return f.put(ctx, in, src, options, do)
 }
 
 // PutStream uploads the object
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
     do := f.Fs.Features().PutStream
     if do == nil {
         return nil, errors.New("can't PutStream")
     }
     fs.Debugf(f, "put data streaming in '%s'", src.Remote())
-    return f.put(in, src, options, do)
+    return f.put(ctx, in, src, options, do)
 }
 
 // Copy src to this remote using server side copy operations.
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
     fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
 
     do := f.Fs.Features().Copy
@@ -1543,13 +1544,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
         return nil, fs.ErrorCantCopy
     }
     // refresh from source or abort
-    if err := srcObj.refreshFromSource(false); err != nil {
+    if err := srcObj.refreshFromSource(ctx, false); err != nil {
         fs.Errorf(f, "can't copy %v - %v", src, err)
         return nil, fs.ErrorCantCopy
     }
 
     if srcObj.isTempFile() {
-        // we check if the feature is stil active
+        // we check if the feature is still active
         if f.opt.TempWritePath == "" {
             fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
             return nil, fs.ErrorCantCopy
@@ -1562,7 +1563,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
         }
     }
 
-    obj, err := do(srcObj.Object, remote)
+    obj, err := do(ctx, srcObj.Object, remote)
     if err != nil {
         fs.Errorf(srcObj, "error moving in cache: %v", err)
         return nil, err
@@ -1570,7 +1571,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
     fs.Debugf(obj, "copy: file copied")
 
     // persist new
-    co := ObjectFromOriginal(f, obj).persist()
+    co := ObjectFromOriginal(ctx, f, obj).persist()
     fs.Debugf(co, "copy: added to cache")
     // expire the destination path
     parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
@@ -1597,7 +1598,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 }
 
 // Move src to this remote using server side move operations.
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
     fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
 
     // if source fs doesn't support move abort
@@ -1618,14 +1619,14 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
         return nil, fs.ErrorCantMove
     }
     // refresh from source or abort
-    if err := srcObj.refreshFromSource(false); err != nil {
+    if err := srcObj.refreshFromSource(ctx, false); err != nil {
         fs.Errorf(f, "can't move %v - %v", src, err)
         return nil, fs.ErrorCantMove
     }
 
     // if this is a temp object then we perform the changes locally
     if srcObj.isTempFile() {
-        // we check if the feature is stil active
+        // we check if the feature is still active
         if f.opt.TempWritePath == "" {
             fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
             return nil, fs.ErrorCantMove
@@ -1654,7 +1655,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
         fs.Debugf(srcObj, "move: queued file moved to %v", remote)
     }
 
-    obj, err := do(srcObj.Object, remote)
+    obj, err := do(ctx, srcObj.Object, remote)
     if err != nil {
         fs.Errorf(srcObj, "error moving: %v", err)
         return nil, err
@@ -1679,7 +1680,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
     // advertise to ChangeNotify if wrapped doesn't do that
     f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
     // persist new
-    cachedObj := ObjectFromOriginal(f, obj).persist()
+    cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
     fs.Debugf(cachedObj, "move: added to cache")
     // expire new parent
     parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
@@ -1701,7 +1702,7 @@ func (f *Fs) Hashes() hash.Set {
 }
 
 // Purge all files in the root and the root directory
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
     fs.Infof(f, "purging cache")
     f.cache.Purge()
 
@@ -1710,7 +1711,7 @@ func (f *Fs) Purge() error {
         return nil
     }
 
-    err := do()
+    err := do(ctx)
     if err != nil {
         return err
     }
@@ -1719,7 +1720,7 @@ func (f *Fs) Purge() error {
 }
 
 // CleanUp the trash in the Fs
-func (f *Fs) CleanUp() error {
+func (f *Fs) CleanUp(ctx context.Context) error {
     f.CleanUpCache(false)
 
     do := f.Fs.Features().CleanUp
@@ -1727,16 +1728,16 @@ func (f *Fs) CleanUp() error {
         return nil
     }
 
-    return do()
+    return do(ctx)
 }
 
 // About gets quota information from the Fs
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
     do := f.Fs.Features().About
     if do == nil {
         return nil, errors.New("About not supported")
     }
-    return do()
+    return do(ctx)
 }
 
 // Stats returns stats about the cache storage
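put is a good illustration of why the migration touches type declarations too: the putFn type gains the ctx parameter so Put, PutUnchecked and PutStream can keep sharing one implementation, with the backend-specific uploader passed in as a value. A minimal sketch of that higher-order shape, under invented names:

package sketch

import (
    "context"
    "io"
    "strings"
)

// putFn is the uploader signature after the migration: ctx first.
type putFn func(ctx context.Context, in io.Reader, remote string) error

// put wraps any uploader with the shared bookkeeping (cache expiry in
// the real code; omitted here) so several entry points stay one path.
func put(ctx context.Context, in io.Reader, remote string, upload putFn) error {
    // shared pre-work would go here
    return upload(ctx, in, remote)
}

func example() {
    direct := func(ctx context.Context, in io.Reader, remote string) error {
        _, err := io.Copy(io.Discard, in) // pretend network upload
        return err
    }
    _ = put(context.Background(), strings.NewReader("data"), "a/b.txt", direct)
}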
backend/cache/cache_internal_test.go (vendored): 177 changes
@@ -4,6 +4,7 @@ package cache_test
 
 import (
     "bytes"
+    "context"
     "encoding/base64"
     goflag "flag"
     "fmt"
@@ -21,19 +22,20 @@ import (
     "testing"
     "time"
 
-    "github.com/ncw/rclone/backend/cache"
-    "github.com/ncw/rclone/backend/crypt"
-    _ "github.com/ncw/rclone/backend/drive"
-    "github.com/ncw/rclone/backend/local"
-    "github.com/ncw/rclone/fs"
-    "github.com/ncw/rclone/fs/config"
-    "github.com/ncw/rclone/fs/config/configmap"
-    "github.com/ncw/rclone/fs/object"
-    "github.com/ncw/rclone/fs/rc"
-    "github.com/ncw/rclone/fstest"
-    "github.com/ncw/rclone/vfs"
-    "github.com/ncw/rclone/vfs/vfsflags"
     "github.com/pkg/errors"
+    "github.com/rclone/rclone/backend/cache"
+    "github.com/rclone/rclone/backend/crypt"
+    _ "github.com/rclone/rclone/backend/drive"
+    "github.com/rclone/rclone/backend/local"
+    "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/config"
+    "github.com/rclone/rclone/fs/config/configmap"
+    "github.com/rclone/rclone/fs/object"
+    "github.com/rclone/rclone/fs/rc"
+    "github.com/rclone/rclone/fstest"
+    "github.com/rclone/rclone/lib/random"
+    "github.com/rclone/rclone/vfs"
+    "github.com/rclone/rclone/vfs/vfsflags"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
@@ -120,7 +122,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
     require.NoError(t, err)
     listRootInner, err := runInstance.list(t, rootFs, innerFolder)
     require.NoError(t, err)
-    listInner, err := rootFs2.List("")
+    listInner, err := rootFs2.List(context.Background(), "")
     require.NoError(t, err)
 
     require.Len(t, listRoot, 1)
@@ -138,10 +140,10 @@ func TestInternalVfsCache(t *testing.T) {
     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
    defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-    err := rootFs.Mkdir("test")
+    err := rootFs.Mkdir(context.Background(), "test")
     require.NoError(t, err)
     runInstance.writeObjectString(t, rootFs, "test/second", "content")
-    _, err = rootFs.List("test")
+    _, err = rootFs.List(context.Background(), "test")
     require.NoError(t, err)
 
     testReader := runInstance.randomReader(t, testSize)
@@ -266,7 +268,7 @@ func TestInternalObjNotFound(t *testing.T) {
     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
     defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-    obj, err := rootFs.NewObject("404")
+    obj, err := rootFs.NewObject(context.Background(), "404")
     require.Error(t, err)
     require.Nil(t, obj)
 }
@@ -354,8 +356,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
         testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
         require.NoError(t, err)
     } else {
-        testData1 = []byte(fstest.RandomString(100))
-        testData2 = []byte(fstest.RandomString(200))
+        testData1 = []byte(random.String(100))
+        testData2 = []byte(random.String(200))
     }
 
     // write the object
@@ -387,10 +389,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 
     // write the object
     o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
-    require.Equal(t, o.Size(), int64(testSize))
+    require.Equal(t, o.Size(), testSize)
     time.Sleep(time.Second * 3)
 
-    checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
+    checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
     require.NoError(t, err)
     require.Equal(t, int64(len(checkSample)), o.Size())
 
@@ -445,7 +447,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
     require.NoError(t, err)
     log.Printf("original size: %v", originalSize)
 
-    o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+    o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
     require.NoError(t, err)
     expectedSize := int64(len([]byte("test content")))
     var data2 []byte
@@ -457,7 +459,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
         data2 = []byte("test content")
     }
     objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
-    err = o.Update(bytes.NewReader(data2), objInfo)
+    err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
     require.NoError(t, err)
     require.Equal(t, int64(len(data2)), o.Size())
     log.Printf("updated size: %v", len(data2))
@@ -503,9 +505,9 @@ func TestInternalMoveWithNotify(t *testing.T) {
     } else {
         testData = []byte("test content")
     }
-    _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test"))
-    _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one"))
-    _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second"))
+    _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test"))
+    _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one"))
+    _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second"))
     srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
 
     // list in mount
@@ -515,7 +517,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
     require.NoError(t, err)
 
     // move file
-    _, err = cfs.UnWrap().Features().Move(srcObj, dstName)
+    _, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
     require.NoError(t, err)
 
     err = runInstance.retryBlock(func() error {
@@ -589,9 +591,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
     } else {
         testData = []byte("test content")
     }
-    err = rootFs.Mkdir("test")
+    err = rootFs.Mkdir(context.Background(), "test")
     require.NoError(t, err)
-    err = rootFs.Mkdir("test/one")
+    err = rootFs.Mkdir(context.Background(), "test/one")
     require.NoError(t, err)
     srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
 
@@ -608,7 +610,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
     require.False(t, found)
 
     // move file
-    _, err = cfs.UnWrap().Features().Move(srcObj, dstName)
+    _, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
     require.NoError(t, err)
 
     err = runInstance.retryBlock(func() error {
@@ -670,23 +672,23 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
     runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
 
     // update in the wrapped fs
-    o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+    o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
     require.NoError(t, err)
     wrappedTime := time.Now().Add(-1 * time.Hour)
-    err = o.SetModTime(wrappedTime)
+    err = o.SetModTime(context.Background(), wrappedTime)
     require.NoError(t, err)
 
     // get a new instance from the cache
-    co, err := rootFs.NewObject("data.bin")
+    co, err := rootFs.NewObject(context.Background(), "data.bin")
     require.NoError(t, err)
-    require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
+    require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
 
     cfs.DirCacheFlush() // flush the cache
 
     // get a new instance from the cache
-    co, err = rootFs.NewObject("data.bin")
+    co, err = rootFs.NewObject(context.Background(), "data.bin")
     require.NoError(t, err)
-    require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
+    require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
 }
 
 func TestInternalChangeSeenAfterRc(t *testing.T) {
@@ -713,40 +715,44 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
     runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
 
     // update in the wrapped fs
-    o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+    o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
     require.NoError(t, err)
     wrappedTime := time.Now().Add(-1 * time.Hour)
-    err = o.SetModTime(wrappedTime)
+    err = o.SetModTime(context.Background(), wrappedTime)
     require.NoError(t, err)
 
     // get a new instance from the cache
-    co, err := rootFs.NewObject("data.bin")
+    co, err := rootFs.NewObject(context.Background(), "data.bin")
     require.NoError(t, err)
-    require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
+    require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
 
     // Call the rc function
-    m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
+    m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
     require.NoError(t, err)
     require.Contains(t, m, "status")
     require.Contains(t, m, "message")
     require.Equal(t, "ok", m["status"])
     require.Contains(t, m["message"], "cached file cleared")
 
     // get a new instance from the cache
-    co, err = rootFs.NewObject("data.bin")
+    co, err = rootFs.NewObject(context.Background(), "data.bin")
     require.NoError(t, err)
-    require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
-    li1, err := runInstance.list(t, rootFs, "")
+    require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
+    _, err = runInstance.list(t, rootFs, "")
     require.NoError(t, err)
 
     // create some rand test data
     testData2 := randStringBytes(int(chunkSize))
     runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
 
     // list should have 1 item only
-    li1, err = runInstance.list(t, rootFs, "")
+    li1, err := runInstance.list(t, rootFs, "")
     require.NoError(t, err)
     require.Len(t, li1, 1)
 
     // Call the rc function
-    m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
+    m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
     require.NoError(t, err)
     require.Contains(t, m, "status")
     require.Contains(t, m, "message")
     require.Equal(t, "ok", m["status"])
@@ -754,6 +760,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
 
     // list should have 2 items now
     li2, err := runInstance.list(t, rootFs, "")
+    require.NoError(t, err)
     require.Len(t, li2, 2)
 }
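The test updates mirror the backend changes: every call site now supplies a context, and because a test is a context root with no caller to inherit from, context.Background() is the appropriate choice (unlike the context.TODO() seen in Size() earlier). A hedged sketch of the migrated call shape with a stand-in object type:

package sketch

import (
    "context"
    "testing"
    "time"
)

type object struct{}

func (o *object) SetModTime(ctx context.Context, t time.Time) error {
    return ctx.Err() // the real method issues an API call
}

// TestSetModTime shows the migrated call shape: tests are context
// roots, so context.Background() is passed explicitly.
func TestSetModTime(t *testing.T) {
    o := &object{}
    if err := o.SetModTime(context.Background(), time.Now()); err != nil {
        t.Fatalf("SetModTime: %v", err)
    }
}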
@@ -789,7 +796,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||
// create some rand test data
|
||||
testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
|
||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
||||
o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
||||
o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
||||
require.NoError(t, err)
|
||||
co, ok := o.(*cache.Object)
|
||||
require.True(t, ok)
|
||||
@@ -828,7 +835,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Len(t, l, 1)
|
||||
|
||||
err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
|
||||
err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third"))
|
||||
require.NoError(t, err)
|
||||
|
||||
l, err = runInstance.list(t, rootFs, "test")
|
||||
@@ -863,14 +870,14 @@ func TestInternalBug2117(t *testing.T) {
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = cfs.UnWrap().Mkdir("test")
|
||||
err = cfs.UnWrap().Mkdir(context.Background(), "test")
|
||||
require.NoError(t, err)
|
||||
for i := 1; i <= 4; i++ {
|
||||
err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i))
|
||||
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i))
|
||||
require.NoError(t, err)
|
||||
|
||||
for j := 1; j <= 4; j++ {
|
||||
err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j))
|
||||
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j))
|
||||
require.NoError(t, err)
|
||||
|
||||
runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
|
||||
@@ -1075,10 +1082,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
}
|
||||
|
||||
if purge {
|
||||
_ = f.Features().Purge()
|
||||
_ = f.Features().Purge(context.Background())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
err = f.Mkdir("")
|
||||
err = f.Mkdir(context.Background(), "")
|
||||
require.NoError(t, err)
|
||||
if r.useMount && !r.isMounted {
|
||||
r.mountFs(t, f)
|
||||
@@ -1092,7 +1099,7 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
||||
r.unmountFs(t, f)
|
||||
}
|
||||
|
||||
err := f.Features().Purge()
|
||||
err := f.Features().Purge(context.Background())
|
||||
require.NoError(t, err)
|
||||
cfs, err := r.getCacheFs(f)
|
||||
require.NoError(t, err)
|
||||
@@ -1194,7 +1201,7 @@ func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.Read
|
||||
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
|
||||
in := bytes.NewReader(data)
|
||||
_ = r.writeObjectReader(t, f, remote, in)
|
||||
o, err := f.NewObject(remote)
|
||||
o, err := f.NewObject(context.Background(), remote)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(len(data)), o.Size())
|
||||
return o
|
||||
@@ -1203,7 +1210,7 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte
|
||||
func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
|
||||
modTime := time.Now()
|
||||
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
|
||||
obj, err := f.Put(in, objInfo)
|
||||
obj, err := f.Put(context.Background(), in, objInfo)
|
||||
require.NoError(t, err)
|
||||
if r.useMount {
|
||||
r.vfs.WaitForWriters(10 * time.Second)
|
||||
@@ -1223,18 +1230,18 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
|
||||
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
|
||||
require.NoError(t, err)
|
||||
r.vfs.WaitForWriters(10 * time.Second)
|
||||
obj, err = f.NewObject(remote)
|
||||
obj, err = f.NewObject(context.Background(), remote)
|
||||
} else {
|
||||
in1 := bytes.NewReader(data1)
|
||||
in2 := bytes.NewReader(data2)
|
||||
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
|
||||
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
|
||||
|
||||
obj, err = f.Put(in1, objInfo1)
|
||||
obj, err = f.Put(context.Background(), in1, objInfo1)
|
||||
require.NoError(t, err)
|
||||
obj, err = f.NewObject(remote)
|
||||
obj, err = f.NewObject(context.Background(), remote)
|
||||
require.NoError(t, err)
|
||||
err = obj.Update(in2, objInfo2)
|
||||
err = obj.Update(context.Background(), in2, objInfo2)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -1263,7 +1270,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
|
||||
return checkSample, err
|
||||
}
|
||||
} else {
|
||||
co, err := f.NewObject(remote)
|
||||
co, err := f.NewObject(context.Background(), remote)
|
||||
if err != nil {
|
||||
return checkSample, err
|
||||
}
|
||||
@@ -1278,7 +1285,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
|
||||
func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
|
||||
size := end - offset
|
||||
checkSample := make([]byte, size)
|
||||
reader, err := o.Open(&fs.SeekOption{Offset: offset})
|
||||
reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset})
|
||||
require.NoError(t, err)
|
||||
totalRead, err := io.ReadFull(reader, checkSample)
|
||||
if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
|
||||
@@ -1295,7 +1302,7 @@ func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
|
||||
if r.useMount {
|
||||
err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
|
||||
} else {
|
||||
err = f.Mkdir(remote)
|
||||
err = f.Mkdir(context.Background(), remote)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1307,11 +1314,11 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
|
||||
err = os.Remove(path.Join(r.mntDir, remote))
|
||||
} else {
|
||||
var obj fs.Object
|
||||
obj, err = f.NewObject(remote)
|
||||
obj, err = f.NewObject(context.Background(), remote)
|
||||
if err != nil {
|
||||
err = f.Rmdir(remote)
|
||||
err = f.Rmdir(context.Background(), remote)
|
||||
} else {
|
||||
err = obj.Remove()
|
||||
err = obj.Remove(context.Background())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1329,7 +1336,7 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
|
||||
}
|
||||
} else {
|
||||
var list fs.DirEntries
|
||||
list, err = f.List(remote)
|
||||
list, err = f.List(context.Background(), remote)
|
||||
for _, ll := range list {
|
||||
l = append(l, ll)
|
||||
}
|
||||
@@ -1348,7 +1355,7 @@ func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
|
||||
}
|
||||
} else {
|
||||
var list fs.DirEntries
|
||||
list, err = f.List(remote)
|
||||
list, err = f.List(context.Background(), remote)
|
||||
for _, ll := range list {
|
||||
l = append(l, ll.Remote())
|
||||
}
|
||||
@@ -1388,7 +1395,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
||||
}
|
||||
 		r.vfs.WaitForWriters(10 * time.Second)
 	} else if rootFs.Features().DirMove != nil {
-		err = rootFs.Features().DirMove(rootFs, src, dst)
+		err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
 		if err != nil {
 			return err
 		}
@@ -1410,11 +1417,11 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
 		}
 		r.vfs.WaitForWriters(10 * time.Second)
 	} else if rootFs.Features().Move != nil {
-		obj1, err := rootFs.NewObject(src)
+		obj1, err := rootFs.NewObject(context.Background(), src)
 		if err != nil {
 			return err
 		}
-		_, err = rootFs.Features().Move(obj1, dst)
+		_, err = rootFs.Features().Move(context.Background(), obj1, dst)
 		if err != nil {
 			return err
 		}
@@ -1436,11 +1443,11 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
 		}
 		r.vfs.WaitForWriters(10 * time.Second)
 	} else if rootFs.Features().Copy != nil {
-		obj, err := rootFs.NewObject(src)
+		obj, err := rootFs.NewObject(context.Background(), src)
 		if err != nil {
 			return err
 		}
-		_, err = rootFs.Features().Copy(obj, dst)
+		_, err = rootFs.Features().Copy(context.Background(), obj, dst)
 		if err != nil {
 			return err
 		}
@@ -1462,11 +1469,11 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
 		}
 		return fi.ModTime(), nil
 	}
-	obj1, err := rootFs.NewObject(src)
+	obj1, err := rootFs.NewObject(context.Background(), src)
 	if err != nil {
 		return time.Time{}, err
 	}
-	return obj1.ModTime(), nil
+	return obj1.ModTime(context.Background()), nil
 }

 func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
@@ -1479,7 +1486,7 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
 		}
 		return fi.Size(), nil
 	}
-	obj1, err := rootFs.NewObject(src)
+	obj1, err := rootFs.NewObject(context.Background(), src)
 	if err != nil {
 		return int64(0), err
 	}
@@ -1490,7 +1497,8 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 	var err error

 	if r.useMount {
-		f, err := os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+		var f *os.File
+		f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
 		if err != nil {
 			return err
 		}
@@ -1500,14 +1508,15 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 		}()
 		_, err = f.WriteString(data + append)
 	} else {
-		obj1, err := rootFs.NewObject(src)
+		var obj1 fs.Object
+		obj1, err = rootFs.NewObject(context.Background(), src)
 		if err != nil {
 			return err
 		}
 		data1 := []byte(data + append)
 		r := bytes.NewReader(data1)
 		objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
-		err = obj1.Update(r, objInfo1)
+		err = obj1.Update(context.Background(), r, objInfo1)
 	}

 	return err
@@ -1632,15 +1641,13 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
 	cfs, ok := f.(*cache.Fs)
 	if ok {
 		return cfs, nil
-	} else {
-		if f.Features().UnWrap != nil {
-			cfs, ok := f.Features().UnWrap().(*cache.Fs)
-			if ok {
-				return cfs, nil
-			}
-		}
 	}
+	if f.Features().UnWrap != nil {
+		cfs, ok := f.Features().UnWrap().(*cache.Fs)
+		if ok {
+			return cfs, nil
+		}
+	}

 	return nil, errors.New("didn't found a cache fs")
 }
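Taken together these hunks are one mechanical migration: every fs.Fs and fs.Object method gains a context.Context as its first parameter, and tests supply context.Background(). From the caller's side the new surface looks like the sketch below; the helper name and remote are illustrative, the signatures are the ones introduced above.

package main

import (
	"bytes"
	"context"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/object"
)

// updateObject is a minimal sketch of driving the context-aware API:
// look the object up, then rewrite its contents. Name illustrative.
func updateObject(ctx context.Context, f fs.Fs, remote string, data []byte) error {
	o, err := f.NewObject(ctx, remote) // ctx now comes first
	if err != nil {
		return err
	}
	info := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data)), true, nil, f)
	return o.Update(ctx, bytes.NewReader(data), info) // ...and here too
}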
backend/cache/cache_mount_unix_test.go (vendored, 6 changes)
@@ -9,9 +9,9 @@ import (

 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
-	"github.com/ncw/rclone/cmd/mount"
-	"github.com/ncw/rclone/cmd/mountlib"
-	"github.com/ncw/rclone/fs"
+	"github.com/rclone/rclone/cmd/mount"
+	"github.com/rclone/rclone/cmd/mountlib"
+	"github.com/rclone/rclone/fs"
 	"github.com/stretchr/testify/require"
 )
backend/cache/cache_mount_windows_test.go (vendored, 6 changes)
@@ -9,10 +9,10 @@ import (
 	"time"

 	"github.com/billziss-gh/cgofuse/fuse"
-	"github.com/ncw/rclone/cmd/cmount"
-	"github.com/ncw/rclone/cmd/mountlib"
-	"github.com/ncw/rclone/fs"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/cmd/cmount"
+	"github.com/rclone/rclone/cmd/mountlib"
+	"github.com/rclone/rclone/fs"
 	"github.com/stretchr/testify/require"
 )
backend/cache/cache_test.go (vendored, 12 changes)
@@ -7,15 +7,17 @@ package cache_test
 import (
 	"testing"

-	"github.com/ncw/rclone/backend/cache"
-	_ "github.com/ncw/rclone/backend/local"
-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/backend/cache"
+	_ "github.com/rclone/rclone/backend/local"
+	"github.com/rclone/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestCache:",
-		NilObject:  (*cache.Object)(nil),
+		RemoteName:                   "TestCache:",
+		NilObject:                    (*cache.Object)(nil),
+		UnimplementableFsMethods:     []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
+		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
 	})
 }
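fstests.Run drives the whole shared integration suite from this one Opt value; the two new Unimplementable lists tell it which optional methods to expect missing instead of failing. A minimal sketch of the same wiring for a hypothetical backend (remote name and skip list illustrative):

package mybackend_test

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs the standard suite against the remote named in the
// config; unsupported optional methods are declared up front so the suite
// skips them rather than failing.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:               "TestMyBackend:", // hypothetical remote
		UnimplementableFsMethods: []string{"PublicLink"},
	})
}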
backend/cache/cache_upload_test.go (vendored, 53 changes)
@@ -3,6 +3,7 @@
 package cache_test

 import (
+	"context"
 	"fmt"
 	"math/rand"
 	"os"
@@ -11,9 +12,9 @@ import (
 	"testing"
 	"time"

-	"github.com/ncw/rclone/backend/cache"
-	_ "github.com/ncw/rclone/backend/drive"
-	"github.com/ncw/rclone/fs"
+	"github.com/rclone/rclone/backend/cache"
+	_ "github.com/rclone/rclone/backend/drive"
+	"github.com/rclone/rclone/fs"
 	"github.com/stretchr/testify/require"
 )
@@ -85,11 +86,11 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)

-	err := rootFs.Mkdir("one")
+	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
-	err = rootFs.Mkdir("one/test")
+	err = rootFs.Mkdir(context.Background(), "one/test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir("second")
+	err = rootFs.Mkdir(context.Background(), "second")
 	require.NoError(t, err)

 	// create some rand test data
@@ -122,11 +123,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
 		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)

-	err := rootFs.Mkdir("one")
+	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
-	err = rootFs.Mkdir("one/test")
+	err = rootFs.Mkdir(context.Background(), "one/test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir("second")
+	err = rootFs.Mkdir(context.Background(), "second")
 	require.NoError(t, err)

 	// create some rand test data
@@ -165,7 +166,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)

-	err := rootFs.Mkdir("test")
+	err := rootFs.Mkdir(context.Background(), "test")
 	require.NoError(t, err)
 	minSize := 5242880
 	maxSize := 10485760
@@ -233,9 +234,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.dirMove(t, rootFs, "test", "second")
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.Error(t, err)
-		_, err = rootFs.NewObject("second/one")
+		_, err = rootFs.NewObject(context.Background(), "second/one")
 		require.NoError(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -256,7 +257,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.rm(t, rootFs, "test")
 	require.Error(t, err)
 	require.Contains(t, err.Error(), "directory not empty")
-	_, err = rootFs.NewObject("test/one")
+	_, err = rootFs.NewObject(context.Background(), "test/one")
 	require.NoError(t, err)
 	// validate that it exists in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -270,9 +271,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	if err != errNotSupported {
 		require.NoError(t, err)
 		// try to read from it
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.Error(t, err)
-		_, err = rootFs.NewObject("test/second")
+		_, err = rootFs.NewObject(context.Background(), "test/second")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)
@@ -289,9 +290,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/third")
+		_, err = rootFs.NewObject(context.Background(), "test/third")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)
@@ -306,7 +307,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	// test Remove -- allowed
 	err = runInstance.rm(t, rootFs, "test/one")
 	require.NoError(t, err)
-	_, err = rootFs.NewObject("test/one")
+	_, err = rootFs.NewObject(context.Background(), "test/one")
 	require.Error(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -318,7 +319,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	require.NoError(t, err)
 	err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
 	require.NoError(t, err)
-	obj2, err := rootFs.NewObject("test/one")
+	obj2, err := rootFs.NewObject(context.Background(), "test/one")
 	require.NoError(t, err)
 	data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
 	require.Equal(t, "one content updated", string(data2))
@@ -366,7 +367,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	err = runInstance.dirMove(t, rootFs, "test", "second")
 	if err != errNotSupported {
 		require.Error(t, err)
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.NoError(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -378,7 +379,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	// test Rmdir
 	err = runInstance.rm(t, rootFs, "test")
 	require.Error(t, err)
-	_, err = rootFs.NewObject("test/one")
+	_, err = rootFs.NewObject(context.Background(), "test/one")
 	require.NoError(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -389,9 +390,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	if err != errNotSupported {
 		require.Error(t, err)
 		// try to read from it
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/second")
+		_, err = rootFs.NewObject(context.Background(), "test/second")
 		require.Error(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -404,9 +405,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/third")
+		_, err = rootFs.NewObject(context.Background(), "test/third")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)
@@ -421,7 +422,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	// test Remove
 	err = runInstance.rm(t, rootFs, "test/one")
 	require.Error(t, err)
-	_, err = rootFs.NewObject("test/one")
+	_, err = rootFs.NewObject(context.Background(), "test/one")
 	require.NoError(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
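All of these tests lean on one guard: a helper performs the optional operation and returns a sentinel errNotSupported when the wrapped remote lacks the feature, so the assertions run only where they can pass. A minimal sketch of such a helper, with the body illustrative:

package cache_test

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
)

// errNotSupported signals that the wrapped remote lacks an optional feature.
var errNotSupported = errors.New("not supported")

// dirMove moves a directory server-side, or reports errNotSupported so the
// caller can skip the follow-up assertions (sketch).
func dirMove(ctx context.Context, rootFs fs.Fs, src, dst string) error {
	doDirMove := rootFs.Features().DirMove
	if doDirMove == nil {
		return errNotSupported
	}
	return doDirMove(ctx, rootFs, src, dst)
}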
backend/cache/directory.go (vendored, 9 changes)
@@ -3,10 +3,11 @@
 package cache

 import (
+	"context"
 	"path"
 	"time"

-	"github.com/ncw/rclone/fs"
+	"github.com/rclone/rclone/fs"
 )

 // Directory is a generic dir that stores basic information about it
@@ -55,7 +56,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory {
 }

 // DirectoryFromOriginal builds one from a generic fs.Directory
-func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
+func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory {
 	var cd *Directory
 	fullRemote := path.Join(f.Root(), d.Remote())

@@ -67,7 +68,7 @@ func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
 		CacheFs:      f,
 		Name:         name,
 		Dir:          dir,
-		CacheModTime: d.ModTime().UnixNano(),
+		CacheModTime: d.ModTime(ctx).UnixNano(),
 		CacheSize:    d.Size(),
 		CacheItems:   d.Items(),
 		CacheType:    "Directory",
@@ -110,7 +111,7 @@ func (d *Directory) parentRemote() string {
 }

 // ModTime returns the cached ModTime
-func (d *Directory) ModTime() time.Time {
+func (d *Directory) ModTime(ctx context.Context) time.Time {
 	return time.Unix(0, d.CacheModTime)
 }
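Directory.ModTime accepts the new ctx argument only to keep satisfying fs.Directory; a cached implementation never dereferences it. The usual way to lock that in is a compile-time assertion inside the package, sketched here:

// Compile-time check (inside package cache) that *Directory still
// satisfies fs.Directory after the signature change - illustrative idiom.
var _ fs.Directory = (*Directory)(nil)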
backend/cache/handle.go (vendored, 25 changes)
@@ -3,6 +3,7 @@
 package cache

 import (
+	"context"
 	"fmt"
 	"io"
 	"path"
@@ -11,9 +12,9 @@ import (
 	"sync"
 	"time"

-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/operations"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/operations"
 )

 var uploaderMap = make(map[string]*backgroundWriter)
@@ -40,6 +41,7 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {

 // Handle is managing the read/write/seek operations on an open handle
 type Handle struct {
+	ctx          context.Context
 	cachedObject *Object
 	cfs          *Fs
 	memory       *Memory
@@ -58,8 +60,9 @@ type Handle struct {
 }

 // NewObjectHandle returns a new Handle for an existing Object
-func NewObjectHandle(o *Object, cfs *Fs) *Handle {
+func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle {
 	r := &Handle{
+		ctx:          ctx,
 		cachedObject: o,
 		cfs:          cfs,
 		offset:       0,
@@ -351,7 +354,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 	r := w.rc
 	if w.rc == nil {
 		r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-			return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
+			return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
 		})
 		if err != nil {
 			return nil, err
@@ -361,7 +364,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error

 	if !closeOpen {
 		if do, ok := r.(fs.RangeSeeker); ok {
-			_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
+			_, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset)
 			return r, err
 		} else if do, ok := r.(io.Seeker); ok {
 			_, err = do.Seek(offset, io.SeekStart)
@@ -371,7 +374,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error

 	_ = w.rc.Close()
 	return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-		r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
+		r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
 		if err != nil {
 			return nil, err
 		}
@@ -449,7 +452,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	// we seem to be getting only errors so we abort
 	if err != nil {
 		fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
-		err = w.r.cachedObject.refreshFromSource(true)
+		err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
 		if err != nil {
 			fs.Errorf(w, "%v", err)
 		}
@@ -462,7 +465,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	sourceRead, err = io.ReadFull(w.rc, data)
 	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
 		fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
-		err = w.r.cachedObject.refreshFromSource(true)
+		err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
 		if err != nil {
 			fs.Errorf(w, "%v", err)
 		}
@@ -588,7 +591,7 @@ func (b *backgroundWriter) run() {
 		remote := b.fs.cleanRootFromPath(absPath)
 		b.notify(remote, BackgroundUploadStarted, nil)
 		fs.Infof(remote, "background upload: started upload")
-		err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
+		err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote)
 		if err != nil {
 			b.notify(remote, BackgroundUploadError, err)
 			_ = b.fs.cache.rollbackPendingUpload(absPath)
@@ -598,14 +601,14 @@ func (b *backgroundWriter) run() {
 		// clean empty dirs up to root
 		thisDir := cleanPath(path.Dir(remote))
 		for thisDir != "" {
-			thisList, err := b.fs.tempFs.List(thisDir)
+			thisList, err := b.fs.tempFs.List(context.TODO(), thisDir)
 			if err != nil {
 				break
 			}
 			if len(thisList) > 0 {
 				break
 			}
-			err = b.fs.tempFs.Rmdir(thisDir)
+			err = b.fs.tempFs.Rmdir(context.TODO(), thisDir)
 			fs.Debugf(thisDir, "cleaned from temp path")
 			if err != nil {
 				break
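The worker's reader logic tries three strategies in order: a native fs.RangeSeeker, a plain io.Seeker, and finally closing and reopening at the new offset. Extracted into a free-standing sketch (reopen callback and names illustrative):

package cache

import (
	"context"
	"io"

	"github.com/rclone/rclone/fs"
)

// seekTo positions r at offset, preferring cheap seeks over a reopen.
// reopen is assumed to return a reader already positioned at offset.
func seekTo(ctx context.Context, r io.ReadCloser, offset, end int64,
	reopen func(ctx context.Context) (io.ReadCloser, error)) (io.ReadCloser, error) {
	if do, ok := r.(fs.RangeSeeker); ok {
		_, err := do.RangeSeek(ctx, offset, io.SeekStart, end-offset)
		return r, err
	}
	if do, ok := r.(io.Seeker); ok {
		_, err := do.Seek(offset, io.SeekStart)
		return r, err
	}
	_ = r.Close()
	return reopen(ctx)
}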
backend/cache/object.go (vendored, 71 changes)
@@ -3,15 +3,16 @@
 package cache

 import (
+	"context"
 	"io"
 	"path"
 	"sync"
 	"time"

-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/readers"
 )

 const (
@@ -68,7 +69,7 @@ func NewObject(f *Fs, remote string) *Object {
 }

 // ObjectFromOriginal builds one from a generic fs.Object
-func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
+func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
 	var co *Object
 	fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
 	dir, name := path.Split(fullRemote)
@@ -92,13 +93,13 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
 		CacheType: cacheType,
 		CacheTs:   time.Now(),
 	}
-	co.updateData(o)
+	co.updateData(ctx, o)
 	return co
 }

-func (o *Object) updateData(source fs.Object) {
+func (o *Object) updateData(ctx context.Context, source fs.Object) {
 	o.Object = source
-	o.CacheModTime = source.ModTime().UnixNano()
+	o.CacheModTime = source.ModTime(ctx).UnixNano()
 	o.CacheSize = source.Size()
 	o.CacheStorable = source.Storable()
 	o.CacheTs = time.Now()
@@ -130,20 +131,20 @@ func (o *Object) abs() string {
 }

 // ModTime returns the cached ModTime
-func (o *Object) ModTime() time.Time {
-	_ = o.refresh()
+func (o *Object) ModTime(ctx context.Context) time.Time {
+	_ = o.refresh(ctx)
 	return time.Unix(0, o.CacheModTime)
 }

 // Size returns the cached Size
 func (o *Object) Size() int64 {
-	_ = o.refresh()
+	_ = o.refresh(context.TODO())
 	return o.CacheSize
 }

 // Storable returns the cached Storable
 func (o *Object) Storable() bool {
-	_ = o.refresh()
+	_ = o.refresh(context.TODO())
 	return o.CacheStorable
 }

@@ -151,18 +152,18 @@ func (o *Object) Storable() bool {
 // all these conditions must be true to ignore a refresh
 // 1. cache ts didn't expire yet
 // 2. is not pending a notification from the wrapped fs
-func (o *Object) refresh() error {
+func (o *Object) refresh(ctx context.Context) error {
 	isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
 	isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
 	if !isExpired && !isNotified {
 		return nil
 	}

-	return o.refreshFromSource(true)
+	return o.refreshFromSource(ctx, true)
 }

 // refreshFromSource requests the original FS for the object in case it comes from a cached entry
-func (o *Object) refreshFromSource(force bool) error {
+func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
 	o.refreshMutex.Lock()
 	defer o.refreshMutex.Unlock()
 	var err error
@@ -172,29 +173,29 @@ func (o *Object) refreshFromSource(force bool) error {
 		return nil
 	}
 	if o.isTempFile() {
-		liveObject, err = o.ParentFs.NewObject(o.Remote())
+		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
 		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
 	} else {
-		liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
+		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
 		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
 	}
 	if err != nil {
 		fs.Errorf(o, "error refreshing object in : %v", err)
 		return err
 	}
-	o.updateData(liveObject)
+	o.updateData(ctx, liveObject)
 	o.persist()

 	return nil
 }

 // SetModTime sets the ModTime of this object
-func (o *Object) SetModTime(t time.Time) error {
-	if err := o.refreshFromSource(false); err != nil {
+func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return err
 	}

-	err := o.Object.SetModTime(t)
+	err := o.Object.SetModTime(ctx, t)
 	if err != nil {
 		return err
 	}
@@ -207,19 +208,19 @@ func (o *Object) SetModTime(t time.Time) error {
 }

 // Open is used to request a specific part of the file using fs.RangeOption
-func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
 	var err error

 	if o.Object == nil {
-		err = o.refreshFromSource(true)
+		err = o.refreshFromSource(ctx, true)
 	} else {
-		err = o.refresh()
+		err = o.refresh(ctx)
 	}
 	if err != nil {
 		return nil, err
 	}

-	cacheReader := NewObjectHandle(o, o.CacheFs)
+	cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -238,8 +239,8 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 }

 // Update will change the object data
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	if err := o.refreshFromSource(false); err != nil {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return err
 	}
 	// pause background uploads if active
@@ -254,7 +255,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	fs.Debugf(o, "updating object contents with size %v", src.Size())

 	// FIXME use reliable upload
-	err := o.Object.Update(in, src, options...)
+	err := o.Object.Update(ctx, in, src, options...)
 	if err != nil {
 		fs.Errorf(o, "error updating source: %v", err)
 		return err
@@ -265,7 +266,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	// advertise to ChangeNotify if wrapped doesn't do that
 	o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)

-	o.CacheModTime = src.ModTime().UnixNano()
+	o.CacheModTime = src.ModTime(ctx).UnixNano()
 	o.CacheSize = src.Size()
 	o.CacheHashes = make(map[hash.Type]string)
 	o.CacheTs = time.Now()
@@ -275,8 +276,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }

 // Remove deletes the object from both the cache and the source
-func (o *Object) Remove() error {
-	if err := o.refreshFromSource(false); err != nil {
+func (o *Object) Remove(ctx context.Context) error {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return err
 	}
 	// pause background uploads if active
@@ -288,7 +289,7 @@ func (o *Object) Remove() error {
 			return errors.Errorf("%v is currently uploading, can't delete", o)
 		}
 	}
-	err := o.Object.Remove()
+	err := o.Object.Remove(ctx)
 	if err != nil {
 		return err
 	}
@@ -306,8 +307,8 @@ func (o *Object) Remove() error {

 // Hash requests a hash of the object and stores in the cache
 // since it might or might not be called, this is lazy loaded
-func (o *Object) Hash(ht hash.Type) (string, error) {
-	_ = o.refresh()
+func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
+	_ = o.refresh(ctx)
 	if o.CacheHashes == nil {
 		o.CacheHashes = make(map[hash.Type]string)
 	}
@@ -316,10 +317,10 @@ func (o *Object) Hash(ht hash.Type) (string, error) {
 	if found {
 		return cachedHash, nil
 	}
-	if err := o.refreshFromSource(false); err != nil {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return "", err
 	}
-	liveHash, err := o.Object.Hash(ht)
+	liveHash, err := o.Object.Hash(ctx, ht)
 	if err != nil {
 		return "", err
 	}
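refresh skips the round trip only while the entry is inside its InfoAge TTL and no change notification is pending; either condition forces a reload from the source. The gate reduces to a few lines, sketched generically:

// staleness gate as used above: refresh only when the TTL has expired
// or the wrapped fs flagged this remote as changed (sketch, names illustrative).
func needsRefresh(cachedAt time.Time, infoAge time.Duration, notified bool) bool {
	isExpired := time.Now().After(cachedAt.Add(infoAge))
	return isExpired || notified
}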
backend/cache/plex.go (vendored, 4 changes)
@@ -14,8 +14,8 @@ import (
 	"sync"
 	"time"

-	"github.com/ncw/rclone/fs"
-	"github.com/patrickmn/go-cache"
+	cache "github.com/patrickmn/go-cache"
+	"github.com/rclone/rclone/fs"
 	"golang.org/x/net/websocket"
 )
backend/cache/storage_memory.go (vendored, 4 changes)
@@ -7,9 +7,9 @@ import (
 	"strings"
 	"time"

-	"github.com/ncw/rclone/fs"
-	"github.com/patrickmn/go-cache"
+	cache "github.com/patrickmn/go-cache"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
 )

 // Memory is a wrapper of transient storage for a go-cache store
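Both files also rename the go-cache import: the import path ends in go-cache while the package identifier is cache, and spelling the alias out keeps tooling from reordering or mislabelling it. The idiom in isolation, using go-cache's documented constructor:

package example

import (
	// path ends in "go-cache" but the package name is "cache";
	// the explicit alias documents the mismatch
	cache "github.com/patrickmn/go-cache"
)

// a store with no default expiry and no cleanup janitor
var store = cache.New(cache.NoExpiration, 0)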
backend/cache/storage_persistent.go (vendored, 15 changes)
@@ -4,6 +4,7 @@ package cache

 import (
 	"bytes"
+	"context"
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
@@ -16,9 +17,9 @@ import (
 	"time"

 	bolt "github.com/coreos/bbolt"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/walk"
 )

 // Constants
@@ -398,7 +399,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
 	if err != nil {
 		return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
 	}
-	err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
+	err = bucket.Put([]byte(cachedObject.Name), encoded)
 	if err != nil {
 		return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
 	}
@@ -809,7 +810,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
 	if err != nil {
 		return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
 	}
-	err = bucket.Put([]byte(destPath), []byte(encoded))
+	err = bucket.Put([]byte(destPath), encoded)
 	if err != nil {
 		return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 	}
@@ -1014,7 +1015,7 @@ func (b *Persistent) SetPendingUploadToStarted(remote string) error {
 }

 // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
-func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
+func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		_ = tx.DeleteBucket([]byte(tempBucket))
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
@@ -1023,7 +1024,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
 		}

 		var queuedEntries []fs.Object
-		err = walk.Walk(cacheFs.tempFs, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
+		err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
 			for _, o := range entries {
 				if oo, ok := o.(fs.Object); ok {
 					queuedEntries = append(queuedEntries, oo)
@@ -1049,7 +1050,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
 			if err != nil {
 				return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
 			}
			err = bucket.Put([]byte(destPath), []byte(encoded))
-			err = bucket.Put([]byte(destPath), encoded)
+			err = bucket.Put([]byte(destPath), encoded)
 			if err != nil {
 				return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 			}
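ReconcileTempUploads swaps the old per-directory walk.Walk callback for walk.ListR, which takes a context, a listing filter, and a flat callback over batches of entries. Collecting every object under a root with the new call looks like this (sketch, assuming a configured fs.Fs):

// collectObjects lists every object under dir using the flat ListR walker.
func collectObjects(ctx context.Context, f fs.Fs, dir string) ([]fs.Object, error) {
	var objs []fs.Object
	err := walk.ListR(ctx, f, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
		for _, entry := range entries {
			if o, ok := entry.(fs.Object); ok {
				objs = append(objs, o)
			}
		}
		return nil
	})
	return objs, err
}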
@@ -2,6 +2,7 @@ package crypt

 import (
 	"bytes"
+	"context"
 	"crypto/aes"
 	gocipher "crypto/cipher"
 	"crypto/rand"
@@ -13,10 +14,10 @@ import (
 	"sync"
 	"unicode/utf8"

-	"github.com/ncw/rclone/backend/crypt/pkcs7"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/accounting"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/crypt/pkcs7"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rfjakob/eme"
 	"golang.org/x/crypto/nacl/secretbox"
 	"golang.org/x/crypto/scrypt"
@@ -41,6 +42,7 @@ var (
 	ErrorBadDecryptControlChar   = errors.New("bad decryption - contains control chars")
 	ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
 	ErrorTooShortAfterDecode     = errors.New("too short after base32 decode")
+	ErrorTooLongAfterDecode      = errors.New("too long after base32 decode")
 	ErrorEncryptedFileTooShort   = errors.New("file is too short to be encrypted")
 	ErrorEncryptedFileBadHeader  = errors.New("file has truncated block header")
 	ErrorEncryptedBadMagic       = errors.New("not an encrypted file - bad magic string")
@@ -67,7 +69,7 @@ type ReadSeekCloser interface {
 }

 // OpenRangeSeek opens the file handle at the offset with the limit given
-type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
+type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)

 // Cipher is used to swap out the encryption implementations
 type Cipher interface {
@@ -84,7 +86,7 @@ type Cipher interface {
 	// DecryptData
 	DecryptData(io.ReadCloser) (io.ReadCloser, error)
 	// DecryptDataSeek decrypt at a given position
-	DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
+	DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
 	// EncryptedSize calculates the size of the data when encrypted
 	EncryptedSize(int64) int64
 	// DecryptedSize calculates the size of the data when decrypted
@@ -284,6 +286,9 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
 		// not possible if decodeFilename() working correctly
 		return "", ErrorTooShortAfterDecode
 	}
+	if len(rawCiphertext) > 2048 {
+		return "", ErrorTooLongAfterDecode
+	}
 	paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
 	plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
 	if err != nil {
@@ -459,7 +464,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
 			if int(newRune) < base {
 				newRune += 256
 			}
-			_, _ = result.WriteRune(rune(newRune))
+			_, _ = result.WriteRune(newRune)

 		default:
 			_, _ = result.WriteRune(runeValue)
@@ -744,29 +749,29 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 	if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
 		return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
 	}
-	// retreive the nonce
+	// retrieve the nonce
 	fh.nonce.fromBuf(readBuf[fileMagicSize:])
 	fh.initialNonce = fh.nonce
 	return fh, nil
 }

 // newDecrypterSeek creates a new file handle decrypting on the fly
-func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
+func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
 	var rc io.ReadCloser
 	doRangeSeek := false
 	setLimit := false
 	// Open initially with no seek
 	if offset == 0 && limit < 0 {
 		// If no offset or limit then open whole file
-		rc, err = open(0, -1)
+		rc, err = open(ctx, 0, -1)
 	} else if offset == 0 {
 		// If no offset open the header + limit worth of the file
 		_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
-		rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
+		rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit)
 		setLimit = true
 	} else {
 		// Otherwise just read the header to start with
-		rc, err = open(0, int64(fileHeaderSize))
+		rc, err = open(ctx, 0, int64(fileHeaderSize))
 		doRangeSeek = true
 	}
 	if err != nil {
@@ -779,7 +784,7 @@ func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *
 	}
 	fh.open = open // will be called by fh.RangeSeek
 	if doRangeSeek {
-		_, err = fh.RangeSeek(offset, io.SeekStart, limit)
+		_, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit)
 		if err != nil {
 			_ = fh.Close()
 			return nil, err
@@ -899,7 +904,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit
 // limiting the total length to limit.
 //
 // RangeSeek with a limit of < 0 is equivalent to a regular Seek.
-func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
+func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) {
 	fh.mu.Lock()
 	defer fh.mu.Unlock()

@@ -926,7 +931,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
 	// Can we seek underlying stream directly?
 	if do, ok := fh.rc.(fs.RangeSeeker); ok {
 		// Seek underlying stream directly
-		_, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
+		_, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit)
 		if err != nil {
 			return 0, fh.finish(err)
 		}
@@ -936,7 +941,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
 		fh.rc = nil

 		// Re-open the underlying object with the offset given
-		rc, err := fh.open(underlyingOffset, underlyingLimit)
+		rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
 		if err != nil {
 			return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
 		}
@@ -965,7 +970,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er

 // Seek implements the io.Seeker interface
 func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
-	return fh.RangeSeek(offset, whence, -1)
+	return fh.RangeSeek(context.TODO(), offset, whence, -1)
 }

 // finish sets the final error and tidies up
@@ -1039,8 +1044,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 // The open function must return a ReadCloser opened to the offset supplied
 //
 // You must use this form of DecryptData if you might want to Seek the file handle
-func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
-	out, err := c.newDecrypterSeek(open, offset, limit)
+func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
+	out, err := c.newDecrypterSeek(ctx, open, offset, limit)
 	if err != nil {
 		return nil, err
 	}
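OpenRangeSeek is the hook the decrypter calls whenever it must (re)open the ciphertext at a new offset, and it now threads a context through. An in-memory implementation of the new shape, much like the one the tests below build (illustrative):

// openFromBytes returns an OpenRangeSeek backed by an in-memory ciphertext
// slice (sketch). A limit < 0 means "to the end".
func openFromBytes(ciphertext []byte) OpenRangeSeek {
	return func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) {
		end := int64(len(ciphertext))
		if limit >= 0 && offset+limit < end {
			end = offset + limit
		}
		return ioutil.NopCloser(bytes.NewReader(ciphertext[offset:end])), nil
	}
}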
@@ -2,6 +2,7 @@ package crypt

 import (
 	"bytes"
+	"context"
 	"encoding/base32"
 	"fmt"
 	"io"
@@ -9,8 +10,8 @@ import (
 	"strings"
 	"testing"

-	"github.com/ncw/rclone/backend/crypt/pkcs7"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -194,6 +195,10 @@ func TestEncryptSegment(t *testing.T) {

 func TestDecryptSegment(t *testing.T) {
 	// We've tested the forwards above, now concentrate on the errors
+	longName := make([]byte, 3328)
+	for i := range longName {
+		longName[i] = 'a'
+	}
 	c, _ := newCipher(NameEncryptionStandard, "", "", true)
 	for _, test := range []struct {
 		in string
@@ -201,6 +206,7 @@ func TestDecryptSegment(t *testing.T) {
 	}{
 		{"64=", ErrorBadBase32Encoding},
 		{"!", base32.CorruptInputError(0)},
+		{string(longName), ErrorTooLongAfterDecode},
 		{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
 		{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
 		{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
@@ -960,7 +966,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {

 	// Open stream with a seek of underlyingOffset
 	var reader io.ReadCloser
-	open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+	open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 		end := len(ciphertext)
 		if underlyingLimit >= 0 {
 			end = int(underlyingOffset + underlyingLimit)
@@ -1001,7 +1007,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 			if offset+limit > len(plaintext) {
 				continue
 			}
-			rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
+			rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit))
 			assert.NoError(t, err)

 			check(rc, offset, limit)
@@ -1009,14 +1015,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	}

 	// Try decoding it with a single open and lots of seeks
-	fh, err := c.DecryptDataSeek(open, 0, -1)
+	fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1)
 	assert.NoError(t, err)
 	for _, offset := range trials {
 		for _, limit := range limits {
 			if offset+limit > len(plaintext) {
 				continue
 			}
-			_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
+			_, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit))
 			assert.NoError(t, err)

 			check(fh, offset, limit)
@@ -1067,7 +1073,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	} {
 		what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
 		callCount := 0
-		testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+		testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 			switch callCount {
 			case 0:
 				assert.Equal(t, int64(0), underlyingOffset, what)
@@ -1079,11 +1085,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 				t.Errorf("Too many calls %d for %s", callCount+1, what)
 			}
 			callCount++
-			return open(underlyingOffset, underlyingLimit)
+			return open(ctx, underlyingOffset, underlyingLimit)
 		}
-		fh, err := c.DecryptDataSeek(testOpen, 0, -1)
+		fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1)
 		assert.NoError(t, err)
-		gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
+		gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit)
 		assert.NoError(t, err)
 		assert.Equal(t, gotOffset, test.offset)
 	}
@@ -2,19 +2,20 @@
 package crypt

 import (
+	"context"
 	"fmt"
 	"io"
 	"strings"
 	"time"

-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/accounting"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/config/obscure"
-	"github.com/ncw/rclone/fs/fspath"
-	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/fspath"
+	"github.com/rclone/rclone/fs/hash"
 )

 // Globals
@@ -122,7 +123,7 @@ func NewCipher(m configmap.Mapper) (Cipher, error) {
 	return newCipherForConfig(opt)
 }

-// NewFs contstructs an Fs from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
@@ -169,23 +170,10 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 		WriteMimeType:           false,
 		BucketBased:             true,
 		CanHaveEmptyDirectories: true,
+		SetTier:                 true,
+		GetTier:                 true,
 	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)

-	doChangeNotify := wrappedFs.Features().ChangeNotify
-	if doChangeNotify != nil {
-		f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
-			wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
-				decrypted, err := f.DecryptFileName(path)
-				if err != nil {
-					fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
-					return
-				}
-				notifyFunc(decrypted, entryType)
-			}
-			doChangeNotify(wrappedNotifyFunc, pollInterval)
-		}
-	}

 	return f, err
 }
@@ -202,6 +190,7 @@ type Options struct {
 // Fs represents a wrapped fs.Fs
 type Fs struct {
 	fs.Fs
+	wrapper fs.Fs
 	name    string
 	root    string
 	opt     Options
@@ -244,7 +233,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
 }

 // Encrypt an directory file name to entries.
-func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
+func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
 	if err != nil {
@@ -254,18 +243,18 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
 	if f.opt.ShowMapping {
 		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
 	}
-	*entries = append(*entries, f.newDir(dir))
+	*entries = append(*entries, f.newDir(ctx, dir))
 }

 // Encrypt some directory entries. This alters entries returning it as newEntries.
-func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
+func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
 	newEntries = entries[:0] // in place filter
 	for _, entry := range entries {
 		switch x := entry.(type) {
 		case fs.Object:
 			f.add(&newEntries, x)
 		case fs.Directory:
-			f.addDir(&newEntries, x)
+			f.addDir(ctx, &newEntries, x)
 		default:
 			return nil, errors.Errorf("Unknown object type %T", entry)
 		}
@@ -282,12 +271,12 @@ func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
 	if err != nil {
 		return nil, err
 	}
-	return f.encryptEntries(entries)
+	return f.encryptEntries(ctx, entries)
 }

 // ListR lists the objects and directories of the Fs starting
@@ -306,9 +295,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
-	return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
-		newEntries, err := f.encryptEntries(entries)
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
+	return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
+		newEntries, err := f.encryptEntries(ctx, entries)
 		if err != nil {
 			return err
 		}
@@ -317,18 +306,18 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 }

 // NewObject finds the Object at remote.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
-	o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote))
 	if err != nil {
 		return nil, err
 	}
 	return f.newObject(o), nil
 }

-type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
+type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)

 // put implements Put or PutStream
-func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
 	// Encrypt the data into wrappedIn
 	wrappedIn, err := f.cipher.EncryptData(in)
 	if err != nil {
@@ -354,7 +343,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 	}

 	// Transfer the data
-	o, err := put(wrappedIn, f.newObjectInfo(src), options...)
+	o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
 	if err != nil {
 		return nil, err
 	}
@@ -363,13 +352,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 	if ht != hash.None && hasher != nil {
 		srcHash := hasher.Sums()[ht]
 		var dstHash string
-		dstHash, err = o.Hash(ht)
+		dstHash, err = o.Hash(ctx, ht)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to read destination hash")
 		}
 		if srcHash != "" && dstHash != "" && srcHash != dstHash {
 			// remove object
-			err = o.Remove()
+			err = o.Remove(ctx)
 			if err != nil {
 				fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 			}
@@ -385,13 +374,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.put(in, src, options, f.Fs.Put)
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.put(ctx, in, src, options, f.Fs.Put)
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.put(in, src, options, f.Fs.Features().PutStream)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.put(ctx, in, src, options, f.Fs.Features().PutStream)
 }
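put verifies the upload by hashing the plaintext as it streams and comparing against the hash the destination reports, removing the object on mismatch. That verify-or-delete step, isolated as a sketch (error wording illustrative):

// verifyUpload deletes o when the source and destination hashes are both
// known and disagree - the corruption check used by put() above (sketch).
func verifyUpload(ctx context.Context, o fs.Object, srcHash string, ht hash.Type) error {
	dstHash, err := o.Hash(ctx, ht)
	if err != nil {
		return errors.Wrap(err, "failed to read destination hash")
	}
	if srcHash != "" && dstHash != "" && srcHash != dstHash {
		if removeErr := o.Remove(ctx); removeErr != nil {
			fs.Errorf(o, "Failed to remove corrupted object: %v", removeErr)
		}
		return errors.Errorf("corrupted on transfer: %v hashes differ %q vs %q", ht, srcHash, dstHash)
	}
	return nil
}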
 // Hashes returns the supported hash sets.
@@ -402,15 +391,15 @@ func (f *Fs) Hashes() hash.Set {
 // Mkdir makes the directory (container, bucket)
 //
 // Shouldn't return an error if it already exists
-func (f *Fs) Mkdir(dir string) error {
-	return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
 }

 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
-func (f *Fs) Rmdir(dir string) error {
-	return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
 }

 // Purge all files in the root and the root directory
@@ -419,12 +408,12 @@ func (f *Fs) Rmdir(dir string) error {
 // quicker than just running Remove() on the result of List()
 //
 // Return an error if it doesn't exist
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	do := f.Fs.Features().Purge
 	if do == nil {
 		return fs.ErrorCantPurge
 	}
-	return do()
+	return do(ctx)
 }

 // Copy src to this remote using server side copy operations.
@@ -436,7 +425,7 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	do := f.Fs.Features().Copy
 	if do == nil {
 		return nil, fs.ErrorCantCopy
@@ -445,7 +434,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	if !ok {
 		return nil, fs.ErrorCantCopy
 	}
-	oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
+	oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
 	if err != nil {
 		return nil, err
 	}
@@ -461,7 +450,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	do := f.Fs.Features().Move
 	if do == nil {
 		return nil, fs.ErrorCantMove
@@ -470,7 +459,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	if !ok {
 		return nil, fs.ErrorCantMove
 	}
-	oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
+	oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
 	if err != nil {
 		return nil, err
 	}
@@ -485,7 +474,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	do := f.Fs.Features().DirMove
 	if do == nil {
 		return fs.ErrorCantDirMove
@@ -495,14 +484,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
 		return fs.ErrorCantDirMove
 	}
-	return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
+	return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
 }

 // PutUnchecked uploads the object
 //
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	do := f.Fs.Features().PutUnchecked
 	if do == nil {
 		return nil, errors.New("can't PutUnchecked")
@@ -511,7 +500,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
 	if err != nil {
 		return nil, err
 	}
-	o, err := do(wrappedIn, f.newObjectInfo(src))
+	o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
 	if err != nil {
 		return nil, err
 	}
@@ -522,21 +511,21 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
 //
 // Implement this if you have a way of emptying the trash or
 // otherwise cleaning up old versions of files.
-func (f *Fs) CleanUp() error {
+func (f *Fs) CleanUp(ctx context.Context) error {
 	do := f.Fs.Features().CleanUp
 	if do == nil {
 		return errors.New("can't CleanUp")
 	}
-	return do()
+	return do(ctx)
 }

 // About gets quota information from the Fs
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	do := f.Fs.Features().About
 	if do == nil {
 		return nil, errors.New("About not supported")
 	}
-	return do()
+	return do(ctx)
 }

 // UnWrap returns the Fs that this Fs is wrapping
@@ -544,6 +533,16 @@ func (f *Fs) UnWrap() fs.Fs {
 	return f.Fs
 }

+// WrapFs returns the Fs that is wrapping this Fs
+func (f *Fs) WrapFs() fs.Fs {
+	return f.wrapper
+}
+
+// SetWrapper sets the Fs that is wrapping this Fs
+func (f *Fs) SetWrapper(wrapper fs.Fs) {
+	f.wrapper = wrapper
+}
+
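WrapFs and SetWrapper exist so an outer wrapping backend (cache over crypt, say) can register itself with the inner layer and receive notifications back. Caller-side wiring can be sketched with a locally declared interface (names illustrative):

// wrapper is the optional pair implemented above; declaring it locally
// keeps the wiring decoupled from any concrete backend (sketch).
type wrapper interface {
	WrapFs() fs.Fs
	SetWrapper(fs.Fs)
}

func wireWrapping(inner, outer fs.Fs) {
	if w, ok := inner.(wrapper); ok {
		w.SetWrapper(outer)
	}
}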
// EncryptFileName returns an encrypted file name
|
||||
func (f *Fs) EncryptFileName(fileName string) string {
|
||||
return f.cipher.EncryptFileName(fileName)
|
||||
@@ -555,13 +554,13 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
|
||||
}
|
||||
|
||||
// ComputeHash takes the nonce from o, and encrypts the contents of
|
||||
// src with it, and calcuates the hash given by HashType on the fly
|
||||
// src with it, and calculates the hash given by HashType on the fly
|
||||
//
|
||||
// Note that we break lots of encapsulation in this function.
|
||||
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
||||
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
||||
// Read the nonce - opening the file is sufficient to read the nonce in
|
||||
// use a limited read so we only read the header
|
||||
in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
||||
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to open object to read nonce")
|
||||
}
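
A minimal sketch of the ranged-open pattern used here (the readHeader helper and the example package are editorial assumptions, not part of the change):

package example

import (
	"context"
	"io/ioutil"

	"github.com/rclone/rclone/fs"
)

// readHeader reads only the first headerSize bytes of an object by
// passing a RangeOption, which is how ComputeHash reads just the nonce.
func readHeader(ctx context.Context, o fs.Object, headerSize int64) ([]byte, error) {
	in, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: headerSize - 1})
	if err != nil {
		return nil, err
	}
	defer func() { _ = in.Close() }()
	return ioutil.ReadAll(in)
}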
@@ -591,7 +590,7 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr
}

// Open the src for input
in, err = src.Open()
in, err = src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
@@ -616,6 +615,75 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr
return m.Sums()[hashType], nil
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
do := f.Fs.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
out := make([]fs.Directory, len(dirs))
for i, dir := range dirs {
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
}
return do(ctx, out)
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
do := f.Fs.Features().DirCacheFlush
if do != nil {
do()
}
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
do := f.Fs.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
}
o, err := f.NewObject(ctx, remote)
if err != nil {
// assume it is a directory
return do(ctx, f.cipher.EncryptDirName(remote))
}
return do(ctx, o.(*Object).Object.Remote())
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
do := f.Fs.Features().ChangeNotify
if do == nil {
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
var (
err error
decrypted string
)
switch entryType {
case fs.EntryDirectory:
decrypted, err = f.cipher.DecryptDirName(path)
case fs.EntryObject:
decrypted, err = f.cipher.DecryptFileName(path)
default:
fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
return
}
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
}
do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
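
A minimal sketch of how a caller might drive this interface (the watch helper, the example package and the kick-off send are editorial assumptions):

package example

import (
	"context"
	"time"

	"github.com/rclone/rclone/fs"
)

// watch starts change notifications on any Fs whose Features support them
// and asks the backend to poll at the given interval.
func watch(ctx context.Context, f fs.Fs, interval time.Duration, notify func(string, fs.EntryType)) chan<- time.Duration {
	pollChan := make(chan time.Duration)
	if do := f.Features().ChangeNotify; do != nil {
		do(ctx, notify, pollChan)
		pollChan <- interval // start polling at the requested interval
	}
	return pollChan
}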

// Object describes a wrapped for being read from the Fs
//
// This decrypts the remote name and decrypts the data
@@ -666,7 +734,7 @@ func (o *Object) Size() int64 {

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ht hash.Type) (string, error) {
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}

@@ -676,7 +744,7 @@ func (o *Object) UnWrap() fs.Object {
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
for _, option := range options {
@@ -690,10 +758,10 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
openOptions = append(openOptions, option)
}
}
rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
if underlyingOffset == 0 && underlyingLimit < 0 {
// Open with no seek
return o.Object.Open(openOptions...)
return o.Object.Open(ctx, openOptions...)
}
// Open stream with a range of underlyingOffset, underlyingLimit
end := int64(-1)
@@ -704,7 +772,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
}
}
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
return o.Object.Open(newOpenOptions...)
return o.Object.Open(ctx, newOpenOptions...)
}, offset, limit)
if err != nil {
return nil, err
@@ -713,17 +781,17 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(in, src, options...)
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(ctx, in, src, options...)
}
_, err := o.f.put(in, src, options, update)
_, err := o.f.put(ctx, in, src, options, update)
return err
}

// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(dir)
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(ctx, dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
@@ -770,10 +838,38 @@ func (o *ObjectInfo) Size() int64 {

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
return "", nil
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}

// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
do, ok := o.Object.(fs.SetTierer)
if !ok {
return errors.New("crypt: underlying remote does not support SetTier")
}
return do.SetTier(tier)
}

// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
do, ok := o.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -787,7 +883,15 @@ var (
_ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
)

@@ -6,13 +6,13 @@ import (
"path/filepath"
"testing"

"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive" // for integration tests
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/swift" // for integration tests
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/fstest/fstests"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive" // for integration tests
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/swift" // for integration tests
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote

@@ -21,8 +21,10 @@ func TestIntegration(t *testing.T) {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

@@ -42,6 +44,8 @@ func TestStandard(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

@@ -61,6 +65,8 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

@@ -80,6 +86,8 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
},
SkipBadWindowsCharacters: true,
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
File diff suppressed because it is too large
@@ -2,6 +2,7 @@ package drive

import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
@@ -10,16 +11,41 @@ import (
"strings"
"testing"

_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest/fstests"
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
)

func TestDriveScopes(t *testing.T) {
for _, test := range []struct {
in string
want []string
wantFlag bool
}{
{"", []string{
"https://www.googleapis.com/auth/drive",
}, false},
{" drive.file , drive.readonly", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.readonly",
}, false},
{" drive.file , drive.appfolder", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.appfolder",
}, true},
} {
got := driveScopes(test.in)
assert.Equal(t, test.want, got, test.in)
gotFlag := driveScopesContainsAppFolder(got)
assert.Equal(t, test.wantFlag, gotFlag, test.in)
}
}

/*
var additionalMimeTypes = map[string]string{
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
@@ -170,7 +196,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)

err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc")
require.NoError(t, err)
}

@@ -184,7 +210,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)

err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods")
require.NoError(t, err)
}

@@ -195,10 +221,10 @@ func (f *Fs) InternalTestDocumentExport(t *testing.T) {
f.exportExtensions, _, err = parseExtensions("txt")
require.NoError(t, err)

obj, err := f.NewObject("example2.txt")
obj, err := f.NewObject(context.Background(), "example2.txt")
require.NoError(t, err)

rc, err := obj.Open()
rc, err := obj.Open(context.Background())
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()

@@ -221,10 +247,10 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
f.exportExtensions, _, err = parseExtensions("link.html")
require.NoError(t, err)

obj, err := f.NewObject("example2.link.html")
obj, err := f.NewObject(context.Background(), "example2.link.html")
require.NoError(t, err)

rc, err := obj.Open()
rc, err := obj.Open(context.Background())
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()

@@ -1,11 +1,12 @@
// Test Drive filesystem interface

package drive

import (
"testing"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote

@@ -19,10 +19,10 @@ import (
"regexp"
"strconv"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/readers"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
@@ -56,9 +56,7 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string
"uploadType": {"resumable"},
"fields": {partialFields},
}
if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
}
params.Set("supportsAllDrives", "true")
if f.opt.KeepRevisionForever {
params.Set("keepRevisionForever", "true")
}
@@ -183,7 +181,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionaly on
// any other 2xx codes, so we parse it unconditionally on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err

@@ -5,7 +5,7 @@ import (
"fmt"
"testing"

"github.com/ncw/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/stretchr/testify/assert"
)

@@ -22,6 +22,7 @@ of path_display and all will be well.
*/

import (
"context"
"fmt"
"io"
"log"
@@ -31,22 +32,23 @@ import (
"time"

"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/oauth2"
)

@@ -129,8 +131,8 @@ Any files larger than this will be uploaded in chunks of this size.
Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
Default: fs.SizeSuffix(defaultChunkSize),
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "impersonate",
@@ -159,7 +161,7 @@ type Fs struct {
team team.Client // for the Teams API
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *pacer.Pacer // To pace the API calls
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
}

@@ -203,8 +205,17 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// FIXME there is probably a better way of doing this!
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
// handle any official Retry-After header from Dropbox's SDK first
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
return true, err
}
return fserrors.ShouldRetry(err), err
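
A minimal sketch of the RetryAfterError pattern introduced here (the wrapRetryAfter helper and the example package are editorial assumptions):

package example

import (
	"time"

	"github.com/rclone/rclone/lib/pacer"
)

// wrapRetryAfter wraps an error so the pacer waits at least the
// server-supplied delay before retrying, instead of its usual backoff.
func wrapRetryAfter(err error, retryAfterSeconds int) error {
	if retryAfterSeconds > 0 {
		return pacer.RetryAfterError(err, time.Duration(retryAfterSeconds)*time.Second)
	}
	return err
}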
@@ -229,7 +240,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}

// NewFs contstructs an Fs from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -263,7 +274,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
opt: *opt,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
@@ -431,7 +442,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}

@@ -444,7 +455,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
root := f.slashRoot
if dir != "" {
root += "/" + dir
@@ -531,22 +542,22 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(in, src, options...)
return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
root := path.Join(f.slashRoot, dir)

// can't create or run metadata on root
@@ -576,7 +587,7 @@ func (f *Fs) Mkdir(dir string) error {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
root := path.Join(f.slashRoot, dir)

// can't remove root
@@ -632,7 +643,7 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
@@ -677,7 +688,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() (err error) {
func (f *Fs) Purge(ctx context.Context) (err error) {
// Let dropbox delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
@@ -695,7 +706,7 @@ func (f *Fs) Purge() (err error) {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
@@ -735,7 +746,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(remote string) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
absPath := "/" + path.Join(f.Root(), remote)
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
@@ -788,7 +799,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -824,7 +835,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}

// About gets quota information
func (f *Fs) About() (usage *fs.Usage, err error) {
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var q *users.SpaceUsage
err = f.pacer.Call(func() (bool, error) {
q, err = f.users.GetSpaceUsage()
@@ -876,7 +887,7 @@ func (o *Object) Remote() string {
}

// Hash returns the dropbox special hash
func (o *Object) Hash(t hash.Type) (string, error) {
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.Dropbox {
return "", hash.ErrUnsupported
}
@@ -938,7 +949,7 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err)
@@ -950,7 +961,7 @@ func (o *Object) ModTime() time.Time {
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *Object) SetModTime(modTime time.Time) error {
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// Dropbox doesn't have a way of doing this so returning this
// error will cause the file to be deleted first then
// re-uploaded to set the time.
@@ -963,7 +974,7 @@ func (o *Object) Storable() bool {
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1089,7 +1100,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
fs.Logf(o, "File name disallowed - not uploading")
@@ -1098,7 +1109,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
commitInfo := files.NewCommitInfo(o.remotePath())
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second)
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)

size := src.Size()
var err error
@@ -1118,7 +1129,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}

// Remove an object
func (o *Object) Remove() (err error) {
func (o *Object) Remove(ctx context.Context) (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
return shouldRetry(err)

@@ -4,8 +4,8 @@ package dropbox
import (
"testing"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
backend/fichier/api.go (new file, 386 lines)
@@ -0,0 +1,386 @@
package fichier

import (
"context"
"io"
"net/http"
"regexp"
"strconv"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString

func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
}

var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, &token)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}

return &token, nil
}

func fileFromSharedFile(file *SharedFile) File {
return File{
URL: file.Link,
Filename: file.Filename,
Size: file.Size,
}
}

func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
}

var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, &sharedFiles)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}

entries = make([]fs.DirEntry, len(sharedFiles))

for i, sharedFile := range sharedFiles {
entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&sharedFile))
}

return entries, nil
}

func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
request := ListFilesRequest{
FolderID: directoryID,
}

opts := rest.Opts{
Method: "POST",
Path: "/file/ls.cgi",
}

filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, filesList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}

return filesList, nil
}

func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error) {
// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)

request := ListFolderRequest{
FolderID: directoryID,
}

opts := rest.Opts{
Method: "POST",
Path: "/folder/ls.cgi",
}

foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, foldersList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
}

// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)

return foldersList, err
}

func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}

directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}

folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}

files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}

folders, err := f.listFolders(folderID)
if err != nil {
return nil, err
}

entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))

for i, item := range files.Items {
entries[i] = f.newObjectFromFile(ctx, dir, item)
}

for i, folder := range folders.SubFolders {
createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate)
if err != nil {
return nil, err
}

folder.Name = restoreReservedChars(folder.Name)
fullPath := getRemote(dir, folder.Name)
folderID := strconv.Itoa(folder.ID)

entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID)

// fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID)
f.dirCache.Put(fullPath, folderID)
}

return entries, nil
}

func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object {
return &Object{
fs: f,
remote: getRemote(dir, item.Filename),
file: item,
}
}

func getRemote(dir, fileName string) string {
if dir == "" {
return fileName
}

return dir + "/" + fileName
}

func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := replaceReservedChars(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)

request := MakeFolderRequest{
FolderID: folderID,
Name: name,
}

opts := rest.Opts{
Method: "POST",
Path: "/folder/mkdir.cgi",
}

response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
}

// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)

return response, err
}

func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKResponse, err error) {
// fs.Debugf(f, "Removing folder with id `%s`", directoryID)

request := &RemoveFolderRequest{
FolderID: folderID,
}

opts := rest.Opts{
Method: "POST",
Path: "/folder/rm.cgi",
}

response = &GenericOKResponse{}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(&opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
return nil, errors.New("Can't remove non-empty dir")
}

// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)

return response, nil
}

func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
request := &RemoveFileRequest{
Files: []RmFile{
{url},
},
}

opts := rest.Opts{
Method: "POST",
Path: "/file/rm.cgi",
}

response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, request, response)
return shouldRetry(resp, err)
})

if err != nil {
return nil, errors.Wrap(err, "couldn't remove file")
}

// fs.Debugf(f, "Removed file with url `%s`", url)

return response, nil
}

func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")

opts := rest.Opts{
Method: "GET",
ContentType: "application/json", // 1Fichier API is bad
Path: "/upload/get_upload_server.cgi",
}

response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didn't get an upload node")
}

// fs.Debugf(f, "Got Upload node")

return response, err
}

func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)

if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}

opts := rest.Opts{
Method: "POST",
Path: "/upload.cgi",
Parameters: map[string][]string{
"id": {uploadID},
},
NoResponse: true,
Body: in,
ContentLength: &size,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{
"did": {folderID},
},
}

if node != "" {
opts.RootURL = "https://" + node
}

err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, nil)
return shouldRetry(resp, err)
})

if err != nil {
return nil, errors.Wrap(err, "couldn't upload file")
}

// fs.Debugf(f, "Uploaded File `%s`", fileName)

return response, err
}

func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)

if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}

opts := rest.Opts{
Method: "GET",
Path: "/end.pl",
RootURL: "https://" + nodeurl,
Parameters: map[string][]string{
"xid": {uploadID},
},
ExtraHeaders: map[string]string{
"JSON": "1",
},
}

response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, response)
return shouldRetry(resp, err)
})

if err != nil {
return nil, errors.Wrap(err, "couldn't finish file upload")
}

return response, err
}
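
A minimal sketch of how these helpers chain together for one upload (the exampleUpload method is an editorial assumption, not part of the new file):

// exampleUpload asks the API for an upload node, streams the file to it,
// then finalizes the upload to obtain the resulting link(s).
func (f *Fs) exampleUpload(in io.Reader, size int64, fileName, folderID string) (*EndFileUploadResponse, error) {
	node, err := f.getUploadNode()
	if err != nil {
		return nil, err
	}
	if _, err := f.uploadFile(in, size, fileName, folderID, node.ID, node.URL); err != nil {
		return nil, err
	}
	return f.endUpload(node.ID, node.URL)
}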

backend/fichier/fichier.go (new file, 411 lines)
@@ -0,0 +1,411 @@
package fichier

import (
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)

const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)

func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{
{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
},
{
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
},
},
})
}

// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
}

// Fs is the interface a cloud storage system must provide
type Fs struct {
root string
name string
features *fs.Features
dirCache *dircache.DirCache
baseClient *http.Client
options *Options
pacer *fs.Pacer
rest *rest.Client
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", false, err
}
folders, err := f.listFolders(folderID)
if err != nil {
return "", false, err
}

for _, folder := range folders.SubFolders {
if folder.Name == leaf {
pathIDOut := strconv.Itoa(folder.ID)
return pathIDOut, true, nil
}
}

return "", false, nil
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", err
}
resp, err := f.makeFolder(leaf, folderID)
if err != nil {
return "", err
}
return strconv.Itoa(resp.FolderID), err
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}

// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("1Fichier root '%s'", f.root)
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}

// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.Whirlpool)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}

// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, rootleaf string, config configmap.Mapper) (fs.Fs, error) {
root := replaceReservedChars(rootleaf)
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
return nil, err
}

// If using a Shared Folder override root
if opt.SharedFolder != "" {
root = ""
}

//workaround for wonky parser
root = strings.Trim(root, "/")

f := &Fs{
name: name,
root: root,
options: opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
baseClient: &http.Client{},
}

f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(f)

client := fshttp.NewClient(fs.Config)

f.rest = rest.NewClient(client).SetRoot(apiBaseURL)

f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)

f.dirCache = dircache.New(root, rootID, f)

ctx := context.Background()

// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
return f, nil
}
return nil, err
}
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.options.SharedFolder != "" {
return f.listSharedFiles(ctx, f.options.SharedFolder)
}

dirContent, err := f.listDir(ctx, dir)
if err != nil {
return nil, err
}

return dirContent, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}

folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}

for _, file := range files.Items {
if file.Filename == leaf {
path, ok := f.dirCache.GetInv(directoryID)

if !ok {
return nil, errors.New("Cannot find dir in dircache")
}

return f.newObjectFromFile(ctx, path, file), nil
}
}

return nil, fs.ErrorObjectNotFound
}

// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
}

// putUnchecked uploads the object with the given name and size
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(100E9) {
return nil, errors.New("File too big, can't upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}

nodeResponse, err := f.getUploadNode()
if err != nil {
return nil, err
}

leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}

_, err = f.uploadFile(in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}

fileUploadResponse, err := f.endUpload(nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}

if len(fileUploadResponse.Links) != 1 {
return nil, errors.New("unexpected amount of files")
}

link := fileUploadResponse.Links[0]
fileSize, err := strconv.ParseInt(link.Size, 10, 64)

if err != nil {
return nil, err
}

return &Object{
fs: f,
remote: remote,
file: File{
ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
Date: time.Now().Format("2006-01-02 15:04:05"),
Filename: link.Filename,
Pass: 0,
Size: int(fileSize),
URL: link.Download,
},
}, nil
}

// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}

directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}

folderID, err := strconv.Atoi(directoryID)
if err != nil {
return err
}

_, err = f.removeFolder(dir, folderID)
if err != nil {
return err
}

f.dirCache.FlushDir(dir)

return nil
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)
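
A minimal sketch of the calling convention NewFs above relies on (the openMaybeFile helper and the example package are editorial assumptions):

package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// openMaybeFile handles fs.ErrorIsFile from NewFs: the returned Fs is
// rooted at the parent directory and the object is fetched by leaf name.
func openMaybeFile(ctx context.Context, f fs.Fs, newFsErr error, leaf string) (fs.Object, error) {
	if newFsErr == fs.ErrorIsFile {
		return f.NewObject(ctx, leaf)
	}
	return nil, newFsErr // nil if the path was a directory
}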

backend/fichier/fichier_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test 1Fichier filesystem interface
package fichier

import (
"testing"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fs.Config.LogLevel = fs.LogLevelDebug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFichier:",
})
}

backend/fichier/object.go (new file, 158 lines)
@@ -0,0 +1,158 @@
package fichier

import (
"context"
"io"
"net/http"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
)

// Object is a filesystem like object provided by an Fs
type Object struct {
fs *Fs
remote string
file File
}

// String returns a description of the Object
func (o *Object) String() string {
return o.file.Filename
}

// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date)

if err != nil {
return time.Now()
}

return modTime
}

// Size returns the size of the file
func (o *Object) Size() int64 {
return int64(o.file.Size)
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.Whirlpool {
return "", hash.ErrUnsupported
}

return o.file.Checksum, nil
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(context.Context, time.Time) error {
return fs.ErrorCantSetModTime
//return errors.New("setting modtime is not supported for 1fichier remotes")
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, int64(o.file.Size))
downloadToken, err := o.fs.getDownloadToken(o.file.URL)

if err != nil {
return nil, err
}

var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: downloadToken.URL,
Options: options,
}

err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(&opts)
return shouldRetry(resp, err)
})

if err != nil {
return nil, err
}
return resp.Body, err
}

// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if src.Size() < 0 {
return errors.New("refusing to update with unknown size")
}

// upload with new size but old name
info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
if err != nil {
return err
}

// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to remove old version")
}

// Replace guts of old object with new one
*o = *info.(*Object)

return nil
}

// Remove removes this object
func (o *Object) Remove(ctx context.Context) error {
// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)

_, err := o.fs.deleteFile(o.file.URL)

if err != nil {
return err
}

return nil
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.file.ContentType
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.file.URL
}

// Check the interfaces are satisfied
var (
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
|
||||
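Object.Hash above only hands back the Whirlpool checksum that 1Fichier already stores. As an aside (not part of this diff), a client-side checksum for comparison could be computed with rclone's hash package; this is a minimal sketch assuming the MultiHasher API (NewHashSet, NewMultiHasherTypes, Sums) available around the time of this change:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/rclone/rclone/fs/hash"
)

func main() {
	// Build a hasher restricted to the Whirlpool hash type (assumed API).
	hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(hash.Whirlpool))
	if err != nil {
		panic(err)
	}
	// Stream the content through the hasher; a real caller would pass a file.
	if _, err := io.Copy(hasher, strings.NewReader("file contents")); err != nil {
		panic(err)
	}
	// Sums returns one lowercase hex digest per requested hash type.
	fmt.Println(hasher.Sums()[hash.Whirlpool])
}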
backend/fichier/replace.go (new file, 71 lines)
@@ -0,0 +1,71 @@
/*
Translate file names for 1fichier

1Fichier reserved characters

The following characters are 1Fichier reserved characters, and can't
be used in 1Fichier folder and file names.

*/

package fichier

import (
	"regexp"
	"strings"
)

// charMap holds replacements for characters
//
// 1Fichier has a restricted set of characters compared to other cloud
// storage systems, so we have to map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
	charMap = map[rune]rune{
		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
		'"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
		'\'': '＇', // FULLWIDTH APOSTROPHE
		'$':  '＄', // FULLWIDTH DOLLAR SIGN
		'`':  '｀', // FULLWIDTH GRAVE ACCENT
		' ':  '␠', // SYMBOL FOR SPACE
	}
	invCharMap           map[rune]rune
	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)

func init() {
	// Create inverse charMap
	invCharMap = make(map[rune]rune, len(charMap))
	for k, v := range charMap {
		invCharMap[v] = k
	}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
	// file names can't start with space either
	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
	// Replace reserved characters
	return strings.Map(func(c rune) rune {
		if replacement, ok := charMap[c]; ok && c != ' ' {
			return replacement
		}
		return c
	}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
	return strings.Map(func(c rune) rune {
		if replacement, ok := invCharMap[c]; ok {
			return replacement
		}
		return c
	}, in)
}
backend/fichier/replace_test.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package fichier

import "testing"

func TestReplace(t *testing.T) {
	for _, test := range []struct {
		in  string
		out string
	}{
		{"", ""},
		{"abc 123", "abc 123"},
		{"\"'<>/\\$`", "＂＇＜＞/＼＄｀"},
		{" leading space", "␠leading space"},
	} {
		got := replaceReservedChars(test.in)
		if got != test.out {
			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
		}
		got2 := restoreReservedChars(got)
		if got2 != test.in {
			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
		}
	}
}
backend/fichier/structs.go (new file, 120 lines)
@@ -0,0 +1,120 @@
package fichier

// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
	FolderID int `json:"folder_id"`
}

// ListFilesRequest is the request structure of the corresponding request
type ListFilesRequest struct {
	FolderID int `json:"folder_id"`
}

// DownloadRequest is the request structure of the corresponding request
type DownloadRequest struct {
	URL    string `json:"url"`
	Single int    `json:"single"`
}

// RemoveFolderRequest is the request structure of the corresponding request
type RemoveFolderRequest struct {
	FolderID int `json:"folder_id"`
}

// RemoveFileRequest is the request structure of the corresponding request
type RemoveFileRequest struct {
	Files []RmFile `json:"files"`
}

// RmFile is the request structure of the corresponding request
type RmFile struct {
	URL string `json:"url"`
}

// GenericOKResponse is the response structure of the corresponding request
type GenericOKResponse struct {
	Status  string `json:"status"`
	Message string `json:"message"`
}

// MakeFolderRequest is the request structure of the corresponding request
type MakeFolderRequest struct {
	Name     string `json:"name"`
	FolderID int    `json:"folder_id"`
}

// MakeFolderResponse is the response structure of the corresponding request
type MakeFolderResponse struct {
	Name     string `json:"name"`
	FolderID int    `json:"folder_id"`
}

// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
	ID  string `json:"id"`
	URL string `json:"url"`
}

// GetTokenResponse is the response structure of the corresponding request
type GetTokenResponse struct {
	URL     string `json:"url"`
	Status  string `json:"Status"`
	Message string `json:"Message"`
}

// SharedFolderResponse is the response structure of the corresponding request
type SharedFolderResponse []SharedFile

// SharedFile is the structure how 1Fichier returns a shared File
type SharedFile struct {
	Filename string `json:"filename"`
	Link     string `json:"link"`
	Size     int    `json:"size"`
}

// EndFileUploadResponse is the response structure of the corresponding request
type EndFileUploadResponse struct {
	Incoming int `json:"incoming"`
	Links    []struct {
		Download  string `json:"download"`
		Filename  string `json:"filename"`
		Remove    string `json:"remove"`
		Size      string `json:"size"`
		Whirlpool string `json:"whirlpool"`
	} `json:"links"`
}

// File is the structure how 1Fichier returns a File
type File struct {
	ACL         int    `json:"acl"`
	CDN         int    `json:"cdn"`
	Checksum    string `json:"checksum"`
	ContentType string `json:"content-type"`
	Date        string `json:"date"`
	Filename    string `json:"filename"`
	Pass        int    `json:"pass"`
	Size        int    `json:"size"`
	URL         string `json:"url"`
}

// FilesList is the structure how 1Fichier returns a list of files
type FilesList struct {
	Items  []File `json:"items"`
	Status string `json:"Status"`
}

// Folder is the structure how 1Fichier returns a Folder
type Folder struct {
	CreateDate string `json:"create_date"`
	ID         int    `json:"id"`
	Name       string `json:"name"`
	Pass       int    `json:"pass"`
}

// FoldersList is the structure how 1Fichier returns a list of Folders
type FoldersList struct {
	FolderID   int      `json:"folder_id"`
	Name       string   `json:"name"`
	Status     string   `json:"Status"`
	SubFolders []Folder `json:"sub_folders"`
}
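These are plain encoding/json mappings. As a standalone illustration (the sample payload below is invented for the example, not a captured 1Fichier response), decoding a file listing looks like this:

package main

import (
	"encoding/json"
	"fmt"
)

// File and FilesList mirror the struct definitions above.
type File struct {
	Filename string `json:"filename"`
	Size     int    `json:"size"`
	URL      string `json:"url"`
}

type FilesList struct {
	Items  []File `json:"items"`
	Status string `json:"Status"`
}

func main() {
	// Illustrative payload only; field names match the struct tags above.
	payload := []byte(`{"Status":"OK","items":[{"filename":"report.pdf","size":1234,"url":"https://1fichier.com/?abc"}]}`)
	var list FilesList
	if err := json.Unmarshal(payload, &list); err != nil {
		panic(err)
	}
	fmt.Printf("%s: %d item(s), first=%q\n", list.Status, len(list.Items), list.Items[0].Filename)
}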
@@ -2,6 +2,8 @@
package ftp

import (
	"context"
	"crypto/tls"
	"io"
	"net/textproto"
	"os"
@@ -10,13 +12,14 @@ import (
	"time"

	"github.com/jlaffaye/ftp"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/readers"
)

// Register with Fs
@@ -45,6 +48,20 @@ func init() {
			Help:       "FTP password",
			IsPassword: true,
			Required:   true,
		}, {
			Name:    "tls",
			Help:    "Use FTP over TLS (Implicit)",
			Default: false,
		}, {
			Name:     "concurrency",
			Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
			Default:  0,
			Advanced: true,
		}, {
			Name:     "no_check_certificate",
			Help:     "Do not verify the TLS certificate of the server",
			Default:  false,
			Advanced: true,
		},
	},
})
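Each registered option's Name matches a `config:"..."` tag on the Options struct in the next hunk. A minimal sketch of that decoding step, assuming rclone's configmap.Simple and configstruct.Set helpers from this era (the host value is a placeholder):

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
)

// Options mirrors (a subset of) the backend's Options struct below.
type Options struct {
	Host              string `config:"host"`
	TLS               bool   `config:"tls"`
	Concurrency       int    `config:"concurrency"`
	SkipVerifyTLSCert bool   `config:"no_check_certificate"`
}

func main() {
	// configmap.Simple is a plain map[string]string config source.
	m := configmap.Simple{
		"host":        "ftp.example.com", // hypothetical host
		"tls":         "true",
		"concurrency": "4",
	}
	opt := new(Options)
	// configstruct.Set fills the struct from the map using the config tags,
	// converting "true" and "4" into bool and int.
	if err := configstruct.Set(m, opt); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *opt)
}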
@@ -52,10 +69,13 @@ func init() {

// Options defines the configuration for this backend
type Options struct {
	Host string `config:"host"`
	User string `config:"user"`
	Pass string `config:"pass"`
	Port string `config:"port"`
	Host              string `config:"host"`
	User              string `config:"user"`
	Pass              string `config:"pass"`
	Port              string `config:"port"`
	TLS               bool   `config:"tls"`
	Concurrency       int    `config:"concurrency"`
	SkipVerifyTLSCert bool   `config:"no_check_certificate"`
}

// Fs represents a remote FTP server
@@ -70,6 +90,7 @@ type Fs struct {
	dialAddr string
	poolMu   sync.Mutex
	pool     []*ftp.ServerConn
	tokens   *pacer.TokenDispenser
}

// Object describes an FTP file
@@ -112,7 +133,15 @@ func (f *Fs) Features() *fs.Features {
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
	fs.Debugf(f, "Connecting to FTP server")
	c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
	ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
	if f.opt.TLS {
		tlsConfig := &tls.Config{
			ServerName:         f.opt.Host,
			InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
		}
		ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
	}
	c, err := ftp.Dial(f.dialAddr, ftpConfig...)
	if err != nil {
		fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
		return nil, errors.Wrap(err, "ftpConnection Dial")
@@ -128,6 +157,9 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {

// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
	if f.opt.Concurrency > 0 {
		f.tokens.Get()
	}
	f.poolMu.Lock()
	if len(f.pool) > 0 {
		c = f.pool[0]
@@ -147,6 +179,9 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
// if err is not nil then it checks the connection is alive using a
// NOOP request
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
	if f.opt.Concurrency > 0 {
		defer f.tokens.Put()
	}
	c := *pc
	*pc = nil
	if err != nil {
@@ -166,8 +201,9 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
	f.poolMu.Unlock()
}

// NewFs contstructs an Fs from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
	ctx := context.Background()
	// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
	// Parse config into Options struct
	opt := new(Options)
@@ -189,7 +225,11 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
	}

	dialAddr := opt.Host + ":" + port
	u := "ftp://" + path.Join(dialAddr+"/", root)
	protocol := "ftp://"
	if opt.TLS {
		protocol = "ftps://"
	}
	u := protocol + path.Join(dialAddr+"/", root)
	f := &Fs{
		name: name,
		root: root,
@@ -198,6 +238,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
		user:     user,
		pass:     pass,
		dialAddr: dialAddr,
		tokens:   pacer.NewTokenDispenser(opt.Concurrency),
	}
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
@@ -215,7 +256,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
		if f.root == "." {
			f.root = ""
		}
		_, err := f.NewObject(remote)
		_, err := f.NewObject(ctx, remote)
		if err != nil {
			if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
				// File doesn't exist so return old f
@@ -280,7 +321,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
	entry, err := f.findItem(remote)
	if err != nil {
@@ -324,17 +365,42 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
	c, err := f.getFtpConnection()
	if err != nil {
		return nil, errors.Wrap(err, "list")
	}
	files, err := c.List(path.Join(f.root, dir))
	f.putFtpConnection(&c, err)
	if err != nil {
		return nil, translateErrorDir(err)

	var listErr error
	var files []*ftp.Entry

	resultchan := make(chan []*ftp.Entry, 1)
	errchan := make(chan error, 1)
	go func() {
		result, err := c.List(path.Join(f.root, dir))
		f.putFtpConnection(&c, err)
		if err != nil {
			errchan <- err
			return
		}
		resultchan <- result
	}()

	// Wait for List for up to Timeout seconds
	timer := time.NewTimer(fs.Config.Timeout)
	select {
	case listErr = <-errchan:
		timer.Stop()
		return nil, translateErrorDir(listErr)
	case files = <-resultchan:
		timer.Stop()
	case <-timer.C:
		// if timer fired assume no error but connection dead
		fs.Errorf(f, "Timeout when waiting for List")
		return nil, errors.New("Timeout when waiting for List")
	}

	// Annoyingly FTP returns success for a directory which
	// doesn't exist, so check it really doesn't exist if no
	// entries found.
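The pattern the new List code uses (and which ftpReadCloser.Close reuses further down) generalizes: run the blocking call in a goroutine, hand results back over buffered channels, and race it against a timer so a dead server cannot hang the caller forever. A standalone sketch with illustrative names, not an rclone API:

package main

import (
	"errors"
	"fmt"
	"time"
)

// callWithTimeout runs call in a goroutine and gives up after timeout.
func callWithTimeout(call func() (string, error), timeout time.Duration) (string, error) {
	// Buffered channels so the goroutine never blocks (and leaks) if we
	// have already given up and returned.
	resultchan := make(chan string, 1)
	errchan := make(chan error, 1)
	go func() {
		result, err := call()
		if err != nil {
			errchan <- err
			return
		}
		resultchan <- result
	}()
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case err := <-errchan:
		return "", err
	case result := <-resultchan:
		return result, nil
	case <-timer.C:
		// Timer fired: assume the connection is dead.
		return "", errors.New("timeout waiting for call")
	}
}

func main() {
	slow := func() (string, error) { time.Sleep(2 * time.Second); return "done", nil }
	_, err := callWithTimeout(slow, 100*time.Millisecond)
	fmt.Println(err) // timeout waiting for call
}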
@@ -389,7 +455,7 @@ func (f *Fs) Precision() time.Duration {
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// fs.Debugf(f, "Trying to put file %s", src.Remote())
	err := f.mkParentDir(src.Remote())
	if err != nil {
@@ -399,13 +465,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
		fs:     f,
		remote: src.Remote(),
	}
	err = o.Update(in, src, options...)
	err = o.Update(ctx, in, src, options...)
	return o, err
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(in, src, options...)
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// getInfo reads the FileInfo for a path
@@ -483,7 +549,7 @@ func (f *Fs) mkParentDir(remote string) error {
}

// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) (err error) {
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	// defer fs.Trace(dir, "")("err=%v", &err)
	root := path.Join(f.root, dir)
	return f.mkdir(root)
@@ -492,7 +558,7 @@ func (f *Fs) Mkdir(dir string) (err error) {
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	c, err := f.getFtpConnection()
	if err != nil {
		return errors.Wrap(translateErrorFile(err), "Rmdir")
@@ -503,7 +569,7 @@ func (f *Fs) Rmdir(dir string) error {
}

// Move renames a remote file object
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
@@ -525,7 +591,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
	if err != nil {
		return nil, errors.Wrap(err, "Move Rename failed")
	}
	dstObj, err := f.NewObject(remote)
	dstObj, err := f.NewObject(ctx, remote)
	if err != nil {
		return nil, errors.Wrap(err, "Move NewObject failed")
	}
@@ -540,7 +606,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -603,7 +669,7 @@ func (o *Object) Remote() string {
}

// Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

@@ -613,12 +679,12 @@ func (o *Object) Size() int64 {
}

// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.info.ModTime
}

// SetModTime sets the modification time of the object
func (o *Object) SetModTime(modTime time.Time) error {
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return nil
}

@@ -646,7 +712,21 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {

// Close the FTP reader and return the connection to the pool
func (f *ftpReadCloser) Close() error {
	err := f.rc.Close()
	var err error
	errchan := make(chan error, 1)
	go func() {
		errchan <- f.rc.Close()
	}()
	// Wait for Close for up to 60 seconds
	timer := time.NewTimer(60 * time.Second)
	select {
	case err = <-errchan:
		timer.Stop()
	case <-timer.C:
		// if timer fired assume no error but connection dead
		fs.Errorf(f.f, "Timeout when waiting for connection Close")
		return nil
	}
	// if errors while reading or closing, dump the connection
	if err != nil || f.err != nil {
		_ = f.c.Quit()
@@ -665,7 +745,7 @@ func (f *ftpReadCloser) Close() error {
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
	// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
	path := path.Join(o.fs.root, o.remote)
	var offset, limit int64 = 0, -1
@@ -699,7 +779,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
	path := path.Join(o.fs.root, o.remote)
	// remove the file if upload failed
@@ -709,7 +789,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
	// able to think of a better method to find out if the server has finished - ncw
	time.Sleep(1 * time.Second)
	removeErr := o.Remove()
	removeErr := o.Remove(ctx)
	if removeErr != nil {
		fs.Debugf(o, "Failed to remove: %v", removeErr)
	} else {
@@ -735,7 +815,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}

// Remove an object
func (o *Object) Remove() (err error) {
func (o *Object) Remove(ctx context.Context) (err error) {
	// defer fs.Trace(o, "")("err=%v", &err)
	path := path.Join(o.fs.root, o.remote)
	// Check if it's a directory or a file
@@ -744,7 +824,7 @@ func (o *Object) Remove() (err error) {
		return err
	}
	if info.IsDir {
		err = o.fs.Rmdir(o.remote)
		err = o.fs.Rmdir(ctx, o.remote)
	} else {
		c, err := o.fs.getFtpConnection()
		if err != nil {
@@ -4,8 +4,8 @@ package ftp_test
import (
	"testing"

	"github.com/ncw/rclone/backend/ftp"
	"github.com/ncw/rclone/fstest/fstests"
	"github.com/rclone/rclone/backend/ftp"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
@@ -13,6 +13,7 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
*/

import (
	"context"
	"encoding/base64"
	"encoding/hex"
	"fmt"
@@ -27,21 +28,23 @@ import (
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/walk"
	"github.com/ncw/rclone/lib/oauthutil"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/googleapi"

	// NOTE: This API is deprecated
	storage "google.golang.org/api/storage/v1"
)

@@ -58,7 +61,7 @@ const (
var (
	// Description of how to auth for this app
	storageConfig = &oauth2.Config{
		Scopes:       []string{storage.DevstorageFullControlScope},
		Scopes:       []string{storage.DevstorageReadWriteScope},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -141,6 +144,22 @@ func init() {
			Value: "publicReadWrite",
			Help:  "Project team owners get OWNER access, and all Users get WRITER access.",
		}},
	}, {
		Name: "bucket_policy_only",
		Help: `Access checks should use bucket-level IAM policies.

If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.

When it is set, rclone:

- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set

Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
		Default: false,
	}, {
		Name: "location",
		Help: "Location for the newly created buckets.",
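Concretely, when bucket_policy_only is set the backend has to create buckets with an IamConfiguration and skip predefined ACLs, which is what the Mkdir hunk later in this diff does. A condensed standalone sketch of that call against the (deprecated) storage/v1 API; the project number and bucket name are placeholders, and construction of the authenticated service is elided:

package gcsexample

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// createBPOBucket creates bucketName with Bucket Policy Only enabled.
// svc must be an authenticated *storage.Service (setup not shown here).
func createBPOBucket(svc *storage.Service, projectNumber, bucketName string) error {
	bucket := storage.Bucket{
		Name: bucketName,
		IamConfiguration: &storage.BucketIamConfiguration{
			BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
				Enabled: true,
			},
		},
	}
	// Note: no PredefinedAcl() on the insert call; ACLs are ignored in
	// this mode, and setting one would be rejected by the server.
	_, err := svc.Buckets.Insert(projectNumber, &bucket).Do()
	if err != nil {
		return fmt.Errorf("create bucket %q: %v", bucketName, err)
	}
	return nil
}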
@@ -159,21 +178,36 @@ func init() {
		}, {
			Value: "asia-east1",
			Help:  "Taiwan.",
		}, {
			Value: "asia-east2",
			Help:  "Hong Kong.",
		}, {
			Value: "asia-northeast1",
			Help:  "Tokyo.",
		}, {
			Value: "asia-south1",
			Help:  "Mumbai.",
		}, {
			Value: "asia-southeast1",
			Help:  "Singapore.",
		}, {
			Value: "australia-southeast1",
			Help:  "Sydney.",
		}, {
			Value: "europe-north1",
			Help:  "Finland.",
		}, {
			Value: "europe-west1",
			Help:  "Belgium.",
		}, {
			Value: "europe-west2",
			Help:  "London.",
		}, {
			Value: "europe-west3",
			Help:  "Frankfurt.",
		}, {
			Value: "europe-west4",
			Help:  "Netherlands.",
		}, {
			Value: "us-central1",
			Help:  "Iowa.",
@@ -186,6 +220,9 @@ func init() {
		}, {
			Value: "us-west1",
			Help:  "Oregon.",
		}, {
			Value: "us-west2",
			Help:  "California.",
		}},
	}, {
		Name: "storage_class",
@@ -220,6 +257,7 @@ type Options struct {
	ServiceAccountCredentials string `config:"service_account_credentials"`
	ObjectACL                 string `config:"object_acl"`
	BucketACL                 string `config:"bucket_acl"`
	BucketPolicyOnly          bool   `config:"bucket_policy_only"`
	Location                  string `config:"location"`
	StorageClass              string `config:"storage_class"`
}
@@ -235,7 +273,7 @@ type Fs struct {
	bucket     string       // the bucket we are working on
	bucketOKMu sync.Mutex   // mutex to protect bucket OK
	bucketOK   bool         // true if we have created the bucket
	pacer      *pacer.Pacer // To pace the API calls
	pacer      *fs.Pacer    // To pace the API calls
}

// Object describes a storage object
@@ -279,7 +317,7 @@ func (f *Fs) Features() *fs.Features {
	return f.features
}

// shouldRetry determines whehter a given err rates being retried
// shouldRetry determines whether a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
	again = false
	if err != nil {
@@ -327,7 +365,7 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}

// NewFs contstructs an Fs from the path, bucket:path
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	var oAuthClient *http.Client

@@ -360,7 +398,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	} else {
		oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
		if err != nil {
			return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
			ctx := context.Background()
			oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
			if err != nil {
				return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
			}
		}
	}

@@ -374,7 +416,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		bucket: bucket,
		root:   directory,
		opt:    *opt,
		pacer:  pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
		pacer:  fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
	}
	f.features = (&fs.Features{
		ReadMimeType: true,
@@ -431,7 +473,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}

@@ -443,7 +485,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) (err error) {
	root := f.root
	rootLength := len(root)
	if dir != "" {
@@ -532,9 +574,9 @@ func (f *Fs) markBucketOK() {
}

// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// List the objects
	err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
	err = f.list(ctx, dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
@@ -591,11 +633,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	if f.bucket == "" {
		return f.listBuckets(dir)
	}
	return f.listDir(dir)
	return f.listDir(ctx, dir)
}

// ListR lists the objects and directories of the Fs starting
@@ -614,12 +656,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	if f.bucket == "" {
		return fs.ErrorListBucketRequired
	}
	list := walk.NewListRHelper(callback)
	err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
	err = f.list(ctx, dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
@@ -639,22 +681,22 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(in, src, options...)
	return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(in, src, options...)
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) (err error) {
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.bucketOK {
@@ -688,8 +730,19 @@ func (f *Fs) Mkdir(dir string) (err error) {
		Location:     f.opt.Location,
		StorageClass: f.opt.StorageClass,
	}
	if f.opt.BucketPolicyOnly {
		bucket.IamConfiguration = &storage.BucketIamConfiguration{
			BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
				Enabled: true,
			},
		}
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
		insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
		if !f.opt.BucketPolicyOnly {
			insertBucket.PredefinedAcl(f.opt.BucketACL)
		}
		_, err = insertBucket.Do()
		return shouldRetry(err)
	})
	if err == nil {
@@ -702,7 +755,7 @@ func (f *Fs) Mkdir(dir string) (err error) {
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(dir string) (err error) {
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.root != "" || dir != "" {
@@ -732,8 +785,8 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	err := f.Mkdir("")
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	err := f.Mkdir(ctx, "")
	if err != nil {
		return nil, err
	}
@@ -792,7 +845,7 @@ func (o *Object) Remote() string {
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
@@ -866,7 +919,7 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData()
	if err != nil {
		// fs.Logf(o, "Failed to read metadata: %v", err)
@@ -883,7 +936,7 @@ func metadataFromModTime(modTime time.Time) map[string]string {
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) (err error) {
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	// This only adds metadata so will preserve other metadata
	object := storage.Object{
		Bucket: o.fs.bucket,
@@ -908,7 +961,7 @@ func (o *Object) Storable() bool {
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	req, err := http.NewRequest("GET", o.url, nil)
	if err != nil {
		return nil, err
@@ -939,23 +992,26 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	err := o.fs.Mkdir("")
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	err := o.fs.Mkdir(ctx, "")
	if err != nil {
		return err
	}
	modTime := src.ModTime()
	modTime := src.ModTime(ctx)

	object := storage.Object{
		Bucket:      o.fs.bucket,
		Name:        o.fs.root + o.remote,
		ContentType: fs.MimeType(src),
		Updated:     modTime.Format(timeFormatOut), // Doesn't get set
		ContentType: fs.MimeType(ctx, src),
		Metadata:    metadataFromModTime(modTime),
	}
	var newObject *storage.Object
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
		insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
		if !o.fs.opt.BucketPolicyOnly {
			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
		}
		newObject, err = insertObject.Do()
		return shouldRetry(err)
	})
	if err != nil {
@@ -967,7 +1023,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}

// Remove an object
func (o *Object) Remove() (err error) {
func (o *Object) Remove(ctx context.Context) (err error) {
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
		return shouldRetry(err)
@@ -976,7 +1032,7 @@ func (o *Object) Remove() (err error) {
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}
@@ -1,11 +1,12 @@
// Test GoogleCloudStorage filesystem interface

package googlecloudstorage_test

import (
	"testing"

	"github.com/ncw/rclone/backend/googlecloudstorage"
	"github.com/ncw/rclone/fstest/fstests"
	"github.com/rclone/rclone/backend/googlecloudstorage"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
backend/googlephotos/albums.go (new file, 148 lines)
@@ -0,0 +1,148 @@
// This file contains the albums abstraction

package googlephotos

import (
	"path"
	"strings"
	"sync"

	"github.com/rclone/rclone/backend/googlephotos/api"
)

// All the albums
type albums struct {
	mu      sync.Mutex
	dupes   map[string][]*api.Album // duplicated names
	byID    map[string]*api.Album   //..indexed by ID
	byTitle map[string]*api.Album   //..indexed by Title
	path    map[string][]string     // partial album names to directory
}

// Create a new album
func newAlbums() *albums {
	return &albums{
		dupes:   map[string][]*api.Album{},
		byID:    map[string]*api.Album{},
		byTitle: map[string]*api.Album{},
		path:    map[string][]string{},
	}
}

// add an album
func (as *albums) add(album *api.Album) {
	// Munge the name of the album into a sensible path name
	album.Title = path.Clean(album.Title)
	if album.Title == "." || album.Title == "/" {
		album.Title = addID("", album.ID)
	}

	as.mu.Lock()
	as._add(album)
	as.mu.Unlock()
}

// _add an album - call with lock held
func (as *albums) _add(album *api.Album) {
	// update dupes by title
	dupes := as.dupes[album.Title]
	dupes = append(dupes, album)
	as.dupes[album.Title] = dupes

	// Dedupe the album name if necessary
	if len(dupes) >= 2 {
		// If this is the first dupe, then need to adjust the first one
		if len(dupes) == 2 {
			firstAlbum := dupes[0]
			as._del(firstAlbum)
			as._add(firstAlbum)
			// undo add of firstAlbum to dupes
			as.dupes[album.Title] = dupes
		}
		album.Title = addID(album.Title, album.ID)
	}

	// Store the new album
	as.byID[album.ID] = album
	as.byTitle[album.Title] = album

	// Store the partial paths
	dir, leaf := album.Title, ""
	for dir != "" {
		i := strings.LastIndex(dir, "/")
		if i >= 0 {
			dir, leaf = dir[:i], dir[i+1:]
		} else {
			dir, leaf = "", dir
		}
		dirs := as.path[dir]
		found := false
		for _, dir := range dirs {
			if dir == leaf {
				found = true
			}
		}
		if !found {
			as.path[dir] = append(as.path[dir], leaf)
		}
	}
}

// del an album
func (as *albums) del(album *api.Album) {
	as.mu.Lock()
	as._del(album)
	as.mu.Unlock()
}

// _del an album - call with lock held
func (as *albums) _del(album *api.Album) {
	// We leave in dupes so it doesn't cause albums to get renamed

	// Remove from byID and byTitle
	delete(as.byID, album.ID)
	delete(as.byTitle, album.Title)

	// Remove from paths
	dir, leaf := album.Title, ""
	for dir != "" {
		// Can't delete if this dir exists anywhere in the path structure
		if _, found := as.path[dir]; found {
			break
		}
		i := strings.LastIndex(dir, "/")
		if i >= 0 {
			dir, leaf = dir[:i], dir[i+1:]
		} else {
			dir, leaf = "", dir
		}
		dirs := as.path[dir]
		for i, dir := range dirs {
			if dir == leaf {
				dirs = append(dirs[:i], dirs[i+1:]...)
				break
			}
		}
		if len(dirs) == 0 {
			delete(as.path, dir)
		} else {
			as.path[dir] = dirs
		}
	}
}

// get an album by title
func (as *albums) get(title string) (album *api.Album, ok bool) {
	as.mu.Lock()
	defer as.mu.Unlock()
	album, ok = as.byTitle[title]
	return album, ok
}

// getDirs gets directories below an album path
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
	as.mu.Lock()
	defer as.mu.Unlock()
	dirs, ok = as.path[albumPath]
	return dirs, ok
}
backend/googlephotos/albums_test.go (new file, 311 lines)
@@ -0,0 +1,311 @@
package googlephotos

import (
	"testing"

	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/stretchr/testify/assert"
)

func TestNewAlbums(t *testing.T) {
	albums := newAlbums()
	assert.NotNil(t, albums.dupes)
	assert.NotNil(t, albums.byID)
	assert.NotNil(t, albums.byTitle)
	assert.NotNil(t, albums.path)
}

func TestAlbumsAdd(t *testing.T) {
	albums := newAlbums()

	assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{}, albums.byID)
	assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
	assert.Equal(t, map[string][]string{}, albums.path)

	a1 := &api.Album{
		Title: "one",
		ID:    "1",
	}
	albums.add(a1)

	assert.Equal(t, map[string][]*api.Album{
		"one": []*api.Album{a1},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1": a1,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one": a1,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"": []string{"one"},
	}, albums.path)

	a2 := &api.Album{
		Title: "two",
		ID:    "2",
	}
	albums.add(a2)

	assert.Equal(t, map[string][]*api.Album{
		"one": []*api.Album{a1},
		"two": []*api.Album{a2},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1": a1,
		"2": a2,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one": a1,
		"two": a2,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"": []string{"one", "two"},
	}, albums.path)

	// Add a duplicate
	a2a := &api.Album{
		Title: "two",
		ID:    "2a",
	}
	albums.add(a2a)

	assert.Equal(t, map[string][]*api.Album{
		"one": []*api.Album{a1},
		"two": []*api.Album{a2, a2a},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1":  a1,
		"2":  a2,
		"2a": a2a,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one":      a1,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"": []string{"one", "two {2}", "two {2a}"},
	}, albums.path)

	// Add a sub directory
	a1sub := &api.Album{
		Title: "one/sub",
		ID:    "1sub",
	}
	albums.add(a1sub)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1":    a1,
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one":      a1,
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)

	// Add a weird path
	a0 := &api.Album{
		Title: "/../././..////.",
		ID:    "0",
	}
	albums.add(a0)

	assert.Equal(t, map[string][]*api.Album{
		"{0}":     []*api.Album{a0},
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"0":    a0,
		"1":    a1,
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"{0}":      a0,
		"one":      a1,
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}", "{0}"},
		"one": []string{"sub"},
	}, albums.path)
}

func TestAlbumsDel(t *testing.T) {
	albums := newAlbums()

	a1 := &api.Album{
		Title: "one",
		ID:    "1",
	}
	albums.add(a1)

	a2 := &api.Album{
		Title: "two",
		ID:    "2",
	}
	albums.add(a2)

	// Add a duplicate
	a2a := &api.Album{
		Title: "two",
		ID:    "2a",
	}
	albums.add(a2a)

	// Add a sub directory
	a1sub := &api.Album{
		Title: "one/sub",
		ID:    "1sub",
	}
	albums.add(a1sub)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1":    a1,
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one":      a1,
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)

	albums.del(a1)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)

	albums.del(a2)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one/sub":  a1sub,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)

	albums.del(a2a)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one/sub": a1sub,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one"},
		"one": []string{"sub"},
	}, albums.path)

	albums.del(a1sub)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{}, albums.byID)
	assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
	assert.Equal(t, map[string][]string{}, albums.path)
}

func TestAlbumsGet(t *testing.T) {
	albums := newAlbums()

	a1 := &api.Album{
		Title: "one",
		ID:    "1",
	}
	albums.add(a1)

	album, ok := albums.get("one")
	assert.Equal(t, true, ok)
	assert.Equal(t, a1, album)

	album, ok = albums.get("notfound")
	assert.Equal(t, false, ok)
	assert.Nil(t, album)
}

func TestAlbumsGetDirs(t *testing.T) {
	albums := newAlbums()

	a1 := &api.Album{
		Title: "one",
		ID:    "1",
	}
	albums.add(a1)

	dirs, ok := albums.getDirs("")
	assert.Equal(t, true, ok)
	assert.Equal(t, []string{"one"}, dirs)

	dirs, ok = albums.getDirs("notfound")
	assert.Equal(t, false, ok)
	assert.Nil(t, dirs)
}
backend/googlephotos/api/types.go (new file, 190 lines)
@@ -0,0 +1,190 @@
package api

import (
	"fmt"
	"time"
)

// ErrorDetails in the internals of the Error type
type ErrorDetails struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Status  string `json:"status"`
}

// Error is returned on errors
type Error struct {
	Details ErrorDetails `json:"error"`
}

// Error satisfies the error interface
func (e *Error) Error() string {
	return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}

// Album of photos
type Album struct {
	ID                    string `json:"id,omitempty"`
	Title                 string `json:"title"`
	ProductURL            string `json:"productUrl,omitempty"`
	MediaItemsCount       string `json:"mediaItemsCount,omitempty"`
	CoverPhotoBaseURL     string `json:"coverPhotoBaseUrl,omitempty"`
	CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
	IsWriteable           bool   `json:"isWriteable,omitempty"`
}

// ListAlbums is returned from albums.list and sharedAlbums.list
type ListAlbums struct {
	Albums        []Album `json:"albums"`
	SharedAlbums  []Album `json:"sharedAlbums"`
	NextPageToken string  `json:"nextPageToken"`
}

// CreateAlbum creates an Album
type CreateAlbum struct {
	Album *Album `json:"album"`
}

// MediaItem is a photo or video
type MediaItem struct {
	ID            string `json:"id"`
	ProductURL    string `json:"productUrl"`
	BaseURL       string `json:"baseUrl"`
	MimeType      string `json:"mimeType"`
	MediaMetadata struct {
		CreationTime time.Time `json:"creationTime"`
		Width        string    `json:"width"`
		Height       string    `json:"height"`
		Photo        struct {
		} `json:"photo"`
	} `json:"mediaMetadata"`
	Filename string `json:"filename"`
}

// MediaItems is returned from mediaitems.list, mediaitems.search
type MediaItems struct {
	MediaItems    []MediaItem `json:"mediaItems"`
	NextPageToken string      `json:"nextPageToken"`
}
//Content categories
|
||||
// NONE Default content category. This category is ignored when any other category is used in the filter.
|
||||
// LANDSCAPES Media items containing landscapes.
|
||||
// RECEIPTS Media items containing receipts.
|
||||
// CITYSCAPES Media items containing cityscapes.
|
||||
// LANDMARKS Media items containing landmarks.
|
||||
// SELFIES Media items that are selfies.
|
||||
// PEOPLE Media items containing people.
|
||||
// PETS Media items containing pets.
|
||||
// WEDDINGS Media items from weddings.
|
||||
// BIRTHDAYS Media items from birthdays.
|
||||
// DOCUMENTS Media items containing documents.
|
||||
// TRAVEL Media items taken during travel.
|
||||
// ANIMALS Media items containing animals.
|
||||
// FOOD Media items containing food.
|
||||
// SPORT Media items from sporting events.
|
||||
// NIGHT Media items taken at night.
|
||||
// PERFORMANCES Media items from performances.
|
||||
// WHITEBOARDS Media items containing whiteboards.
|
||||
// SCREENSHOTS Media items that are screenshots.
|
||||
// UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc.
|
||||
// ARTS Media items containing art.
|
||||
// CRAFTS Media items containing crafts.
|
||||
// FASHION Media items related to fashion.
|
||||
// HOUSES Media items containing houses.
|
||||
// GARDENS Media items containing gardens.
|
||||
// FLOWERS Media items containing flowers.
|
||||
// HOLIDAYS Media items taken of holidays.
|
||||
|
||||
// MediaTypes
|
||||
// ALL_MEDIA Treated as if no filters are applied. All media types are included.
|
||||
// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
|
||||
// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.
|
||||
|
||||
// Features
|
||||
// NONE Treated as if no filters are applied. All features are included.
|
||||
// FAVORITES Media items that the user has marked as favorites in the Google Photos app.
|
||||
|
||||
// Date is used as part of SearchFilter
|
||||
type Date struct {
|
||||
Year int `json:"year,omitempty"`
|
||||
Month int `json:"month,omitempty"`
|
||||
Day int `json:"day,omitempty"`
|
||||
}
|
||||
|
||||
// DateFilter is uses to add date ranges to media item queries
|
||||
type DateFilter struct {
|
||||
Dates []Date `json:"dates,omitempty"`
|
||||
Ranges []struct {
|
||||
StartDate Date `json:"startDate,omitempty"`
|
||||
EndDate Date `json:"endDate,omitempty"`
|
||||
} `json:"ranges,omitempty"`
|
||||
}
|
||||
|
||||
// ContentFilter is uses to add content categories to media item queries
|
||||
type ContentFilter struct {
|
||||
IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
|
||||
ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
|
||||
}
|
||||
|
||||
// MediaTypeFilter is uses to add media types to media item queries
|
||||
type MediaTypeFilter struct {
|
||||
MediaTypes []string `json:"mediaTypes,omitempty"`
|
||||
}
|
||||
|
||||
// FeatureFilter is uses to add features to media item queries
|
||||
type FeatureFilter struct {
|
||||
IncludedFeatures []string `json:"includedFeatures,omitempty"`
|
||||
}
|
||||
|
||||
// Filters combines all the filter types for media item queries
|
||||
type Filters struct {
|
||||
DateFilter *DateFilter `json:"dateFilter,omitempty"`
|
||||
ContentFilter *ContentFilter `json:"contentFilter,omitempty"`
|
||||
MediaTypeFilter *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
|
||||
FeatureFilter *FeatureFilter `json:"featureFilter,omitempty"`
|
||||
IncludeArchivedMedia *bool `json:"includeArchivedMedia,omitempty"`
|
||||
ExcludeNonAppCreatedData *bool `json:"excludeNonAppCreatedData,omitempty"`
|
||||
}
|
||||
|
||||
// SearchFilter is uses with mediaItems.search
|
||||
type SearchFilter struct {
|
||||
AlbumID string `json:"albumId,omitempty"`
|
||||
PageSize int `json:"pageSize"`
|
||||
PageToken string `json:"pageToken,omitempty"`
|
||||
Filters *Filters `json:"filters,omitempty"`
|
||||
}
|
||||
|
||||
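A sketch (not part of this diff) of how the filter types above compose into the body POSTed to mediaItems:search; the categories FOOD and PHOTO come from the lists above, the rest of the values are chosen for illustration:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/googlephotos/api"
)

func main() {
	// Photos of food taken in July 2013
	filter := api.SearchFilter{
		PageSize: 100,
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{{Year: 2013, Month: 7}},
			},
			ContentFilter: &api.ContentFilter{
				IncludedContentCategories: []string{"FOOD"},
			},
			MediaTypeFilter: &api.MediaTypeFilter{
				MediaTypes: []string{"PHOTO"},
			},
		},
	}
	out, _ := json.MarshalIndent(filter, "", "  ")
	fmt.Println(string(out)) // the JSON body sent to /mediaItems:search
}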
// SimpleMediaItem is part of NewMediaItem
type SimpleMediaItem struct {
	UploadToken string `json:"uploadToken"`
}

// NewMediaItem is a single media item for upload
type NewMediaItem struct {
	Description     string          `json:"description"`
	SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
}

// BatchCreateRequest creates media items from upload tokens
type BatchCreateRequest struct {
	AlbumID       string         `json:"albumId,omitempty"`
	NewMediaItems []NewMediaItem `json:"newMediaItems"`
}

// BatchCreateResponse is returned from BatchCreateRequest
type BatchCreateResponse struct {
	NewMediaItemResults []struct {
		UploadToken string `json:"uploadToken"`
		Status      struct {
			Message string `json:"message"`
			Code    int    `json:"code"`
		} `json:"status"`
		MediaItem MediaItem `json:"mediaItem"`
	} `json:"newMediaItemResults"`
}

// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
	MediaItemIds []string `json:"mediaItemIds"`
}
backend/googlephotos/googlephotos.go (new file, 970 lines)
@@ -0,0 +1,970 @@
// Package googlephotos provides an interface to Google Photos
package googlephotos

// FIXME Resumable uploads not implemented - rclone can't resume uploads in general

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	golog "log"
	"net/http"
	"net/url"
	"path"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/dirtree"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

var (
	errCantUpload  = errors.New("can't upload files here")
	errCantMkdir   = errors.New("can't make directories here")
	errCantRmdir   = errors.New("can't remove this directory")
	errAlbumDelete = errors.New("google photos API does not implement deleting albums")
	errRemove      = errors.New("google photos API only implements removing files from albums")
	errOwnAlbums   = errors.New("google photos API only allows uploading to albums rclone created")
)

const (
	rcloneClientID              = "202264815644-rt1o1c9evjaotbpbab10m83i8cnjk077.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "kLJLretPefBgrDHosdml_nlF64HZ9mUcO85X5rdjYBPP8ChA-jr3Ow"
	rootURL                     = "https://photoslibrary.googleapis.com/v1"
	listChunks                  = 100 // chunk size to read directory listings
	albumChunks                 = 50  // chunk size to read album listings
	minSleep                    = 10 * time.Millisecond
	scopeReadOnly               = "https://www.googleapis.com/auth/photoslibrary.readonly"
	scopeReadWrite              = "https://www.googleapis.com/auth/photoslibrary"
)

var (
	// Description of how to auth for this app
	oauthConfig = &oauth2.Config{
		Scopes: []string{
			scopeReadWrite,
		},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "google photos",
		Prefix:      "gphotos",
		Description: "Google Photos",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
			// Parse config into Options struct
			opt := new(Options)
			err := configstruct.Set(m, opt)
			if err != nil {
				fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
				return
			}

			// Fill in the scopes
			if opt.ReadOnly {
				oauthConfig.Scopes[0] = scopeReadOnly
			} else {
				oauthConfig.Scopes[0] = scopeReadWrite
			}

			// Do the oauth
			err = oauthutil.Config("google photos", name, m, oauthConfig)
			if err != nil {
				golog.Fatalf("Failed to configure token: %v", err)
			}

			// Warn the user
			fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.

`)
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Google Application Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Google Application Client Secret\nLeave blank normally.",
		}, {
			Name:    "read_only",
			Default: false,
			Help: `Set to make the Google Photos backend read only.

If you choose read only then rclone will only request read only access
to your photos, otherwise rclone will request full access.`,
		}, {
			Name:    "read_size",
			Default: false,
			Help: `Set to read the size of media items.

Normally rclone does not read the size of media items since this takes
another transaction. This isn't necessary for syncing. However
rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.`,
			Advanced: true,
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	ReadOnly bool `config:"read_only"`
	ReadSize bool `config:"read_size"`
}

// Fs represents a remote storage server
type Fs struct {
	name       string           // name of this remote
	root       string           // the path we are working on if any
	opt        Options          // parsed options
	features   *fs.Features     // optional features
	srv        *rest.Client     // the connection to the Google Photos server
	pacer      *fs.Pacer        // To pace the API calls
	startTime  time.Time        // time Fs was started - used for datestamps
	albumsMu   sync.Mutex       // protect albums (but not contents)
	albums     map[bool]*albums // albums, shared or not
	uploadedMu sync.Mutex       // to protect the below
	uploaded   dirtree.DirTree  // record of uploaded items
	createMu   sync.Mutex       // held when creating albums to prevent dupes
}

// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	url      string    // download path
	id       string    // ID of this object
	bytes    int64     // Bytes in the object
	modTime  time.Time // Modified time of the object
	mimeType string
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("Google Photos path %q", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// dirTime returns the time to set a directory to
func (f *Fs) dirTime() time.Time {
	return f.startTime
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
	body, err := rest.ReadBody(resp)
	if err != nil {
		body = nil
	}
	var e = api.Error{
		Details: api.ErrorDetails{
			Code:    resp.StatusCode,
			Message: string(body),
			Status:  resp.Status,
		},
	}
	if body != nil {
		_ = json.Unmarshal(body, &e)
	}
	return &e
}

// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure Google Photos")
	}

	root = strings.Trim(path.Clean(root), "/")
	if root == "." || root == "/" {
		root = ""
	}
	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
		pacer:     fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
		startTime: time.Now(),
		albums:    map[bool]*albums{},
		uploaded:  dirtree.New(),
	}
	f.features = (&fs.Features{
		ReadMimeType: true,
	}).Fill(f)
	f.srv.SetErrorHandler(errorHandler)

	_, _, pattern := patterns.match(f.root, "", true)
	if pattern != nil && pattern.isFile {
		oldRoot := f.root
		var leaf string
		f.root, leaf = path.Split(f.root)
		f.root = strings.TrimRight(f.root, "/")
		_, err := f.NewObject(context.TODO(), leaf)
		if err == nil {
			return f, fs.ErrorIsFile
		}
		f.root = oldRoot
	}
	return f, nil
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.MediaItem) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		o.setMetaData(info)
	} else {
		err := o.readMetaData(ctx) // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	defer log.Trace(f, "remote=%q", remote)("")
	return f.newObjectWithInfo(ctx, remote, nil)
}

// addID adds the ID to name
func addID(name string, ID string) string {
	idStr := "{" + ID + "}"
	if name == "" {
		return idStr
	}
	return name + " " + idStr
}

// addFileID adds the ID to the fileName passed in
func addFileID(fileName string, ID string) string {
	ext := path.Ext(fileName)
	base := fileName[:len(fileName)-len(ext)]
	return addID(base, ID) + ext
}

var idRe = regexp.MustCompile(`\{([A-Za-z0-9_-]{55,})\}`)

// findID returns the ID embedded in name, or "" if there isn't one
func findID(name string) string {
	match := idRe.FindStringSubmatch(name)
	if match == nil {
		return ""
	}
	return match[1]
}
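A sketch (not part of this diff, written as if inside the googlephotos package so it can reach the unexported helpers) of how the three ID helpers round-trip; the function name is made up and the 55 character ID is arbitrary, being the minimum length idRe will match:

func exampleIDRoundTrip() {
	// A made-up ID of the minimum length idRe accepts
	id := strings.Repeat("A", 55)
	name := addFileID("IMG_1234.jpg", id)
	fmt.Println(name)                        // IMG_1234 {AAA...A}.jpg - the ID goes before the extension
	fmt.Println(findID(name) == id)          // true
	fmt.Println(findID("IMG_1234.jpg"))      // "" - no embedded ID
	fmt.Println(addID("", id) == "{"+id+"}") // true - a bare ID when the name is empty
}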
// list the albums into an internal cache
// FIXME cache invalidation
func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
	f.albumsMu.Lock()
	defer f.albumsMu.Unlock()
	all, ok := f.albums[shared]
	if ok && all != nil {
		return all, nil
	}
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/albums",
		Parameters: url.Values{},
	}
	if shared {
		opts.Path = "/sharedAlbums"
	}
	all = newAlbums()
	opts.Parameters.Set("pageSize", strconv.Itoa(albumChunks))
	lastID := ""
	for {
		var result api.ListAlbums
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(&opts, nil, &result)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "couldn't list albums")
		}
		newAlbums := result.Albums
		if shared {
			newAlbums = result.SharedAlbums
		}
		if len(newAlbums) > 0 && newAlbums[0].ID == lastID {
			// skip first if ID duplicated from last page
			newAlbums = newAlbums[1:]
		}
		if len(newAlbums) > 0 {
			lastID = newAlbums[len(newAlbums)-1].ID
		}
		for i := range newAlbums {
			all.add(&newAlbums[i])
		}
		if result.NextPageToken == "" {
			break
		}
		opts.Parameters.Set("pageToken", result.NextPageToken)
	}
	f.albums[shared] = all
	return all, nil
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *api.MediaItem, isDirectory bool) error

// list the objects from the media item search into the function
// supplied, restricted by the filter passed in, paging through the
// results until they are exhausted
func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/mediaItems:search",
	}
	filter.PageSize = listChunks
	filter.PageToken = ""
	lastID := ""
	for {
		var result api.MediaItems
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(&opts, &filter, &result)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return errors.Wrap(err, "couldn't list files")
		}
		items := result.MediaItems
		if len(items) > 0 && items[0].ID == lastID {
			// skip first if ID duplicated from last page
			items = items[1:]
		}
		if len(items) > 0 {
			lastID = items[len(items)-1].ID
		}
		for i := range items {
			item := &result.MediaItems[i]
			remote := item.Filename
			remote = strings.Replace(remote, "/", "／", -1) // replace "/" with FULLWIDTH SOLIDUS so it can't be mistaken for a path separator
			err = fn(remote, item, false)
			if err != nil {
				return err
			}
		}
		if result.NextPageToken == "" {
			break
		}
		filter.PageToken = result.NextPageToken
	}

	return nil
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaItem, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		d := fs.NewDir(remote, f.dirTime())
		return d, nil
	}
	o := &Object{
		fs:     f,
		remote: remote,
	}
	o.setMetaData(item)
	return o, nil
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
	// List the objects
	err = f.list(filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
		entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// Dedupe the file names
	dupes := map[string]int{}
	for _, entry := range entries {
		o, ok := entry.(*Object)
		if ok {
			dupes[o.remote]++
		}
	}
	for _, entry := range entries {
		o, ok := entry.(*Object)
		if ok {
			duplicated := dupes[o.remote] > 1
			if duplicated || o.remote == "" {
				o.remote = addFileID(o.remote, o.id)
			}
		}
	}
	return entries, err
}
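The dedupe pass above means duplicate filenames in a listing come back tagged with their media item IDs. A sketch (not part of this diff, in-package, with made-up names and IDs) of that renaming on plain strings:

func exampleDedupe() {
	names := []string{"IMG_0001.jpg", "IMG_0001.jpg", "IMG_0002.jpg"}
	ids := []string{strings.Repeat("a", 55), strings.Repeat("b", 55), strings.Repeat("c", 55)}
	// First pass counts the occurrences of each name
	dupes := map[string]int{}
	for _, name := range names {
		dupes[name]++
	}
	// Second pass renames only the names seen more than once
	for i, name := range names {
		if dupes[name] > 1 {
			names[i] = addFileID(name, ids[i])
		}
	}
	fmt.Println(names) // [IMG_0001 {aaa...a}.jpg IMG_0001 {bbb...b}.jpg IMG_0002.jpg]
}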
// listUploads lists a single directory from the uploads
func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	f.uploadedMu.Lock()
	entries, ok := f.uploaded[dir]
	f.uploadedMu.Unlock()
	if !ok && dir != "" {
		return nil, fs.ErrorDirNotFound
	}
	return entries, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, prefix, pattern := patterns.match(f.root, dir, false)
	if pattern == nil || pattern.isFile {
		return nil, fs.ErrorDirNotFound
	}
	if pattern.toEntries != nil {
		return pattern.toEntries(ctx, f, prefix, match)
	}
	return nil, fs.ErrorDirNotFound
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	defer log.Trace(f, "src=%+v", src)("")
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}

// createAlbum creates the album
func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/albums",
		Parameters: url.Values{},
	}
	var request = api.CreateAlbum{
		Album: &api.Album{
			Title: albumTitle,
		},
	}
	var result api.Album
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, request, &result)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create album")
	}
	f.albums[false].add(&result)
	return &result, nil
}

// getOrCreateAlbum gets an existing album or creates a new one
//
// It does the creation with the lock held to avoid duplicates
func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
	f.createMu.Lock()
	defer f.createMu.Unlock()
	albums, err := f.listAlbums(false)
	if err != nil {
		return nil, err
	}
	album, ok := albums.get(albumTitle)
	if ok {
		return album, nil
	}
	return f.createAlbum(ctx, albumTitle)
}

// Mkdir creates the album if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, prefix, pattern := patterns.match(f.root, dir, false)
	if pattern == nil {
		return fs.ErrorDirNotFound
	}
	if !pattern.canMkdir {
		return errCantMkdir
	}
	if pattern.isUpload {
		f.uploadedMu.Lock()
		d := fs.NewDir(strings.Trim(prefix, "/"), f.dirTime())
		f.uploaded.AddEntry(d)
		f.uploadedMu.Unlock()
		return nil
	}
	albumTitle := match[1]
	_, err = f.getOrCreateAlbum(ctx, albumTitle)
	return err
}

// Rmdir removes the directory
//
// Upload directories are pruned from the internal cache. Albums can't
// be deleted as the Google Photos API doesn't support it, so
// errAlbumDelete is returned instead.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, _, pattern := patterns.match(f.root, dir, false)
	if pattern == nil {
		return fs.ErrorDirNotFound
	}
	if !pattern.canMkdir {
		return errCantRmdir
	}
	if pattern.isUpload {
		f.uploadedMu.Lock()
		err = f.uploaded.Prune(map[string]bool{
			dir: true,
		})
		f.uploadedMu.Unlock()
		return err
	}
	albumTitle := match[1]
	allAlbums, err := f.listAlbums(false)
	if err != nil {
		return err
	}
	album, ok := allAlbums.get(albumTitle)
	if !ok {
		return fs.ErrorDirNotFound
	}
	_ = album
	return errAlbumDelete
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}
// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the hash of an object - not supported, so it always
// returns hash.ErrUnsupported
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	defer log.Trace(o, "")("")
	if !o.fs.opt.ReadSize || o.bytes >= 0 {
		return o.bytes
	}
	ctx := context.TODO()
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "Size: Failed to read metadata: %v", err)
		return -1
	}
	var resp *http.Response
	opts := rest.Opts{
		Method:  "HEAD",
		RootURL: o.downloadURL(),
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		fs.Debugf(o, "Reading size failed: %v", err)
	} else {
		lengthStr := resp.Header.Get("Content-Length")
		length, err := strconv.ParseInt(lengthStr, 10, 64)
		if err != nil {
			fs.Debugf(o, "Reading size failed to parse Content-Length %q: %v", lengthStr, err)
		} else {
			o.bytes = length
		}
	}
	return o.bytes
}

// setMetaData sets the fs data from an api.MediaItem
func (o *Object) setMetaData(info *api.MediaItem) {
	o.url = info.BaseURL
	o.id = info.ID
	o.bytes = -1 // FIXME
	o.mimeType = info.MimeType
	o.modTime = info.MediaMetadata.CreationTime
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if !o.modTime.IsZero() && o.url != "" {
		return nil
	}
	dir, fileName := path.Split(o.remote)
	dir = strings.Trim(dir, "/")
	_, _, pattern := patterns.match(o.fs.root, o.remote, true)
	if pattern == nil {
		return fs.ErrorObjectNotFound
	}
	if !pattern.isFile {
		return fs.ErrorNotAFile
	}
	// If we have an ID, fetch the media item directly
	if id := findID(fileName); id != "" {
		opts := rest.Opts{
			Method: "GET",
			Path:   "/mediaItems/" + id,
		}
		var item api.MediaItem
		var resp *http.Response
		err = o.fs.pacer.Call(func() (bool, error) {
			resp, err = o.fs.srv.CallJSON(&opts, nil, &item)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return errors.Wrap(err, "couldn't get media item")
		}
		o.setMetaData(&item)
		return nil
	}
	// Otherwise list the directory the file is in
	entries, err := o.fs.List(ctx, dir)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	// and find the file in the directory
	for _, entry := range entries {
		if entry.Remote() == o.remote {
			if newO, ok := entry.(*Object); ok {
				*o = *newO
				return nil
			}
		}
	}
	return fs.ErrorObjectNotFound
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	defer log.Trace(o, "")("")
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
		return time.Now()
	}
	return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	return fs.ErrorCantSetModTime
}

// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// downloadURL returns the URL for a full bytes download for the object
func (o *Object) downloadURL() string {
	url := o.url + "=d"
	if strings.HasPrefix(o.mimeType, "video/") {
		url += "v"
	}
	return url
}
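A sketch (not part of this diff, in-package) of what downloadURL yields for the two media kinds; the base URL is a placeholder, not a real one:

func exampleDownloadURL() {
	// "=d" requests the original bytes of a photo
	o := &Object{url: "https://lh3.googleusercontent.com/abc", mimeType: "image/jpeg"}
	fmt.Println(o.downloadURL()) // https://lh3.googleusercontent.com/abc=d

	// "=dv" requests the original bytes of a video
	o.mimeType = "video/mp4"
	fmt.Println(o.downloadURL()) // https://lh3.googleusercontent.com/abc=dv
}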
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	defer log.Trace(o, "")("")
	err = o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "Open: Failed to read metadata: %v", err)
		return nil, err
	}
	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: o.downloadURL(),
		Options: options,
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	return resp.Body, err
}

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	defer log.Trace(o, "src=%+v", src)("err=%v", &err)
	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
	if pattern == nil || !pattern.isFile || !pattern.canUpload {
		return errCantUpload
	}
	var (
		albumID  string
		fileName string
	)
	if pattern.isUpload {
		fileName = match[1]
	} else {
		var albumTitle string
		albumTitle, fileName = match[1], match[2]

		album, err := o.fs.getOrCreateAlbum(ctx, albumTitle)
		if err != nil {
			return err
		}

		if !album.IsWriteable {
			return errOwnAlbums
		}

		albumID = album.ID
	}

	// Upload the media item in exchange for an UploadToken
	opts := rest.Opts{
		Method: "POST",
		Path:   "/uploads",
		ExtraHeaders: map[string]string{
			"X-Goog-Upload-File-Name": fileName,
			"X-Goog-Upload-Protocol":  "raw",
		},
		Body: in,
	}
	var token []byte
	var resp *http.Response
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		if err != nil {
			_ = resp.Body.Close()
			return shouldRetry(resp, err)
		}
		token, err = rest.ReadBody(resp)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't upload file")
	}
	uploadToken := strings.TrimSpace(string(token))
	if uploadToken == "" {
		return errors.New("empty upload token")
	}

	// Create the media item from an UploadToken, optionally adding to an album
	opts = rest.Opts{
		Method: "POST",
		Path:   "/mediaItems:batchCreate",
	}
	var request = api.BatchCreateRequest{
		AlbumID: albumID,
		NewMediaItems: []api.NewMediaItem{
			{
				SimpleMediaItem: api.SimpleMediaItem{
					UploadToken: uploadToken,
				},
			},
		},
	}
	var result api.BatchCreateResponse
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, request, &result)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "failed to create media item")
	}
	if len(result.NewMediaItemResults) != 1 {
		return errors.New("bad response to BatchCreate: wrong number of items")
	}
	mediaItemResult := result.NewMediaItemResults[0]
	if mediaItemResult.Status.Code != 0 {
		return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
	}
	o.setMetaData(&mediaItemResult.MediaItem)

	// Add upload to internal storage
	if pattern.isUpload {
		o.fs.uploaded.AddEntry(o)
	}
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
	if pattern == nil || !pattern.isFile || !pattern.canUpload || pattern.isUpload {
		return errRemove
	}
	albumTitle, fileName := match[1], match[2]
	album, ok := o.fs.albums[false].get(albumTitle)
	if !ok {
		return errors.Errorf("couldn't find %q in album %q for delete", fileName, albumTitle)
	}
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/albums/" + album.ID + ":batchRemoveMediaItems",
		NoResponse: true,
	}
	var request = api.BatchRemoveItems{
		MediaItemIds: []string{o.id},
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't delete item from album")
	}
	return nil
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}

// ID of an Object if known, "" otherwise
func (o *Object) ID() string {
	return o.id
}

// Check the interfaces are satisfied
var (
	_ fs.Fs        = &Fs{}
	_ fs.Object    = &Object{}
	_ fs.MimeTyper = &Object{}
	_ fs.IDer      = &Object{}
)
backend/googlephotos/googlephotos_test.go (new file, 307 lines)
@@ -0,0 +1,307 @@
package googlephotos

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"path"
	"testing"
	"time"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	// We use two different files here as Google Photos would otherwise
	// deduplicate them, which confuses the tests as the filename is
	// then unexpected.
	fileNameAlbum  = "rclone-test-image1.jpg"
	fileNameUpload = "rclone-test-image2.jpg"
)

// Wrapper to override the remote for an object
type overrideRemoteObject struct {
	fs.Object
	remote string
}

// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
	return o.remote
}

func TestIntegration(t *testing.T) {
	ctx := context.Background()
	fstest.Initialise()

	// Create Fs
	if *fstest.RemoteName == "" {
		*fstest.RemoteName = "TestGooglePhotos:"
	}
	f, err := fs.NewFs(*fstest.RemoteName)
	if err == fs.ErrorNotFoundInConfigFile {
		t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
	}
	require.NoError(t, err)

	// Create local Fs pointing at testfiles
	localFs, err := fs.NewFs("testfiles")
	require.NoError(t, err)

	t.Run("CreateAlbum", func(t *testing.T) {
		albumName := "album/rclone-test-" + random.String(24)
		err = f.Mkdir(ctx, albumName)
		require.NoError(t, err)
		remote := albumName + "/" + fileNameAlbum

		t.Run("PutFile", func(t *testing.T) {
			srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
			require.NoError(t, err)
			in, err := srcObj.Open(ctx)
			require.NoError(t, err)
			dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
			require.NoError(t, err)
			assert.Equal(t, remote, dstObj.Remote())
			_ = in.Close()
			remoteWithID := addFileID(remote, dstObj.(*Object).id)

			t.Run("ObjectFs", func(t *testing.T) {
				assert.Equal(t, f, dstObj.Fs())
			})

			t.Run("ObjectString", func(t *testing.T) {
				assert.Equal(t, remote, dstObj.String())
				assert.Equal(t, "<nil>", (*Object)(nil).String())
			})

			t.Run("ObjectHash", func(t *testing.T) {
				h, err := dstObj.Hash(ctx, hash.MD5)
				assert.Equal(t, "", h)
				assert.Equal(t, hash.ErrUnsupported, err)
			})

			t.Run("ObjectSize", func(t *testing.T) {
				assert.Equal(t, int64(-1), dstObj.Size())
				f.(*Fs).opt.ReadSize = true
				defer func() {
					f.(*Fs).opt.ReadSize = false
				}()
				size := dstObj.Size()
				assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
			})

			t.Run("ObjectSetModTime", func(t *testing.T) {
				err := dstObj.SetModTime(ctx, time.Now())
				assert.Equal(t, fs.ErrorCantSetModTime, err)
			})

			t.Run("ObjectStorable", func(t *testing.T) {
				assert.True(t, dstObj.Storable())
			})

			t.Run("ObjectOpen", func(t *testing.T) {
				in, err := dstObj.Open(ctx)
				require.NoError(t, err)
				buf, err := ioutil.ReadAll(in)
				require.NoError(t, err)
				require.NoError(t, in.Close())
				assert.True(t, len(buf) > 1000)
				contentType := http.DetectContentType(buf[:512])
				assert.Equal(t, "image/jpeg", contentType)
			})

			t.Run("CheckFileInAlbum", func(t *testing.T) {
				entries, err := f.List(ctx, albumName)
				require.NoError(t, err)
				assert.Equal(t, 1, len(entries))
				assert.Equal(t, remote, entries[0].Remote())
				assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
			})

			// Check it is there in the year/month/day hierarchy
			// 2013-07-13 is the creation date of the folder
			checkPresent := func(t *testing.T, objPath string) {
				entries, err := f.List(ctx, objPath)
				require.NoError(t, err)
				found := false
				for _, entry := range entries {
					leaf := path.Base(entry.Remote())
					if leaf == fileNameAlbum || leaf == remoteWithID {
						found = true
					}
				}
				assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
			}

			t.Run("CheckInByYear", func(t *testing.T) {
				checkPresent(t, "media/by-year/2013")
			})

			t.Run("CheckInByMonth", func(t *testing.T) {
				checkPresent(t, "media/by-month/2013/2013-07")
			})

			t.Run("CheckInByDay", func(t *testing.T) {
				checkPresent(t, "media/by-day/2013/2013-07-26")
			})

			t.Run("NewObject", func(t *testing.T) {
				o, err := f.NewObject(ctx, remote)
				require.NoError(t, err)
				require.Equal(t, remote, o.Remote())
			})

			t.Run("NewObjectWithID", func(t *testing.T) {
				o, err := f.NewObject(ctx, remoteWithID)
				require.NoError(t, err)
				require.Equal(t, remoteWithID, o.Remote())
			})

			t.Run("NewFsIsFile", func(t *testing.T) {
				fNew, err := fs.NewFs(*fstest.RemoteName + remote)
				assert.Equal(t, fs.ErrorIsFile, err)
				leaf := path.Base(remote)
				o, err := fNew.NewObject(ctx, leaf)
				require.NoError(t, err)
				require.Equal(t, leaf, o.Remote())
			})

			t.Run("RemoveFileFromAlbum", func(t *testing.T) {
				err = dstObj.Remove(ctx)
				require.NoError(t, err)

				time.Sleep(time.Second)

				// Check album empty
				entries, err := f.List(ctx, albumName)
				require.NoError(t, err)
				assert.Equal(t, 0, len(entries))
			})
		})

		// remove the album
		err = f.Rmdir(ctx, albumName)
		require.Error(t, err) // FIXME doesn't work yet
	})

	t.Run("UploadMkdir", func(t *testing.T) {
		assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
		assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))

		t.Run("List", func(t *testing.T) {
			entries, err := f.List(ctx, "upload")
			require.NoError(t, err)
			assert.Equal(t, 1, len(entries))
			assert.Equal(t, "upload/dir", entries[0].Remote())

			entries, err = f.List(ctx, "upload/dir")
			require.NoError(t, err)
			assert.Equal(t, 1, len(entries))
			assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
		})

		t.Run("Rmdir", func(t *testing.T) {
			assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
			assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
		})

		t.Run("ListEmpty", func(t *testing.T) {
			entries, err := f.List(ctx, "upload")
			require.NoError(t, err)
			assert.Equal(t, 0, len(entries))

			_, err = f.List(ctx, "upload/dir")
			assert.Equal(t, fs.ErrorDirNotFound, err)
		})
	})

	t.Run("Upload", func(t *testing.T) {
		uploadDir := "upload/dir/subdir"
		remote := path.Join(uploadDir, fileNameUpload)

		srcObj, err := localFs.NewObject(ctx, fileNameUpload)
		require.NoError(t, err)
		in, err := srcObj.Open(ctx)
		require.NoError(t, err)
		dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
		require.NoError(t, err)
		assert.Equal(t, remote, dstObj.Remote())
		_ = in.Close()
		remoteWithID := addFileID(remote, dstObj.(*Object).id)

		t.Run("List", func(t *testing.T) {
			entries, err := f.List(ctx, uploadDir)
			require.NoError(t, err)
			require.Equal(t, 1, len(entries))
			assert.Equal(t, remote, entries[0].Remote())
			assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
		})

		t.Run("NewObject", func(t *testing.T) {
			o, err := f.NewObject(ctx, remote)
			require.NoError(t, err)
			require.Equal(t, remote, o.Remote())
		})

		t.Run("NewObjectWithID", func(t *testing.T) {
			o, err := f.NewObject(ctx, remoteWithID)
			require.NoError(t, err)
			require.Equal(t, remoteWithID, o.Remote())
		})
	})

	t.Run("Name", func(t *testing.T) {
		assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
	})

	t.Run("Root", func(t *testing.T) {
		assert.Equal(t, "", f.Root())
	})

	t.Run("String", func(t *testing.T) {
		assert.Equal(t, `Google Photos path ""`, f.String())
	})

	t.Run("Features", func(t *testing.T) {
		features := f.Features()
		assert.False(t, features.CaseInsensitive)
		assert.True(t, features.ReadMimeType)
	})

	t.Run("Precision", func(t *testing.T) {
		assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
	})

	t.Run("Hashes", func(t *testing.T) {
		assert.Equal(t, hash.Set(hash.None), f.Hashes())
	})
}

func TestAddID(t *testing.T) {
	assert.Equal(t, "potato {123}", addID("potato", "123"))
	assert.Equal(t, "{123}", addID("", "123"))
}

func TestFileAddID(t *testing.T) {
	assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123"))
	assert.Equal(t, "potato {123}", addFileID("potato", "123"))
	assert.Equal(t, "{123}", addFileID("", "123"))
}

func TestFindID(t *testing.T) {
	assert.Equal(t, "", findID("potato"))
	ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
	assert.Equal(t, ID, findID("potato {"+ID+"}.txt"))
	ID = ID[1:]
	assert.Equal(t, "", findID("potato {"+ID+"}.txt"))
}
backend/googlephotos/pattern.go (new file, 335 lines)
@@ -0,0 +1,335 @@
// Store the parsing of file patterns

package googlephotos

import (
	"context"
	"fmt"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/rclone/rclone/fs"
)

// lister describes the subset of the interfaces on Fs needed for the
// file pattern parsing
type lister interface {
	listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
	listAlbums(shared bool) (all *albums, err error)
	listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
	dirTime() time.Time
}

// dirPattern describes a single directory pattern
type dirPattern struct {
	re        string         // match for the path
	match     *regexp.Regexp // compiled match
	canUpload bool           // true if can upload here
	canMkdir  bool           // true if can make a directory here
	isFile    bool           // true if this is a file
	isUpload  bool           // true if this is the upload directory
	// function to turn a match into DirEntries
	toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}

// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern

// patterns describes the layout of the google photos backend file system.
//
// NB no trailing / on paths
var patterns = dirPatterns{
	{
		re: `^$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"media", f.dirTime()),
				fs.NewDir(prefix+"album", f.dirTime()),
				fs.NewDir(prefix+"shared-album", f.dirTime()),
				fs.NewDir(prefix+"upload", f.dirTime()),
			}, nil
		},
	},
	{
		re: `^upload(?:/(.*))?$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listUploads(ctx, match[0])
		},
		canUpload: true,
		canMkdir:  true,
		isUpload:  true,
	},
	{
		re:        `^upload/(.*)$`,
		isFile:    true,
		canUpload: true,
		isUpload:  true,
	},
	{
		re: `^media$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"all", f.dirTime()),
				fs.NewDir(prefix+"by-year", f.dirTime()),
				fs.NewDir(prefix+"by-month", f.dirTime()),
				fs.NewDir(prefix+"by-day", f.dirTime()),
			}, nil
		},
	},
	{
		re: `^media/all$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listDir(ctx, prefix, api.SearchFilter{})
		},
	},
	{
		re:     `^media/all/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-year$`,
		toEntries: years,
	},
	{
		re: `^media/by-year/(\d{4})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-year/(\d{4})/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-month$`,
		toEntries: years,
	},
	{
		re:        `^media/by-month/(\d{4})$`,
		toEntries: months,
	},
	{
		re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-day$`,
		toEntries: years,
	},
	{
		re:        `^media/by-day/(\d{4})$`,
		toEntries: days,
	},
	{
		re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	{
		re: `^album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, "")
		},
	},
	{
		re:       `^album/(.+)$`,
		canMkdir: true,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, match[1])
		},
	},
	{
		re:        `^album/(.+?)/([^/]+)$`,
		canUpload: true,
		isFile:    true,
	},
	{
		re: `^shared-album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, "")
		},
	},
	{
		re: `^shared-album/(.+)$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, match[1])
		},
	},
	{
		re:     `^shared-album/(.+?)/([^/]+)$`,
		isFile: true,
	},
}.mustCompile()

// mustCompile compiles the regexps in the dirPatterns
func (ds dirPatterns) mustCompile() dirPatterns {
	for i := range ds {
		pattern := &ds[i]
		pattern.match = regexp.MustCompile(pattern.re)
	}
	return ds
}

// match looks up the path passed in in the pattern table and
// returns the parameters and a pointer to the match, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
	itemPath = strings.Trim(itemPath, "/")
	absPath := path.Join(root, itemPath)
	prefix = strings.Trim(absPath[len(root):], "/")
	if prefix != "" {
		prefix += "/"
	}
	for i := range ds {
		pattern = &ds[i]
		if pattern.isFile != isFile {
			continue
		}
		match = pattern.match.FindStringSubmatch(absPath)
		if match != nil {
			return
		}
	}
	return nil, "", nil
}
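A sketch (not part of this diff, in-package, with a made-up function name) of the pattern table in action for an Fs rooted at "":

func exampleMatch() {
	match, prefix, pattern := patterns.match("", "media/by-day/2013/2013-07-26", false)
	// pattern.re is `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`
	fmt.Println(match[1], match[2], match[3]) // 2013 07 26
	fmt.Println(prefix)                       // media/by-day/2013/2013-07-26/
	fmt.Println(pattern.isFile)               // false
}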
// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	currentYear := f.dirTime().Year()
	for year := 2000; year <= currentYear; year++ {
		entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
	}
	return entries, nil
}

// Return the months in a given year
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	for month := 1; month <= 12; month++ {
		entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime()))
	}
	return entries, nil
}

// Return the days in a given year
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	current, err := time.Parse("2006", year)
	if err != nil {
		return nil, errors.Errorf("bad year %q", match[1])
	}
	currentYear := current.Year()
	for current.Year() == currentYear {
		entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
		current = current.AddDate(0, 0, 1)
	}
	return entries, nil
}

// This creates a search filter on year/month/day as provided
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
	year, err := strconv.Atoi(match[1])
	if err != nil || year < 1000 || year > 3000 {
		return sf, errors.Errorf("bad year %q", match[1])
	}
	sf = api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: year,
					},
				},
			},
		},
	}
	if len(match) >= 3 {
		month, err := strconv.Atoi(match[2])
		if err != nil || month < 1 || month > 12 {
			return sf, errors.Errorf("bad month %q", match[2])
		}
		sf.Filters.DateFilter.Dates[0].Month = month
	}
	if len(match) >= 4 {
		day, err := strconv.Atoi(match[3])
		if err != nil || day < 1 || day > 31 {
			return sf, errors.Errorf("bad day %q", match[3])
		}
		sf.Filters.DateFilter.Dates[0].Day = day
	}
	return sf, nil
}
// Turns an albumPath into entries
|
||||
//
|
||||
// These can either be synthetic directory entries if the album path
|
||||
// is a prefix of another album, or actual files, or a combination of
|
||||
// the two.
|
||||
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
|
||||
albums, err := f.listAlbums(shared)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Put in the directories
|
||||
dirs, foundAlbumPath := albums.getDirs(albumPath)
|
||||
if foundAlbumPath {
|
||||
for _, dir := range dirs {
|
||||
d := fs.NewDir(prefix+dir, f.dirTime())
|
||||
dirPath := path.Join(albumPath, dir)
|
||||
// if this dir is an album add more special stuff
|
||||
album, ok := albums.get(dirPath)
|
||||
if ok {
|
||||
count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Error reading media count: %v", err)
|
||||
}
|
||||
d.SetID(album.ID).SetItems(count)
|
||||
}
|
||||
entries = append(entries, d)
|
||||
}
|
||||
}
|
||||
// if this is an album then return a filter to list it
|
||||
album, foundAlbum := albums.get(albumPath)
|
||||
if foundAlbum {
|
||||
filter := api.SearchFilter{AlbumID: album.ID}
|
||||
newEntries, err := f.listDir(ctx, prefix, filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries = append(entries, newEntries...)
|
||||
}
|
||||
if !foundAlbumPath && !foundAlbum && albumPath != "" {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
495
backend/googlephotos/pattern_test.go
Normal file
495
backend/googlephotos/pattern_test.go
Normal file
@@ -0,0 +1,495 @@
|
||||
package googlephotos
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/dirtree"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/mockobject"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// time for directories
|
||||
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")
|
||||
|
||||
// mock Fs for testing patterns
|
||||
type testLister struct {
|
||||
t *testing.T
|
||||
albums *albums
|
||||
names []string
|
||||
uploaded dirtree.DirTree
|
||||
}
|
||||
|
||||
// newTestLister makes a mock for testing
|
||||
func newTestLister(t *testing.T) *testLister {
|
||||
return &testLister{
|
||||
t: t,
|
||||
albums: newAlbums(),
|
||||
uploaded: dirtree.New(),
|
||||
}
|
||||
}
|
||||
|
||||
// mock listDir for testing
|
||||
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
|
||||
for _, name := range f.names {
|
||||
entries = append(entries, mockobject.New(prefix+name))
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// mock listAlbums for testing
|
||||
func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
|
||||
return f.albums, nil
|
||||
}
|
||||
|
||||
// mock listUploads for testing
|
||||
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
entries, _ = f.uploaded[dir]
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// mock dirTime for testing
|
||||
func (f *testLister) dirTime() time.Time {
|
||||
return startTime
|
||||
}
|
||||
|
||||
func TestPatternMatch(t *testing.T) {
|
||||
for testNumber, test := range []struct {
|
||||
// input
|
||||
root string
|
||||
itemPath string
|
||||
isFile bool
|
||||
// expected output
|
||||
wantMatch []string
|
||||
wantPrefix string
|
||||
wantPattern *dirPattern
|
||||
}{
|
||||
{
|
||||
root: "",
|
||||
itemPath: "",
|
||||
isFile: false,
|
||||
wantMatch: []string{""},
|
||||
wantPrefix: "",
|
||||
wantPattern: &patterns[0],
|
||||
},
|
||||
{
|
||||
root: "",
|
||||
itemPath: "",
|
||||
isFile: true,
|
||||
wantMatch: nil,
|
||||
wantPrefix: "",
|
||||
wantPattern: nil,
|
||||
},
|
||||
{
|
||||
root: "upload",
|
||||
itemPath: "",
|
||||
isFile: false,
|
||||
wantMatch: []string{"upload", ""},
|
||||
wantPrefix: "",
|
||||
wantPattern: &patterns[1],
|
||||
},
|
||||
{
|
||||
root: "upload/dir",
|
||||
itemPath: "",
|
||||
isFile: false,
|
||||
wantMatch: []string{"upload/dir", "dir"},
|
||||
wantPrefix: "",
|
||||
wantPattern: &patterns[1],
|
||||
},
|
||||
{
|
||||
root: "upload/file.jpg",
|
||||
itemPath: "",
|
||||
isFile: true,
|
||||
wantMatch: []string{"upload/file.jpg", "file.jpg"},
|
||||
wantPrefix: "",
|
||||
wantPattern: &patterns[2],
|
||||
},
|
||||
{
|
||||
root: "media",
|
||||
itemPath: "",
|
||||
isFile: false,
|
||||
wantMatch: []string{"media"},
|
||||
wantPrefix: "",
|
||||
wantPattern: &patterns[3],
|
||||
},
|
||||
{
|
||||
root: "",
|
||||
itemPath: "media",
|
||||
isFile: false,
|
||||
wantMatch: []string{"media"},
|
||||
wantPrefix: "media/",
|
||||
wantPattern: &patterns[3],
|
||||
},
|
||||
{
|
||||
root: "media/all",
|
||||
itemPath: "",
|
||||
isFile: false,
|
||||
wantMatch: []string{"media/all"},
|
||||
wantPrefix: "",
|
||||
wantPattern: &patterns[4],
|
||||
},
|
||||
{
|
||||
root: "media",
|
||||
itemPath: "all",
|
||||
isFile: false,
|
||||
wantMatch: []string{"media/all"},
|
||||
wantPrefix: "all/",
|
||||
wantPattern: &patterns[4],
|
||||
},
|
||||
{
|
||||
root: "media/all",
|
||||
itemPath: "file.jpg",
|
||||
isFile: true,
|
||||
wantMatch: []string{"media/all/file.jpg", "file.jpg"},
|
||||
wantPrefix: "file.jpg/",
|
||||
wantPattern: &patterns[5],
|
||||
},
|
||||
} {
|
||||
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
|
||||
gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
|
||||
assert.Equal(t, test.wantMatch, gotMatch)
|
||||
assert.Equal(t, test.wantPrefix, gotPrefix)
|
||||
assert.Equal(t, test.wantPattern, gotPattern)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPatternMatchToEntries(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := newTestLister(t)
|
||||
f.names = []string{"file.jpg"}
|
||||
f.albums.add(&api.Album{
|
||||
ID: "1",
|
||||
Title: "sub/one",
|
||||
})
|
||||
f.albums.add(&api.Album{
|
||||
ID: "2",
|
||||
Title: "sub",
|
||||
})
|
||||
f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
|
||||
f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))
|
||||
|
||||
for testNumber, test := range []struct {
|
||||
// input
|
||||
root string
|
||||
itemPath string
|
||||
// expected output
|
||||
wantMatch []string
|
||||
wantPrefix string
|
||||
remotes []string
|
||||
}{
|
||||
{
|
||||
root: "",
|
||||
itemPath: "",
|
||||
wantMatch: []string{""},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"media/", "album/", "shared-album/", "upload/"},
|
||||
},
|
||||
{
|
||||
root: "upload",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"upload", ""},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"upload/file1.jpg", "upload/dir/"},
|
||||
},
|
||||
{
|
||||
root: "upload",
|
||||
itemPath: "dir",
|
||||
wantMatch: []string{"upload/dir", "dir"},
|
||||
wantPrefix: "dir/",
|
||||
remotes: []string{"upload/dir/file2.jpg"},
|
||||
},
|
||||
{
|
||||
root: "media",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"media"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"all/", "by-year/", "by-month/", "by-day/"},
|
||||
},
|
||||
{
|
||||
root: "media/all",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"media/all"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"file.jpg"},
|
||||
},
|
||||
{
|
||||
root: "media",
|
||||
itemPath: "all",
|
||||
wantMatch: []string{"media/all"},
|
||||
wantPrefix: "all/",
|
||||
remotes: []string{"all/file.jpg"},
|
||||
},
|
||||
{
|
||||
root: "media/by-year",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"media/by-year"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
|
||||
},
|
||||
{
|
||||
root: "media/by-year/2000",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"media/by-year/2000", "2000"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"file.jpg"},
|
||||
},
|
||||
{
|
||||
root: "media/by-month",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"media/by-month"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
|
||||
},
|
||||
{
|
||||
root: "media/by-month/2001",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"media/by-month/2001", "2001"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
|
||||
},
|
||||
{
|
||||
root: "media/by-month/2001/2001-01",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"media/by-month/2001/2001-01", "2001", "01"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"file.jpg"},
|
||||
},
|
||||
{
|
||||
root: "media/by-day",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"media/by-day"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
|
||||
},
|
||||
{
|
||||
root: "media/by-day/2001",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"media/by-day/2001", "2001"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
|
||||
},
|
||||
{
|
||||
root: "media/by-day/2001/2001-01-02",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"file.jpg"},
|
||||
},
|
||||
{
|
||||
root: "album",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"album"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"sub/"},
|
||||
},
|
||||
{
|
||||
root: "album/sub",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"album/sub", "sub"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"one/", "file.jpg"},
|
||||
},
|
||||
{
|
||||
root: "album/sub/one",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"album/sub/one", "sub/one"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"file.jpg"},
|
||||
},
|
||||
{
|
||||
root: "shared-album",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"shared-album"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"sub/"},
|
||||
},
|
||||
{
|
||||
root: "shared-album/sub",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"shared-album/sub", "sub"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"one/", "file.jpg"},
|
||||
},
|
||||
{
|
||||
root: "shared-album/sub/one",
|
||||
itemPath: "",
|
||||
wantMatch: []string{"shared-album/sub/one", "sub/one"},
|
||||
wantPrefix: "",
|
||||
remotes: []string{"file.jpg"},
|
||||
},
|
||||
} {
|
||||
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
|
||||
match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
|
||||
assert.Equal(t, test.wantMatch, match)
|
||||
assert.Equal(t, test.wantPrefix, prefix)
|
||||
assert.NotNil(t, pattern)
|
||||
assert.NotNil(t, pattern.toEntries)
|
||||
|
||||
entries, err := pattern.toEntries(ctx, f, prefix, match)
|
||||
assert.NoError(t, err)
|
||||
var remotes = []string{}
|
||||
for _, entry := range entries {
|
||||
remote := entry.Remote()
|
||||
if _, isDir := entry.(fs.Directory); isDir {
|
||||
remote += "/"
|
||||
}
|
||||
remotes = append(remotes, remote)
|
||||
if len(remotes) >= 4 {
|
||||
break // only test first 4 entries
|
||||
}
|
||||
}
|
||||
assert.Equal(t, test.remotes, remotes)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPatternYears(t *testing.T) {
|
||||
f := newTestLister(t)
|
||||
entries, err := years(context.Background(), f, "potato/", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
year := 2000
|
||||
for _, entry := range entries {
|
||||
assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote())
|
||||
year++
|
||||
}
|
||||
}
|
||||
|
||||
func TestPatternMonths(t *testing.T) {
|
||||
f := newTestLister(t)
|
||||
entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 12, len(entries))
|
||||
for i, entry := range entries {
|
||||
assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPatternDays(t *testing.T) {
|
||||
f := newTestLister(t)
|
||||
entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 366, len(entries))
|
||||
assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
|
||||
assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
|
||||
}
|
||||
|
||||
func TestPatternYearMonthDayFilter(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := newTestLister(t)
|
||||
|
||||
// Years
|
||||
sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, api.SearchFilter{
|
||||
Filters: &api.Filters{
|
||||
DateFilter: &api.DateFilter{
|
||||
Dates: []api.Date{
|
||||
{
|
||||
Year: 2000,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, sf)
|
||||
|
||||
_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
|
||||
require.Error(t, err)
|
||||
_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
|
||||
require.Error(t, err)
|
||||
_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
|
||||
require.Error(t, err)
|
||||
|
||||
// Months
|
||||
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, api.SearchFilter{
|
||||
Filters: &api.Filters{
|
||||
DateFilter: &api.DateFilter{
|
||||
Dates: []api.Date{
|
||||
{
|
||||
Month: 1,
|
||||
Year: 2000,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, sf)
|
||||
|
||||
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
|
||||
require.Error(t, err)
|
||||
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
|
||||
require.Error(t, err)
|
||||
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
|
||||
require.Error(t, err)
|
||||
|
||||
// Days
|
||||
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, api.SearchFilter{
|
||||
Filters: &api.Filters{
|
||||
DateFilter: &api.DateFilter{
|
||||
Dates: []api.Date{
|
||||
{
|
||||
Day: 2,
|
||||
Month: 1,
|
||||
Year: 2000,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, sf)
|
||||
|
||||
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
|
||||
require.Error(t, err)
|
||||
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
|
||||
require.Error(t, err)
|
||||
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestPatternAlbumsToEntries(t *testing.T) {
|
||||
f := newTestLister(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
|
||||
assert.Equal(t, fs.ErrorDirNotFound, err)
|
||||
|
||||
f.albums.add(&api.Album{
|
||||
ID: "1",
|
||||
Title: "sub/one",
|
||||
})
|
||||
|
||||
entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(entries))
|
||||
assert.Equal(t, "potato/one", entries[0].Remote())
|
||||
_, ok := entries[0].(fs.Directory)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
f.albums.add(&api.Album{
|
||||
ID: "1",
|
||||
Title: "sub",
|
||||
})
|
||||
f.names = []string{"file.jpg"}
|
||||
|
||||
entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2, len(entries))
|
||||
assert.Equal(t, "potato/one", entries[0].Remote())
|
||||
_, ok = entries[0].(fs.Directory)
|
||||
assert.Equal(t, true, ok)
|
||||
assert.Equal(t, "potato/file.jpg", entries[1].Remote())
|
||||
_, ok = entries[1].(fs.Object)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
}
|
||||
BIN
backend/googlephotos/testfiles/rclone-test-image1.jpg
Normal file
BIN
backend/googlephotos/testfiles/rclone-test-image1.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 16 KiB |
BIN
backend/googlephotos/testfiles/rclone-test-image2.jpg
Normal file
BIN
backend/googlephotos/testfiles/rclone-test-image2.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 16 KiB |
@@ -5,7 +5,9 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
@@ -13,13 +15,13 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/net/html"
|
||||
)
|
||||
|
||||
@@ -40,7 +42,26 @@ func init() {
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://example.com",
|
||||
Help: "Connect to example.com",
|
||||
}, {
|
||||
Value: "https://user:pass@example.com",
|
||||
Help: "Connect to example.com using a username and password",
|
||||
}},
|
||||
}, {
|
||||
Name: "no_slash",
|
||||
Help: `Set this if the site doesn't end directories with /
|
||||
|
||||
Use this if your target website does not use / on the end of
|
||||
directories.
|
||||
|
||||
A / on the end of a path is how rclone normally tells the difference
|
||||
between files and directories. If this flag is set, then rclone will
|
||||
treat all files with Content-Type: text/html as directories and read
|
||||
URLs from them rather than downloading them.
|
||||
|
||||
Note that this may cause rclone to confuse genuine HTML files with
|
||||
directories.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
@@ -49,6 +70,7 @@ func init() {
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Endpoint string `config:"url"`
|
||||
NoSlash bool `config:"no_slash"`
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote HTTP files
|
||||
@@ -186,14 +208,14 @@ func (f *Fs) Precision() time.Duration {
|
||||
}
|
||||
|
||||
// NewObject creates a new remote http file object
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
err := o.stat()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Stat failed")
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
@@ -248,7 +270,7 @@ func parseName(base *url.URL, name string) (string, error) {
|
||||
}
|
||||
// calculate the name relative to the base
|
||||
name = u.Path[len(base.Path):]
|
||||
// musn't be empty
|
||||
// mustn't be empty
|
||||
if name == "" {
|
||||
return "", errNameIsEmpty
|
||||
}
|
||||
@@ -267,14 +289,20 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var walk func(*html.Node)
|
||||
var (
|
||||
walk func(*html.Node)
|
||||
seen = make(map[string]struct{})
|
||||
)
|
||||
walk = func(n *html.Node) {
|
||||
if n.Type == html.ElementNode && n.Data == "a" {
|
||||
for _, a := range n.Attr {
|
||||
if a.Key == "href" {
|
||||
name, err := parseName(base, a.Val)
|
||||
if err == nil {
|
||||
names = append(names, name)
|
||||
if _, found := seen[name]; !found {
|
||||
names = append(names, name)
|
||||
seen[name] = struct{}{}
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
@@ -299,14 +327,16 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
|
||||
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
|
||||
}
|
||||
res, err := f.httpClient.Get(URL)
|
||||
if err == nil && res.StatusCode == http.StatusNotFound {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
if err == nil {
|
||||
defer fs.CheckClose(res.Body, &err)
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
err = statusError(res, err)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to readDir")
|
||||
}
|
||||
defer fs.CheckClose(res.Body, &err)
|
||||
|
||||
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
|
||||
switch contentType {
|
||||
@@ -330,7 +360,7 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
if !strings.HasSuffix(dir, "/") && dir != "" {
|
||||
dir += "/"
|
||||
}
|
||||
@@ -350,11 +380,16 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if err = file.stat(); err != nil {
|
||||
switch err = file.stat(); err {
|
||||
case nil:
|
||||
entries = append(entries, file)
|
||||
case fs.ErrorNotAFile:
|
||||
// ...found a directory not a file
|
||||
dir := fs.NewDir(remote, timeUnset)
|
||||
entries = append(entries, dir)
|
||||
default:
|
||||
fs.Debugf(remote, "skipping because of error: %v", err)
|
||||
continue
|
||||
}
|
||||
entries = append(entries, file)
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
@@ -365,12 +400,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return nil, errorReadOnly
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return nil, errorReadOnly
|
||||
}
|
||||
|
||||
@@ -393,7 +428,7 @@ func (o *Object) Remote() string {
|
||||
}
|
||||
|
||||
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
|
||||
func (o *Object) Hash(r hash.Type) (string, error) {
|
||||
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
@@ -403,7 +438,7 @@ func (o *Object) Size() int64 {
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the remote http file
|
||||
func (o *Object) ModTime() time.Time {
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
@@ -416,6 +451,9 @@ func (o *Object) url() string {
|
||||
func (o *Object) stat() error {
|
||||
url := o.url()
|
||||
res, err := o.fs.httpClient.Head(url)
|
||||
if err == nil && res.StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
err = statusError(res, err)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to stat")
|
||||
@@ -427,13 +465,23 @@ func (o *Object) stat() error {
|
||||
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
|
||||
o.modTime = t
|
||||
o.contentType = res.Header.Get("Content-Type")
|
||||
// If NoSlash is set then check ContentType to see if it is a directory
|
||||
if o.fs.opt.NoSlash {
|
||||
mediaType, _, err := mime.ParseMediaType(o.contentType)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
|
||||
}
|
||||
if mediaType == "text/html" {
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetModTime sets the modification and access time to the specified time
|
||||
//
|
||||
// it also updates the info field
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
@@ -443,7 +491,7 @@ func (o *Object) Storable() bool {
|
||||
}
|
||||
|
||||
// Open a remote http file object for reading. Seek is supported
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
url := o.url()
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
@@ -470,27 +518,27 @@ func (f *Fs) Hashes() hash.Set {
|
||||
}
|
||||
|
||||
// Mkdir makes the root directory of the Fs object
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// Remove a remote http file object
|
||||
func (o *Object) Remove() error {
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType() string {
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
return o.contentType
|
||||
}
|
||||
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
// +build go1.8
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
@@ -14,11 +13,11 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -65,8 +64,8 @@ func prepare(t *testing.T) (fs.Fs, func()) {
|
||||
return f, tidy
|
||||
}
|
||||
|
||||
func testListRoot(t *testing.T, f fs.Fs) {
|
||||
entries, err := f.List("")
|
||||
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
|
||||
entries, err := f.List(context.Background(), "")
|
||||
require.NoError(t, err)
|
||||
|
||||
sort.Sort(entries)
|
||||
@@ -93,22 +92,36 @@ func testListRoot(t *testing.T, f fs.Fs) {
|
||||
|
||||
e = entries[3]
|
||||
assert.Equal(t, "two.html", e.Remote())
|
||||
assert.Equal(t, int64(7), e.Size())
|
||||
_, ok = e.(*Object)
|
||||
assert.True(t, ok)
|
||||
if noSlash {
|
||||
assert.Equal(t, int64(-1), e.Size())
|
||||
_, ok = e.(fs.Directory)
|
||||
assert.True(t, ok)
|
||||
} else {
|
||||
assert.Equal(t, int64(41), e.Size())
|
||||
_, ok = e.(*Object)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
}
|
||||
|
||||
func TestListRoot(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
testListRoot(t, f)
|
||||
testListRoot(t, f, false)
|
||||
}
|
||||
|
||||
func TestListRootNoSlash(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
f.(*Fs).opt.NoSlash = true
|
||||
defer tidy()
|
||||
|
||||
testListRoot(t, f, true)
|
||||
}
|
||||
|
||||
func TestListSubDir(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
|
||||
entries, err := f.List("three")
|
||||
entries, err := f.List(context.Background(), "three")
|
||||
require.NoError(t, err)
|
||||
|
||||
sort.Sort(entries)
|
||||
@@ -126,7 +139,7 @@ func TestNewObject(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
|
||||
o, err := f.NewObject("four/under four.txt")
|
||||
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "four/under four.txt", o.Remote())
|
||||
@@ -136,7 +149,7 @@ func TestNewObject(t *testing.T) {
|
||||
|
||||
// Test the time is correct on the object
|
||||
|
||||
tObj := o.ModTime()
|
||||
tObj := o.ModTime(context.Background())
|
||||
|
||||
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
|
||||
require.NoError(t, err)
|
||||
@@ -144,17 +157,22 @@ func TestNewObject(t *testing.T) {
|
||||
|
||||
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
|
||||
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
|
||||
|
||||
// check object not found
|
||||
o, err = f.NewObject(context.Background(), "not found.txt")
|
||||
assert.Nil(t, o)
|
||||
assert.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
}
|
||||
|
||||
func TestOpen(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
|
||||
o, err := f.NewObject("four/under four.txt")
|
||||
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test normal read
|
||||
fd, err := o.Open()
|
||||
fd, err := o.Open(context.Background())
|
||||
require.NoError(t, err)
|
||||
data, err := ioutil.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
@@ -162,7 +180,7 @@ func TestOpen(t *testing.T) {
|
||||
assert.Equal(t, "beetroot\n", string(data))
|
||||
|
||||
// Test with range request
|
||||
fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
|
||||
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
|
||||
require.NoError(t, err)
|
||||
data, err = ioutil.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
@@ -174,12 +192,12 @@ func TestMimeType(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
|
||||
o, err := f.NewObject("four/under four.txt")
|
||||
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
do, ok := o.(fs.MimeTyper)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
|
||||
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
|
||||
}
|
||||
|
||||
func TestIsAFileRoot(t *testing.T) {
|
||||
@@ -189,7 +207,7 @@ func TestIsAFileRoot(t *testing.T) {
|
||||
f, err := NewFs(remoteName, "one%.txt", m)
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
|
||||
|
||||
testListRoot(t, f)
|
||||
testListRoot(t, f, false)
|
||||
}
|
||||
|
||||
func TestIsAFileSubDir(t *testing.T) {
|
||||
@@ -199,7 +217,7 @@ func TestIsAFileSubDir(t *testing.T) {
|
||||
f, err := NewFs(remoteName, "three/underthree.txt", m)
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
|
||||
|
||||
entries, err := f.List("")
|
||||
entries, err := f.List(context.Background(), "")
|
||||
require.NoError(t, err)
|
||||
|
||||
sort.Sort(entries)
|
||||
|
||||
@@ -1 +1 @@
|
||||
potato
|
||||
<a href="two.html/file.txt">file.txt</a>
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td> </td></tr>
|
||||
<tr><th colspan="5"><hr></th></tr>
|
||||
<!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
|
||||
<!-- some extras from https://github.com/rclone/rclone/issues/1573 -->
|
||||
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>
|
||||
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/swift"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// auth is an authenticator for swift
|
||||
|
||||
@@ -9,20 +9,22 @@ package hubic
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/backend/swift"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/lib/oauthutil"
|
||||
swiftLib "github.com/ncw/swift"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/swift"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
@@ -124,7 +126,9 @@ func (f *Fs) getCredentials() (err error) {
|
||||
}
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
if resp.StatusCode < 200 || resp.StatusCode > 299 {
|
||||
return errors.Errorf("failed to get credentials: %s", resp.Status)
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
|
||||
return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
|
||||
}
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
var result credentials
|
||||
|
||||
@@ -4,14 +4,16 @@ package hubic_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/hubic"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/backend/hubic"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestHubic:",
|
||||
NilObject: (*hubic.Object)(nil),
|
||||
RemoteName: "TestHubic:",
|
||||
NilObject: (*hubic.Object)(nil),
|
||||
SkipFsCheckWrap: true,
|
||||
SkipObjectCheckWrap: true,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -9,7 +9,10 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// default time format for almost all request and responses
|
||||
timeFormat = "2006-01-02-T15:04:05Z0700"
|
||||
// the API server seems to use a different format
|
||||
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
|
||||
)
|
||||
|
||||
// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
|
||||
@@ -40,6 +43,9 @@ func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
// Return Time string in Jottacloud format
|
||||
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
|
||||
|
||||
// APIString returns Time string in Jottacloud API format
|
||||
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
|
||||
|
||||
// Flag is a hacky type for checking if an attribute is present
|
||||
type Flag bool
|
||||
|
||||
@@ -58,6 +64,15 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
|
||||
return attr, errors.New("unimplemented")
|
||||
}
|
||||
|
||||
// TokenJSON is the struct representing the HTTP response from OAuth2
|
||||
// providers returning a token in JSON form.
|
||||
type TokenJSON struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
|
||||
}
|
||||
|
||||
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>
|
||||
|
||||
@@ -265,3 +280,43 @@ func (e *Error) Error() string {
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// AllocateFileRequest to prepare an upload to Jottacloud
|
||||
type AllocateFileRequest struct {
|
||||
Bytes int64 `json:"bytes"`
|
||||
Created string `json:"created"`
|
||||
Md5 string `json:"md5"`
|
||||
Modified string `json:"modified"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
// AllocateFileResponse for upload requests
|
||||
type AllocateFileResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
State string `json:"state"`
|
||||
UploadID string `json:"upload_id"`
|
||||
UploadURL string `json:"upload_url"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
ResumePos int64 `json:"resume_pos"`
|
||||
}
|
||||
|
||||
// UploadResponse after an upload
|
||||
type UploadResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Kind string `json:"kind"`
|
||||
ContentID string `json:"content_id"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Md5 string `json:"md5"`
|
||||
Created int64 `json:"created"`
|
||||
Modified int64 `json:"modified"`
|
||||
Deleted interface{} `json:"deleted"`
|
||||
Mime string `json:"mime"`
|
||||
}
|
||||
|
||||
// DeviceRegistrationResponse is the response to registering a device
|
||||
type DeviceRegistrationResponse struct {
|
||||
ClientID string `json:"client_id"`
|
||||
ClientSecret string `json:"client_secret"`
|
||||
}
|
||||
|
||||
@@ -2,11 +2,14 @@ package jottacloud
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -15,59 +18,215 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/backend/jottacloud/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/jottacloud/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// Globals
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDevice = "Jotta"
|
||||
defaultMountpoint = "Sync"
|
||||
rootURL = "https://www.jottacloud.com/jfs/"
|
||||
apiURL = "https://api.jottacloud.com"
|
||||
shareURL = "https://www.jottacloud.com/"
|
||||
cachePrefix = "rclone-jcmd5-"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDevice = "Jotta"
|
||||
defaultMountpoint = "Archive"
|
||||
rootURL = "https://www.jottacloud.com/jfs/"
|
||||
apiURL = "https://api.jottacloud.com/files/v1/"
|
||||
baseURL = "https://www.jottacloud.com/"
|
||||
tokenURL = "https://api.jottacloud.com/auth/v1/token"
|
||||
registerURL = "https://api.jottacloud.com/auth/v1/register"
|
||||
cachePrefix = "rclone-jcmd5-"
|
||||
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
|
||||
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
||||
configUsername = "user"
|
||||
configClientID = "client_id"
|
||||
configClientSecret = "client_secret"
|
||||
configDevice = "device"
|
||||
configMountpoint = "mountpoint"
|
||||
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
|
||||
)
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app for a personal account
|
||||
oauthConfig = &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: tokenURL,
|
||||
TokenURL: tokenURL,
|
||||
},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
// needs to be done early so we can use oauth during config
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "jottacloud",
|
||||
Description: "JottaCloud",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
tokenString, ok := m.Get("token")
|
||||
if ok && tokenString != "" {
|
||||
fmt.Printf("Already have a token - refresh?\n")
|
||||
if !config.Confirm() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
srv := rest.NewClient(fshttp.NewClient(fs.Config))
|
||||
|
||||
// ask if we should create a device specifc token: https://github.com/rclone/rclone/issues/2995
|
||||
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
|
||||
if config.Confirm() {
|
||||
// random generator to generate random device names
|
||||
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
randonDeviceNamePartLength := 21
|
||||
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
|
||||
for i := range randomDeviceNamePart {
|
||||
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
|
||||
}
|
||||
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
|
||||
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
|
||||
|
||||
values := url.Values{}
|
||||
values.Set("device_id", randomDeviceName)
|
||||
|
||||
// all information comes from https://github.com/ttyridal/aiojotta/wiki/Jotta-protocol-3.-Authentication#token-authentication
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: registerURL,
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
|
||||
Parameters: values,
|
||||
}
|
||||
|
||||
var deviceRegistration api.DeviceRegistrationResponse
|
||||
_, err := srv.CallJSON(&opts, nil, &deviceRegistration)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to register device: %v", err)
|
||||
}
|
||||
|
||||
m.Set(configClientID, deviceRegistration.ClientID)
|
||||
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
|
||||
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
|
||||
}
|
||||
|
||||
clientID, ok := m.Get(configClientID)
|
||||
if !ok {
|
||||
clientID = rcloneClientID
|
||||
}
|
||||
clientSecret, ok := m.Get(configClientSecret)
|
||||
if !ok {
|
||||
clientSecret = rcloneEncryptedClientSecret
|
||||
}
|
||||
oauthConfig.ClientID = clientID
|
||||
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
|
||||
|
||||
username, ok := m.Get(configUsername)
|
||||
if !ok {
|
||||
log.Fatalf("No username defined")
|
||||
}
|
||||
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
|
||||
|
||||
// prepare out token request with username and password
|
||||
values := url.Values{}
|
||||
values.Set("grant_type", "PASSWORD")
|
||||
values.Set("password", password)
|
||||
values.Set("username", username)
|
||||
values.Set("client_id", oauthConfig.ClientID)
|
||||
values.Set("client_secret", oauthConfig.ClientSecret)
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: oauthConfig.Endpoint.AuthURL,
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
Parameters: values,
|
||||
}
|
||||
|
||||
var jsonToken api.TokenJSON
|
||||
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
|
||||
if err != nil {
|
||||
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
|
||||
if resp != nil {
|
||||
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
|
||||
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
|
||||
fmt.Printf("Enter verification code> ")
|
||||
authCode := config.ReadLine()
|
||||
authCode = strings.Replace(authCode, "-", "", -1) // the sms received contains a pair of 3 digit numbers seperated by '-' but wants a single 6 digit number
|
||||
opts.ExtraHeaders = make(map[string]string)
|
||||
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
|
||||
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to get resource token: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
var token oauth2.Token
|
||||
token.AccessToken = jsonToken.AccessToken
|
||||
token.RefreshToken = jsonToken.RefreshToken
|
||||
token.TokenType = jsonToken.TokenType
|
||||
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
|
||||
|
||||
// finally save them in the config
|
||||
err = oauthutil.PutToken(name, m, &token, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Error while saving token: %s", err)
|
||||
}
|
||||
|
||||
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
|
||||
if config.Confirm() {
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load oAuthClient: %s", err)
|
||||
}
|
||||
|
||||
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
|
||||
acc, err := getAccountInfo(srv, username)
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting devices: %s", err)
|
||||
}
|
||||
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
|
||||
var deviceNames []string
|
||||
for i := range acc.Devices {
|
||||
deviceNames = append(deviceNames, acc.Devices[i].Name)
|
||||
}
|
||||
result := config.Choose("Devices", deviceNames, nil, false)
|
||||
m.Set(configDevice, result)
|
||||
|
||||
dev, err := getDeviceInfo(srv, path.Join(username, result))
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting Mountpoint: %s", err)
|
||||
}
|
||||
if len(dev.MountPoints) == 0 {
|
||||
log.Fatalf("No Mountpoints found for this device.")
|
||||
}
|
||||
fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
|
||||
var mountpointNames []string
|
||||
for i := range dev.MountPoints {
|
||||
mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
|
||||
}
|
||||
result = config.Choose("Mountpoints", mountpointNames, nil, false)
|
||||
m.Set(configMountpoint, result)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User Name",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "mountpoint",
|
||||
Help: "The mountpoint to use.",
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "Sync",
|
||||
Help: "Will be synced by the official client.",
|
||||
}, {
|
||||
Value: "Archive",
|
||||
Help: "Archive",
|
||||
}},
|
||||
Name: configUsername,
|
||||
Help: "User Name:",
|
||||
}, {
|
||||
Name: "md5_memory_limit",
|
||||
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
|
||||
@@ -83,6 +242,11 @@ func init() {
|
||||
Help: "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_resume_limit",
|
||||
Help: "Files bigger than this can be resumed if the upload fail's.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -90,23 +254,26 @@ func init() {
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Device string `config:"device"`
|
||||
Mountpoint string `config:"mountpoint"`
|
||||
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
Unlink bool `config:"unlink"`
|
||||
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
|
||||
}
|
||||
|
||||
// Fs represents a remote jottacloud
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
user string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
endpointURL string
|
||||
srv *rest.Client
|
||||
pacer *pacer.Pacer
|
||||
name string
|
||||
root string
|
||||
user string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
endpointURL string
|
||||
srv *rest.Client
|
||||
apiSrv *rest.Client
|
||||
pacer *fs.Pacer
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
}
|
||||
|
||||
// Object describes a jottacloud object
|
||||
@@ -195,18 +362,31 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// getAccountInfo retrieves account information
|
||||
func (f *Fs) getAccountInfo() (info *api.AccountInfo, err error) {
|
||||
// getAccountInfo queries general information about the account.
|
||||
// Takes rest.Client and username as parameter to be easily usable
|
||||
// during config
|
||||
func getAccountInfo(srv *rest.Client, username string) (info *api.AccountInfo, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: urlPathEscape(f.user),
|
||||
Path: urlPathEscape(username),
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(&opts, nil, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
_, err = srv.CallXML(&opts, nil, &info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// getDeviceInfo queries Information about a jottacloud device
|
||||
func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: urlPathEscape(path),
|
||||
}
|
||||
|
||||
_, err = srv.CallXML(&opts, nil, &info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -215,12 +395,18 @@ func (f *Fs) getAccountInfo() (info *api.AccountInfo, err error) {
|
||||
}
|
||||
|
||||
// setEndpointUrl reads the account id and generates the API endpoint URL
|
||||
func (f *Fs) setEndpointURL(mountpoint string) (err error) {
|
||||
info, err := f.getAccountInfo()
|
||||
func (f *Fs) setEndpointURL() (err error) {
|
||||
info, err := getAccountInfo(f.srv, f.user)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get endpoint url")
|
||||
}
|
||||
f.endpointURL = urlPathEscape(path.Join(info.Username, defaultDevice, mountpoint))
|
||||
if f.opt.Device == "" {
|
||||
f.opt.Device = defaultDevice
|
||||
}
|
||||
if f.opt.Mountpoint == "" {
|
||||
f.opt.Mountpoint = defaultMountpoint
|
||||
}
|
||||
f.endpointURL = urlPathEscape(path.Join(info.Username, f.opt.Device, f.opt.Mountpoint))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -261,6 +447,29 @@ func (o *Object) filePath() string {
|
||||
return o.fs.filePath(o.remote)
|
||||
}
|
||||
|
||||
// Jottacloud requires the grant_type 'refresh_token' string
|
||||
// to be uppercase and throws a 400 Bad Request if we use the
|
||||
// lower case used by the oauth2 module
|
||||
//
|
||||
// This filter catches all refresh requests, reads the body,
|
||||
// changes the case and then sends it on
|
||||
func grantTypeFilter(req *http.Request) {
|
||||
if tokenURL == req.URL.String() {
|
||||
// read the entire body
|
||||
refreshBody, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_ = req.Body.Close()
|
||||
|
||||
// make the refresh token upper case
|
||||
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
|
||||
|
||||
// set the new ReadCloser (with a dummy Close())
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
|
||||
}
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -273,25 +482,40 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
rootIsDir := strings.HasSuffix(root, "/")
|
||||
root = parsePath(root)
|
||||
|
||||
user := config.FileGet(name, "user")
|
||||
pass := config.FileGet(name, "pass")
|
||||
clientID, ok := m.Get(configClientID)
|
||||
if !ok {
|
||||
clientID = rcloneClientID
|
||||
}
|
||||
clientSecret, ok := m.Get(configClientSecret)
|
||||
if !ok {
|
||||
clientSecret = rcloneEncryptedClientSecret
|
||||
}
|
||||
oauthConfig.ClientID = clientID
|
||||
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
|
||||
|
||||
if opt.Pass != "" {
|
||||
var err error
|
||||
opt.Pass, err = obscure.Reveal(opt.Pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't decrypt password")
|
||||
}
|
||||
// the oauth client for the api servers needs
|
||||
// a filter to fix the grant_type issues (see above)
|
||||
baseClient := fshttp.NewClient(fs.Config)
|
||||
if do, ok := baseClient.Transport.(interface {
|
||||
SetRequestFilter(f func(req *http.Request))
|
||||
}); ok {
|
||||
do.SetRequestFilter(grantTypeFilter)
|
||||
} else {
|
||||
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
|
||||
}
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
user: opt.User,
|
||||
opt: *opt,
|
||||
//endpointURL: rest.URLPathEscape(path.Join(user, defaultDevice, opt.Mountpoint)),
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(rootURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
name: name,
|
||||
root: root,
|
||||
user: opt.User,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
|
||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
@@ -299,15 +523,15 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		ReadMimeType:    true,
		WriteMimeType:   true,
	}).Fill(f)

	if user == "" || pass == "" {
		return nil, errors.New("jottacloud needs user and password")
	}

	f.srv.SetUserPass(opt.User, opt.Pass)
	f.srv.SetErrorHandler(errorHandler)

	err = f.setEndpointURL(opt.Mountpoint)
	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, err := f.readMetaDataForPath("")
		return err
	})

	err = f.setEndpointURL()
	if err != nil {
		return nil, errors.Wrap(err, "couldn't get account info")
	}
@@ -319,7 +543,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if f.root == "." {
		f.root = ""
	}
	_, err := f.NewObject(remote)
	_, err := f.NewObject(context.TODO(), remote)
	if err != nil {
		if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
			// File doesn't exist so return old f
@@ -331,7 +555,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}

	return f, nil
}

@@ -348,7 +571,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
		// Set info
		err = o.setMetaData(info)
	} else {
		err = o.readMetaData() // reads info and meta, returning an error
		err = o.readMetaData(false) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
@@ -358,7 +581,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}

@@ -395,8 +618,8 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	//fmt.Printf("List: %s\n", dir)
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	//fmt.Printf("List: %s\n", f.filePath(dir))
	opts := rest.Opts{
		Method: "GET",
		Path:   f.filePath(dir),
@@ -512,7 +735,7 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   f.filePath(dir),
@@ -565,14 +788,17 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o := f.createObject(src.Remote(), src.ModTime(), src.Size())
	return o, o.Update(in, src, options...)
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	if f.opt.Device != "Jotta" {
		return nil, errors.New("upload not supported for devices other than Jotta")
	}
	o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
	return o, o.Update(ctx, in, src, options...)
}

// mkParentDir makes the parent of the native path dirPath if
// necessary and any directories above that
func (f *Fs) mkParentDir(dirPath string) error {
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
	// defer log.Trace(dirPath, "")("")
	// chop off trailing / if it exists
	if strings.HasSuffix(dirPath, "/") {
@@ -582,25 +808,25 @@ func (f *Fs) mkParentDir(dirPath string) error {
	if parent == "." {
		parent = ""
	}
	return f.Mkdir(parent)
	return f.Mkdir(ctx, parent)
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	_, err := f.CreateDir(dir)
	return err
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(dir string, check bool) (err error) {
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
	root := path.Join(f.root, dir)
	if root == "" {
		return errors.New("can't purge root directory")
	}

	// check that the directory exists
	entries, err := f.List(dir)
	entries, err := f.List(ctx, dir)
	if err != nil {
		return err
	}
@@ -640,8 +866,8 @@ func (f *Fs) purgeCheck(dir string, check bool) (err error) {
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
	return f.purgeCheck(dir, true)
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}

// Precision return the precision of this Fs
@@ -654,11 +880,11 @@ func (f *Fs) Precision() time.Duration {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
	return f.purgeCheck("", false)
func (f *Fs) Purge(ctx context.Context) error {
	return f.purgeCheck(ctx, "", false)
}

// copyOrMoves copys or moves directories or files depending on the mthod parameter
// copyOrMove copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
	opts := rest.Opts{
		Method: "POST",
@@ -676,7 +902,6 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro
	if err != nil {
		return nil, err
	}

	return info, nil
}

@@ -689,14 +914,14 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantMove
	}

	err := f.mkParentDir(remote)
	err := f.mkParentDir(ctx, remote)
	if err != nil {
		return nil, err
	}
@@ -719,14 +944,14 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	err := f.mkParentDir(remote)
	err := f.mkParentDir(ctx, remote)
	if err != nil {
		return nil, err
	}
@@ -748,7 +973,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -765,7 +990,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
	//fmt.Printf("Move src: %s (FullPath %s), dst: %s (FullPath: %s)\n", srcRemote, srcPath, dstRemote, dstPath)

	var err error
	_, err = f.List(dstRemote)
	_, err = f.List(ctx, dstRemote)
	if err == fs.ErrorDirNotFound {
		// OK
	} else if err != nil {
@@ -783,7 +1008,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(remote string) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   f.filePath(remote),
@@ -824,13 +1049,13 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
	if result.PublicSharePath == "" {
		return "", errors.New("couldn't create public link - no link path received")
	}
	link = path.Join(shareURL, result.PublicSharePath)
	link = path.Join(baseURL, result.PublicSharePath)
	return link, nil
}

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
	info, err := f.getAccountInfo()
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	info, err := getAccountInfo(f.srv, f.user)
	if err != nil {
		return nil, err
	}
@@ -871,7 +1096,7 @@ func (o *Object) Remote() string {
}

// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
@@ -880,7 +1105,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	err := o.readMetaData()
	err := o.readMetaData(false)
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return 0
@@ -889,28 +1114,31 @@ func (o *Object) Size() int64 {
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.JottaFile) (err error) {
	o.hasMetaData = true
	o.size = int64(info.Size)
	o.size = info.Size
	o.md5 = info.MD5
	o.mimeType = info.MimeType
	o.modTime = time.Time(info.ModifiedAt)
	return nil
}

func (o *Object) readMetaData() (err error) {
	if o.hasMetaData {
func (o *Object) readMetaData(force bool) (err error) {
	if o.hasMetaData && !force {
		return nil
	}
	info, err := o.fs.readMetaDataForPath(o.remote)
	if err != nil {
		return err
	}
	if info.Deleted {
		return fs.ErrorObjectNotFound
	}
	return o.setMetaData(info)
}

@@ -918,8 +1146,8 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
	err := o.readMetaData()
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData(false)
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
@@ -928,7 +1156,7 @@ func (o *Object) ModTime() time.Time {
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

@@ -938,7 +1166,7 @@ func (o *Object) Storable() bool {
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	fs.FixRangeOption(options, o.size)
	var resp *http.Response
	opts := rest.Opts{
@@ -967,7 +1195,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
	// we need a MD5
	md5Hasher := md5.New()
	// use the teeReader to write to the local file AND caclulate the MD5 while doing so
	// use the teeReader to write to the local file AND calculate the MD5 while doing so
	teeReader := io.TeeReader(in, md5Hasher)

	// nothing to clean up by default
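The teeReader line above is the heart of readMD5: io.TeeReader mirrors every byte read from the input into the hasher, so the MD5 falls out as a side effect of consuming the stream. A self-contained sketch of the pattern (names illustrative):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	in := strings.NewReader("example payload")
	hasher := md5.New()
	// Everything read from tee is also written into hasher.
	tee := io.TeeReader(in, hasher)
	payload, err := ioutil.ReadAll(tee) // stands in for the upload
	if err != nil {
		panic(err)
	}
	fmt.Println(len(payload), hex.EncodeToString(hasher.Sum(nil)))
}
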
@@ -1022,9 +1250,9 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	size := src.Size()
	md5String, err := src.Hash(hash.MD5)
	md5String, err := src.Hash(ctx, hash.MD5)
	if err != nil || md5String == "" {
		// unwrap the accounting from the input, we use wrap to put it
		// back on after the buffering
@@ -1040,47 +1268,78 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
		in = wrap(in)
	}

	// use the api to allocate the file first and get resume / deduplication info
	var resp *http.Response
	var result api.JottaFile
	opts := rest.Opts{
		Method:        "POST",
		Path:          o.filePath(),
		Body:          in,
		ContentType:   fs.MimeType(src),
		ContentLength: &size,
		ExtraHeaders:  make(map[string]string),
		Parameters:    url.Values{},
		Method:       "POST",
		Path:         "allocate",
		ExtraHeaders: make(map[string]string),
	}
	fileDate := api.Time(src.ModTime(ctx)).APIString()

	// the allocate request
	var request = api.AllocateFileRequest{
		Bytes:    size,
		Created:  fileDate,
		Modified: fileDate,
		Md5:      md5String,
		Path:     path.Join(o.fs.opt.Mountpoint, replaceReservedChars(path.Join(o.fs.root, o.remote))),
	}

	opts.ExtraHeaders["JMd5"] = md5String
	opts.Parameters.Set("cphash", md5String)
	opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
	// opts.ExtraHeaders["JCreated"] = api.Time(src.ModTime()).String()
	opts.ExtraHeaders["JModified"] = api.Time(src.ModTime()).String()

	// Parameters observed in other implementations
	//opts.ExtraHeaders["X-Jfs-DeviceName"] = "Jotta"
	//opts.ExtraHeaders["X-Jfs-Devicename-Base64"] = ""
	//opts.ExtraHeaders["X-Jftp-Version"] = "2.4" this appears to be the current version
	//opts.ExtraHeaders["jx_csid"] = ""
	//opts.ExtraHeaders["jx_lisence"] = ""

	opts.Parameters.Set("umode", "nomultipart")

	// send it
	var response api.AllocateFileResponse
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.srv.CallXML(&opts, nil, &result)
		resp, err = o.fs.apiSrv.CallJSON(&opts, &request, &response)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}

	// TODO: Check returned Metadata? Timeout on big uploads?
	return o.setMetaData(&result)
	// If the file state is INCOMPLETE or CORRUPT, try to upload it again
	if response.State != "COMPLETED" {
		// how much do we still have to upload?
		remainingBytes := size - response.ResumePos
		opts = rest.Opts{
			Method:        "POST",
			RootURL:       response.UploadURL,
			ContentLength: &remainingBytes,
			ContentType:   "application/octet-stream",
			Body:          in,
			ExtraHeaders:  make(map[string]string),
		}
		if response.ResumePos != 0 {
			opts.ExtraHeaders["Range"] = "bytes=" + strconv.FormatInt(response.ResumePos, 10) + "-" + strconv.FormatInt(size-1, 10)
		}

		// copy the already uploaded bytes into the trash :)
		var result api.UploadResponse
		_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
		if err != nil {
			return err
		}

		// send the remaining bytes
		resp, err = o.fs.apiSrv.CallJSON(&opts, nil, &result)
		if err != nil {
			return err
		}

		// finally update the meta data
		o.hasMetaData = true
		o.size = result.Bytes
		o.md5 = result.Md5
		o.modTime = time.Unix(result.Modified/1000, 0)
	} else {
		// If the file state is COMPLETED we don't need to upload it because the file was already found, but we still need to update our metadata
		return o.readMetaData(true)
	}

	return nil
}

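The resume branch above boils down to two numbers from the allocate response: ResumePos (bytes the server already holds) and the total size. The reader is advanced past the prefix the server has, and the remainder is described with an HTTP Range header. A standalone sketch of that arithmetic, with the api types replaced by plain values (illustrative, not the Jottacloud API itself):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	const size = int64(1000)
	resumePos := int64(400) // what the server reports it already has
	in := strings.NewReader(strings.Repeat("x", int(size)))

	// Skip the prefix the server already holds...
	if _, err := io.CopyN(ioutil.Discard, in, resumePos); err != nil {
		panic(err)
	}
	// ...and describe the remainder with a Range header.
	rangeHeader := fmt.Sprintf("bytes=%d-%d", resumePos, size-1)
	fmt.Println(rangeHeader, size-resumePos) // bytes=400-999 600
}
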
// Remove an object
func (o *Object) Remove() error {
func (o *Object) Remove(ctx context.Context) error {
	opts := rest.Opts{
		Method: "POST",
		Path:   o.filePath(),

@@ -6,7 +6,7 @@ import (
	"io"
	"testing"

	"github.com/ncw/rclone/lib/readers"
	"github.com/rclone/rclone/lib/readers"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

@@ -4,8 +4,8 @@ package jottacloud_test
import (
	"testing"

	"github.com/ncw/rclone/backend/jottacloud"
	"github.com/ncw/rclone/fstest/fstests"
	"github.com/rclone/rclone/backend/jottacloud"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote

@@ -2,7 +2,7 @@
Translate file names for JottaCloud adapted from OneDrive

The following characters are JottaClous reserved characters, and can't
The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.

jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"

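replaceReservedChars (referenced from Update above) follows the usual rclone pattern, adapted from OneDrive: swap each reserved character for a look-alike Unicode full-width rune on upload and reverse the mapping on download. A minimal sketch of the idea, with an illustrative subset of the table rather than the real jottacloud one:

package main

import (
	"fmt"
	"strings"
)

// An illustrative subset of a reserved-character map.
var charMap = map[rune]rune{
	'*': '＊', // FULLWIDTH ASTERISK
	'?': '？', // FULLWIDTH QUESTION MARK
	'|': '｜', // FULLWIDTH VERTICAL LINE
}

func replaceReserved(in string) string {
	return strings.Map(func(r rune) rune {
		if repl, ok := charMap[r]; ok {
			return repl
		}
		return r
	}, in)
}

func main() {
	fmt.Println(replaceReserved("what?*|name"))
}
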
backend/koofr/koofr.go (new file, 605 lines)
@@ -0,0 +1,605 @@
package koofr

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/hash"

	httpclient "github.com/koofr/go-httpclient"
	koofrclient "github.com/koofr/go-koofrclient"
)

// Register Fs with rclone
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "koofr",
		Description: "Koofr",
		NewFs:       NewFs,
		Options: []fs.Option{
			{
				Name:     "endpoint",
				Help:     "The Koofr API endpoint to use",
				Default:  "https://app.koofr.net",
				Required: true,
				Advanced: true,
			}, {
				Name:     "mountid",
				Help:     "Mount ID of the mount to use. If omitted, the primary mount is used.",
				Required: false,
				Default:  "",
				Advanced: true,
			}, {
				Name:     "setmtime",
				Help:     "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
				Default:  true,
				Required: true,
				Advanced: true,
			}, {
				Name:     "user",
				Help:     "Your Koofr user name",
				Required: true,
			}, {
				Name:       "password",
				Help:       "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
				IsPassword: true,
				Required:   true,
			},
		},
	})
}

// Options represent the configuration of the Koofr backend
type Options struct {
	Endpoint string `config:"endpoint"`
	MountID  string `config:"mountid"`
	User     string `config:"user"`
	Password string `config:"password"`
	SetMTime bool   `config:"setmtime"`
}

// A Fs is a representation of a remote Koofr Fs
type Fs struct {
	name     string
	mountID  string
	root     string
	opt      Options
	features *fs.Features
	client   *koofrclient.KoofrClient
}

// An Object on the remote Koofr Fs
type Object struct {
	fs     *Fs
	remote string
	info   koofrclient.FileInfo
}

func base(pth string) string {
	rv := path.Base(pth)
	if rv == "" || rv == "." {
		rv = "/"
	}
	return rv
}

func dir(pth string) string {
	rv := path.Dir(pth)
	if rv == "" || rv == "." {
		rv = "/"
	}
	return rv
}

// String returns a string representation of the remote Object
func (o *Object) String() string {
	return o.remote
}

// Remote returns the remote path of the Object, relative to Fs root
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification time of the Object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
}

// Size return the size of the Object in bytes
func (o *Object) Size() int64 {
	return o.info.Size
}

// Fs returns a reference to the Koofr Fs containing the Object
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns an MD5 hash of the Object
func (o *Object) Hash(ctx context.Context, typ hash.Type) (string, error) {
	if typ == hash.MD5 {
		return o.info.Hash, nil
	}
	return "", nil
}

// fullPath returns full path of the remote Object (including Fs root)
func (o *Object) fullPath() string {
	return o.fs.fullPath(o.remote)
}

// Storable returns true if the Object is storable
func (o *Object) Storable() bool {
	return true
}

// SetModTime is not supported
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
	return fs.ErrorCantSetModTimeWithoutDelete
}

// Open opens the Object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	var sOff, eOff int64 = 0, -1

	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			sOff = x.Offset
		case *fs.RangeOption:
			sOff = x.Start
			eOff = x.End
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	if sOff == 0 && eOff < 0 {
		return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
	}
	if sOff < 0 {
		sOff = o.Size() - eOff
		eOff = o.Size()
	}
	if eOff > o.Size() {
		eOff = o.Size()
	}
	span := &koofrclient.FileSpan{
		Start: sOff,
		End:   eOff,
	}
	return o.fs.client.FilesGetRange(o.fs.mountID, o.fullPath(), span)
}

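Open's offset handling follows rclone's option conventions: a SeekOption carries a start only, a RangeOption carries start and end, and a negative start means "the last eOff bytes of the file". The normalisation can be checked in isolation; a sketch with plain integers instead of fs options (illustrative):

package main

import "fmt"

// normalise converts (start, end) as delivered by seek/range options
// into a concrete byte window inside a file of the given size. It also
// folds in the whole-file case handled separately above.
func normalise(sOff, eOff, size int64) (int64, int64) {
	if sOff < 0 { // tail request: the last eOff bytes
		sOff = size - eOff
		eOff = size
	}
	if eOff > size || eOff < 0 {
		eOff = size
	}
	return sOff, eOff
}

func main() {
	fmt.Println(normalise(0, -1, 100))  // whole file: 0 100
	fmt.Println(normalise(-1, 10, 100)) // last 10 bytes: 90 100
	fmt.Println(normalise(20, 50, 100)) // explicit range: 20 50
}
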
// Update updates the Object contents
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
	putopts := &koofrclient.PutOptions{
		ForceOverwrite:             true,
		NoRename:                   true,
		OverwriteIgnoreNonExisting: true,
		SetModified:                &mtime,
	}
	fullPath := o.fullPath()
	dirPath := dir(fullPath)
	name := base(fullPath)
	err := o.fs.mkdir(dirPath)
	if err != nil {
		return err
	}
	info, err := o.fs.client.FilesPutWithOptions(o.fs.mountID, dirPath, name, in, putopts)
	if err != nil {
		return err
	}
	o.info = *info
	return nil
}

// Remove deletes the remote Object
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
}

// Name returns the name of the Fs
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root path of the Fs
func (f *Fs) Root() string {
	return f.root
}

// String returns a string representation of the Fs
func (f *Fs) String() string {
	return "koofr:" + f.mountID + ":" + f.root
}

// Features returns the optional features supported by this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Precision denotes that setting modification times is not supported
func (f *Fs) Precision() time.Duration {
	if !f.opt.SetMTime {
		return fs.ModTimeNotSupported
	}
	return time.Millisecond
}

// Hashes returns the set of hashes provided by the Fs
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// fullPath constructs a full, absolute path from a Fs root relative path
func (f *Fs) fullPath(part string) string {
	return path.Join("/", f.root, part)
}

// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
	opt := new(Options)
	err = configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	pass, err := obscure.Reveal(opt.Password)
	if err != nil {
		return nil, err
	}
	client := koofrclient.NewKoofrClient(opt.Endpoint, false)
	basicAuth := fmt.Sprintf("Basic %s",
		base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
	client.HTTPClient.Headers.Set("Authorization", basicAuth)
	mounts, err := client.Mounts()
	if err != nil {
		return nil, err
	}
	f := &Fs{
		name:   name,
		root:   root,
		opt:    *opt,
		client: client,
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		DuplicateFiles:          false,
		BucketBased:             false,
		CanHaveEmptyDirectories: true,
	}).Fill(f)
	for _, m := range mounts {
		if opt.MountID != "" {
			if m.Id == opt.MountID {
				f.mountID = m.Id
				break
			}
		} else if m.IsPrimary {
			f.mountID = m.Id
			break
		}
	}
	if f.mountID == "" {
		if opt.MountID == "" {
			return nil, errors.New("Failed to find primary mount")
		}
		return nil, errors.New("Failed to find mount " + opt.MountID)
	}
	rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
	if err == nil && rootFile.Type != "dir" {
		f.root = dir(f.root)
		err = fs.ErrorIsFile
	} else {
		err = nil
	}
	return f, err
}

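Tying the options registered in init() to NewFs above, a hand-written remote might look like this in rclone.conf (a hypothetical example: the values are placeholders, the password must be the obscured app password since NewFs calls obscure.Reveal on it, and mountid can be left out to use the primary mount):

[koofr]
type = koofr
user = you@example.com
password = <output of: rclone obscure YOUR-APP-PASSWORD>
# endpoint defaults to https://app.koofr.net
# mountid =       # optional: a specific mount instead of the primary one
# setmtime = true # set false for mounts backed by Dropbox or Amazon Drive
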
// List returns a list of items in a directory
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
	if err != nil {
		return nil, translateErrorsDir(err)
	}
	entries = make([]fs.DirEntry, len(files))
	for i, file := range files {
		if file.Type == "dir" {
			entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
		} else {
			entries[i] = &Object{
				fs:     f,
				info:   file,
				remote: path.Join(dir, file.Name),
			}
		}
	}
	return entries, nil
}

// NewObject creates a new remote Object for a given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err error) {
	info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
	if err != nil {
		return nil, translateErrorsObject(err)
	}
	if info.Type == "dir" {
		return nil, fs.ErrorNotAFile
	}
	return &Object{
		fs:     f,
		info:   info,
		remote: remote,
	}, nil
}

// Put updates a remote Object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
	putopts := &koofrclient.PutOptions{
		ForceOverwrite:             true,
		NoRename:                   true,
		OverwriteIgnoreNonExisting: true,
		SetModified:                &mtime,
	}
	fullPath := f.fullPath(src.Remote())
	dirPath := dir(fullPath)
	name := base(fullPath)
	err = f.mkdir(dirPath)
	if err != nil {
		return nil, err
	}
	info, err := f.client.FilesPutWithOptions(f.mountID, dirPath, name, in, putopts)
	if err != nil {
		return nil, translateErrorsObject(err)
	}
	return &Object{
		fs:     f,
		info:   *info,
		remote: src.Remote(),
	}, nil
}

// PutStream updates a remote Object with a stream of unknown size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// isBadRequest is a predicate which holds true iff the error returned was
// HTTP status 400
func isBadRequest(err error) bool {
	switch err := err.(type) {
	case httpclient.InvalidStatusError:
		if err.Got == http.StatusBadRequest {
			return true
		}
	}
	return false
}

// translateErrorsDir translates koofr errors to rclone errors (for a dir
// operation)
func translateErrorsDir(err error) error {
	switch err := err.(type) {
	case httpclient.InvalidStatusError:
		if err.Got == http.StatusNotFound {
			return fs.ErrorDirNotFound
		}
	}
	return err
}

// translateErrorsObject translates Koofr errors to rclone errors (for an object operation)
func translateErrorsObject(err error) error {
	switch err := err.(type) {
	case httpclient.InvalidStatusError:
		if err.Got == http.StatusNotFound {
			return fs.ErrorObjectNotFound
		}
	}
	return err
}

// mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) mkdir(fullPath string) error {
	if fullPath == "/" {
		return nil
	}
	info, err := f.client.FilesInfo(f.mountID, fullPath)
	if err == nil && info.Type == "dir" {
		return nil
	}
	err = translateErrorsDir(err)
	if err != nil && err != fs.ErrorDirNotFound {
		return err
	}
	dirs := strings.Split(fullPath, "/")
	parent := "/"
	for _, part := range dirs {
		if part == "" {
			continue
		}
		info, err = f.client.FilesInfo(f.mountID, path.Join(parent, part))
		if err != nil || info.Type != "dir" {
			err = translateErrorsDir(err)
			if err != nil && err != fs.ErrorDirNotFound {
				return err
			}
			err = f.client.FilesNewFolder(f.mountID, parent, part)
			if err != nil && !isBadRequest(err) {
				return err
			}
		}
		parent = path.Join(parent, part)
	}
	return nil
}

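The ancestor walk in mkdir is a generic technique: split the path on "/", then probe and create each component left to right, tolerating "already exists" races (that is what the isBadRequest check forgives). The walk itself is easy to see with an in-memory stand-in for the API (illustrative):

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	existing := map[string]bool{"/": true}
	fullPath := "/a/b/c"

	parent := "/"
	for _, part := range strings.Split(fullPath, "/") {
		if part == "" {
			continue
		}
		cur := path.Join(parent, part)
		if !existing[cur] {
			existing[cur] = true // stands in for FilesNewFolder
			fmt.Println("created", cur)
		}
		parent = cur
	}
}
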
// Mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	fullPath := f.fullPath(dir)
	return f.mkdir(fullPath)
}

// Rmdir removes an (empty) directory at the given remote path
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
	if err != nil {
		return translateErrorsDir(err)
	}
	if len(files) > 0 {
		return fs.ErrorDirectoryNotEmpty
	}
	err = f.client.FilesDelete(f.mountID, f.fullPath(dir))
	if err != nil {
		return translateErrorsDir(err)
	}
	return nil
}

// Copy copies a remote Object to the given path
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstFullPath := f.fullPath(remote)
	dstDir := dir(dstFullPath)
	err := f.mkdir(dstDir)
	if err != nil {
		return nil, fs.ErrorCantCopy
	}
	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
	err = f.client.FilesCopy((src.(*Object)).fs.mountID,
		(src.(*Object)).fs.fullPath((src.(*Object)).remote),
		f.mountID, dstFullPath, koofrclient.CopyOptions{SetModified: &mtime})
	if err != nil {
		return nil, fs.ErrorCantCopy
	}
	return f.NewObject(ctx, remote)
}

// Move moves a remote Object to the given path
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj := src.(*Object)
	dstFullPath := f.fullPath(remote)
	dstDir := dir(dstFullPath)
	err := f.mkdir(dstDir)
	if err != nil {
		return nil, fs.ErrorCantMove
	}
	err = f.client.FilesMove(srcObj.fs.mountID,
		srcObj.fs.fullPath(srcObj.remote), f.mountID, dstFullPath)
	if err != nil {
		return nil, fs.ErrorCantMove
	}
	return f.NewObject(ctx, remote)
}

// DirMove moves a remote directory to the given path
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs := src.(*Fs)
	srcFullPath := srcFs.fullPath(srcRemote)
	dstFullPath := f.fullPath(dstRemote)
	if srcFs.mountID == f.mountID && srcFullPath == dstFullPath {
		return fs.ErrorDirExists
	}
	dstDir := dir(dstFullPath)
	err := f.mkdir(dstDir)
	if err != nil {
		return fs.ErrorCantDirMove
	}
	err = f.client.FilesMove(srcFs.mountID, srcFullPath, f.mountID, dstFullPath)
	if err != nil {
		return fs.ErrorCantDirMove
	}
	return nil
}

// About reports space usage (with a MB precision)
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	mount, err := f.client.MountsDetails(f.mountID)
	if err != nil {
		return nil, err
	}
	return &fs.Usage{
		Total:   fs.NewUsageValue(mount.SpaceTotal * 1024 * 1024),
		Used:    fs.NewUsageValue(mount.SpaceUsed * 1024 * 1024),
		Trashed: nil,
		Other:   nil,
		Free:    fs.NewUsageValue((mount.SpaceTotal - mount.SpaceUsed) * 1024 * 1024),
		Objects: nil,
	}, nil
}

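Since MountsDetails reports SpaceTotal and SpaceUsed in megabytes, About multiplies by 1024*1024 to produce the byte counts rclone expects, which is why the doc comment says "with a MB precision". For example:

package main

import "fmt"

func main() {
	const mb = int64(1024 * 1024)
	spaceTotal, spaceUsed := int64(10240), int64(1536) // MB, as the API returns them
	fmt.Println(spaceTotal * mb)               // 10737418240 bytes total (10 GiB)
	fmt.Println(spaceUsed * mb)                // 1610612736 bytes used
	fmt.Println((spaceTotal - spaceUsed) * mb) // 9126805504 bytes free
}
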
// Purge purges the complete Fs
func (f *Fs) Purge(ctx context.Context) error {
	err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
	return err
}

// linkCreate is a Koofr API request for creating a public link
type linkCreate struct {
	Path string `json:"path"`
}

// link is a Koofr API response to creating a public link
type link struct {
	ID               string `json:"id"`
	Name             string `json:"name"`
	Path             string `json:"path"`
	Counter          int64  `json:"counter"`
	URL              string `json:"url"`
	ShortURL         string `json:"shortUrl"`
	Hash             string `json:"hash"`
	Host             string `json:"host"`
	HasPassword      bool   `json:"hasPassword"`
	Password         string `json:"password"`
	ValidFrom        int64  `json:"validFrom"`
	ValidTo          int64  `json:"validTo"`
	PasswordRequired bool   `json:"passwordRequired"`
}

// createLink makes a Koofr API call to create a public link
func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, error) {
	linkCreate := linkCreate{
		Path: path,
	}
	linkData := link{}

	request := httpclient.RequestData{
		Method:         "POST",
		Path:           "/api/v2/mounts/" + mountID + "/links",
		ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
		ReqEncoding:    httpclient.EncodingJSON,
		ReqValue:       linkCreate,
		RespEncoding:   httpclient.EncodingJSON,
		RespValue:      &linkData,
	}

	_, err := c.Request(&request)
	if err != nil {
		return nil, err
	}
	return &linkData, nil
}

// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
	linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
	if err != nil {
		return "", translateErrorsDir(err)
	}
	return linkData.ShortURL, nil
}

backend/koofr/koofr_test.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package koofr_test

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestKoofr:",
	})
}

@@ -3,20 +3,21 @@
package local

import (
	"context"
	"syscall"

	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
)

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var s syscall.Statfs_t
	err := syscall.Statfs(f.root, &s)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read disk usage")
	}
	bs := int64(s.Bsize)
	bs := int64(s.Bsize) // nolint: unconvert
	usage := &fs.Usage{
		Total: fs.NewUsageValue(bs * int64(s.Blocks)),         // quota of bytes that can be used
		Used:  fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use

@@ -3,17 +3,18 @@
package local

import (
	"context"
	"syscall"
	"unsafe"

	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
)

var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var available, total, free int64
	_, _, e1 := getFreeDiskSpace.Call(
		uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),

backend/local/lchtimes.go (new file, 20 lines)
@@ -0,0 +1,20 @@
// +build windows plan9

package local

import (
	"time"
)

const haveLChtimes = false

// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
	// Does nothing
	return nil
}

backend/local/lchtimes_unix.go (new file, 28 lines)
@@ -0,0 +1,28 @@
// +build !windows,!plan9

package local

import (
	"os"
	"time"

	"golang.org/x/sys/unix"
)

const haveLChtimes = true

// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
	var utimes [2]unix.Timespec
	utimes[0] = unix.NsecToTimespec(atime.UnixNano())
	utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
	if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); e != nil {
		return &os.PathError{Op: "lchtimes", Path: name, Err: e}
	}
	return nil
}

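A quick way to see the Unix implementation in action is to stamp a symlink and then inspect it with Lstat (which, unlike Stat, looks at the link itself). A sketch that assumes it lives in the same package as lChtimes above (paths are placeholders):

package local

import (
	"fmt"
	"os"
	"time"
)

func exampleLChtimes() error {
	link := "/tmp/rclone-link-example"
	_ = os.Remove(link)
	if err := os.Symlink("/etc/hostname", link); err != nil {
		return err
	}
	when := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
	if err := lChtimes(link, when, when); err != nil {
		return err
	}
	fi, err := os.Lstat(link) // Lstat: the link, not its target
	if err != nil {
		return err
	}
	fmt.Println(fi.ModTime().UTC()) // 2019-01-01 00:00:00 +0000 UTC
	return nil
}
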
@@ -2,6 +2,8 @@
package local

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
@@ -15,18 +17,21 @@ import (
	"time"
	"unicode/utf8"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/accounting"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/lib/readers"
)

// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const devUnset = 0xdeadbeefcafebabe                                       // a device id meaning it is unset
const linkSuffix = ".rclonelink"                                          // The suffix added to a translated symbolic link
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly

// Register with Fs
func init() {
@@ -48,6 +53,13 @@ func init() {
			NoPrefix: true,
			ShortOpt: "L",
			Advanced: true,
		}, {
			Name:     "links",
			Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
			Default:  false,
			NoPrefix: true,
			ShortOpt: "l",
			Advanced: true,
		}, {
			Name: "skip_links",
			Help: `Don't warn about skipped symlinks.
@@ -74,7 +86,7 @@ are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.

However on some file systems this modification time check may fail (eg
[Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
check can be disabled with this flag.`,
			Default:  false,
			Advanced: true,
@@ -85,6 +97,24 @@ check can be disabled with this flag.`,
			NoPrefix: true,
			ShortOpt: "x",
			Advanced: true,
		}, {
			Name: "case_sensitive",
			Help: `Force the filesystem to report itself as case sensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "case_insensitive",
			Help: `Force the filesystem to report itself as case insensitive

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
			Default:  false,
			Advanced: true,
		}},
	}
	fs.Register(fsi)
@@ -92,12 +122,15 @@ check can be disabled with this flag.`,

// Options defines the configuration for this backend
type Options struct {
	FollowSymlinks bool `config:"copy_links"`
	SkipSymlinks   bool `config:"skip_links"`
	NoUTFNorm      bool `config:"no_unicode_normalization"`
	NoCheckUpdated bool `config:"no_check_updated"`
	NoUNC          bool `config:"nounc"`
	OneFileSystem  bool `config:"one_file_system"`
	FollowSymlinks    bool `config:"copy_links"`
	TranslateSymlinks bool `config:"links"`
	SkipSymlinks      bool `config:"skip_links"`
	NoUTFNorm         bool `config:"no_unicode_normalization"`
	NoCheckUpdated    bool `config:"no_check_updated"`
	NoUNC             bool `config:"nounc"`
	OneFileSystem     bool `config:"one_file_system"`
	CaseSensitive     bool `config:"case_sensitive"`
	CaseInsensitive   bool `config:"case_insensitive"`
}

// Fs represents a local filesystem rooted at root
@@ -119,17 +152,20 @@ type Fs struct {

// Object represents a local filesystem object
type Object struct {
	fs      *Fs                  // The Fs this object is part of
	remote  string               // The remote path - properly UTF-8 encoded - for rclone
	path    string               // The local path - may not be properly UTF-8 encoded - for OS
	size    int64                // file metadata - always present
	mode    os.FileMode
	modTime time.Time
	hashes  map[hash.Type]string // Hashes
	fs             *Fs                  // The Fs this object is part of
	remote         string               // The remote path - properly UTF-8 encoded - for rclone
	path           string               // The local path - may not be properly UTF-8 encoded - for OS
	size           int64                // file metadata - always present
	mode           os.FileMode
	modTime        time.Time
	hashes         map[hash.Type]string // Hashes
	translatedLink bool                 // Is this object a translated link
}

// ------------------------------------------------------------

var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")

// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
@@ -138,6 +174,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if err != nil {
		return nil, err
	}
	if opt.TranslateSymlinks && opt.FollowSymlinks {
		return nil, errLinksAndCopyLinks
	}

	if opt.NoUTFNorm {
		fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
@@ -165,7 +204,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if err == nil {
		f.dev = readDevice(fi, f.opt.OneFileSystem)
	}
	if err == nil && fi.Mode().IsRegular() {
	if err == nil && f.isRegular(fi.Mode()) {
		// It is a file, so use the parent as the root
		f.root = filepath.Dir(f.root)
		// return an error with an fs which points to the parent
@@ -174,6 +213,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	return f, nil
}

// isRegular determines whether a file is a 'regular' file.
// Symlinks count as regular files only if the TranslateSymlinks
// option is in effect
func (f *Fs) isRegular(mode os.FileMode) bool {
	if !f.opt.TranslateSymlinks {
		return mode.IsRegular()
	}

	// fi.Mode().IsRegular() tests that all mode bits are zero
	// Since symlinks are accepted, test that all other bits are zero,
	// except the symlink bit
	return mode&os.ModeType&^os.ModeSymlink == 0
}

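The bit manipulation in isRegular rewards a second look: os.ModeType is the union of all file-type bits, so masking with it and then clearing ModeSymlink leaves zero exactly for plain files and for symlinks, and non-zero for everything else. A quick check (illustrative):

package main

import (
	"fmt"
	"os"
)

func regularOrSymlink(mode os.FileMode) bool {
	return mode&os.ModeType&^os.ModeSymlink == 0
}

func main() {
	fmt.Println(regularOrSymlink(0))              // plain file: true
	fmt.Println(regularOrSymlink(os.ModeSymlink)) // symlink: true
	fmt.Println(regularOrSymlink(os.ModeDir))     // directory: false
	fmt.Println(regularOrSymlink(os.ModeSocket))  // socket: false
}
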
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
@@ -194,28 +247,54 @@ func (f *Fs) Features() *fs.Features {
	return f.features
}

// caseInsenstive returns whether the remote is case insensitive or not
// caseInsensitive returns whether the remote is case insensitive or not
func (f *Fs) caseInsensitive() bool {
	if f.opt.CaseSensitive {
		return false
	}
	if f.opt.CaseInsensitive {
		return true
	}
	// FIXME not entirely accurate since you can have case
	// sensitive Fses on darwin and case insenstive Fses on linux.
	// sensitive Fses on darwin and case insensitive Fses on linux.
	// Should probably check but that would involve creating a
	// file in the remote to be most accurate which probably isn't
	// desirable.
	return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
}

// translateLink checks whether the remote is a translated link
// and returns a new path, removing the suffix as needed.
// It also returns whether this is a translated link at all
//
// for regular files, dstPath is returned unchanged
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
	isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
	newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
	return newDstPath, isTranslatedLink
}

// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
	translatedLink := false

	if dstPath == "" {
		dstPath = f.cleanPath(filepath.Join(f.root, remote))
	}
	remote = f.cleanRemote(remote)

	if f.opt.TranslateSymlinks {
		// Possibly receive a new name for dstPath
		dstPath, translatedLink = translateLink(remote, dstPath)
	}

	return &Object{
		fs:     f,
		remote: remote,
		path:   dstPath,
		fs:             f,
		remote:         remote,
		path:           dstPath,
		translatedLink: translatedLink,
	}
}

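translateLink is pure string manipulation, so its behaviour pins down easily with a couple of cases (sketch reusing the same suffix constant):

package main

import (
	"fmt"
	"strings"
)

const linkSuffix = ".rclonelink"

func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
	isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
	newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
	return newDstPath, isTranslatedLink
}

func main() {
	fmt.Println(translateLink("a/b.rclonelink", "/root/a/b.rclonelink")) // /root/a/b true
	fmt.Println(translateLink("a/b.txt", "/root/a/b.txt"))               // /root/a/b.txt false
}
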
@@ -237,6 +316,11 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
		}
		return nil, err
	}
	// Handle the odd case, that a symlink was specified by name without the link suffix
	if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
		return nil, fs.ErrorObjectNotFound
	}

	}
	if o.mode.IsDir() {
		return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
@@ -246,7 +330,7 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj

// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, "", nil)
}

@@ -259,7 +343,8 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {

	dir = f.dirNames.Load(dir)
	fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
	remote := f.cleanRemote(dir)
@@ -270,7 +355,14 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {

	fd, err := os.Open(fsDirPath)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to open directory %q", dir)
		isPerm := os.IsPermission(err)
		err = errors.Wrapf(err, "failed to open directory %q", dir)
		fs.Errorf(dir, "%v", err)
		if isPerm {
			accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
			err = nil // ignore error but fail sync
		}
		return nil, err
	}
	defer func() {
		cerr := fd.Close()
@@ -280,12 +372,38 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	}()

	for {
		fis, err := fd.Readdir(1024)
		if err == io.EOF && len(fis) == 0 {
			break
		var fis []os.FileInfo
		if useReadDir {
			// Windows and Plan9 read the directory entries with the stat information in one call,
			// which shouldn't fail because of unreadable entries.
			fis, err = fd.Readdir(1024)
			if err == io.EOF && len(fis) == 0 {
				break
			}
		} else {
			// For other OSes we read the names only (which shouldn't fail) then stat the
			// individual entries ourselves so we can log errors but not fail the directory read.
			var names []string
			names, err = fd.Readdirnames(1024)
			if err == io.EOF && len(names) == 0 {
				break
			}
			if err == nil {
				for _, name := range names {
					namepath := filepath.Join(fsDirPath, name)
					fi, fierr := os.Lstat(namepath)
					if fierr != nil {
						err = errors.Wrapf(err, "failed to read directory %q", namepath)
						fs.Errorf(dir, "%v", fierr)
						accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
						continue
					}
					fis = append(fis, fi)
				}
			}
		}
		if err != nil {
			return nil, errors.Wrapf(err, "failed to read directory %q", dir)
			return nil, errors.Wrap(err, "failed to read directory entry")
		}

for _, fi := range fis {
|
||||
@@ -300,7 +418,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
// Skip bad symlinks
|
||||
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
|
||||
fs.Errorf(newRemote, "Listing error: %v", err)
|
||||
accounting.Stats.Error(err)
|
||||
accounting.Stats(ctx).Error(err)
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
@@ -316,6 +434,10 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
entries = append(entries, d)
|
||||
}
|
||||
} else {
|
||||
// Check whether this link should be translated
|
||||
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
|
||||
newRemote += linkSuffix
|
||||
}
|
||||
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -386,11 +508,11 @@ func (m *mapper) Save(in, out string) string {
|
||||
}
|
||||
|
||||
// Put the Object to the local filesystem
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
remote := src.Remote()
|
||||
// Temporary Object under construction - info filled in by Update()
|
||||
o := f.newObject(remote, "")
|
||||
err := o.Update(in, src, options...)
|
||||
err := o.Update(ctx, in, src, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -398,12 +520,12 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
 }

 // Mkdir creates the directory if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
 	root := f.cleanPath(filepath.Join(f.root, dir))
 	err := os.MkdirAll(root, 0777)
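
These signature changes all follow the standard Go convention of threading a context.Context as the first parameter so cancellation can propagate through the call chain. A minimal sketch of the pattern (the interface and names are illustrative):

    package main

    import (
    	"context"
    	"fmt"
    )

    // putter mirrors the shape of the changed methods: ctx comes first,
    // and wrappers pass it straight through to the method they delegate to.
    type putter interface {
    	Put(ctx context.Context, data string) error
    }

    type local struct{}

    func (l local) Put(ctx context.Context, data string) error {
    	// Honour cancellation before doing any work
    	if err := ctx.Err(); err != nil {
    		return err
    	}
    	fmt.Println("stored", data)
    	return nil
    }

    // putStream delegates exactly as Fs.PutStream delegates to Fs.Put above.
    func putStream(ctx context.Context, p putter, data string) error {
    	return p.Put(ctx, data)
    }

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	defer cancel()
    	if err := putStream(ctx, local{}, "hello"); err != nil {
    		fmt.Println("error:", err)
    	}
    }
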
@@ -423,7 +545,7 @@ func (f *Fs) Mkdir(dir string) error {
 // Rmdir removes the directory
 //
 // If it isn't empty it will return an error
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	root := f.cleanPath(filepath.Join(f.root, dir))
 	return os.Remove(root)
 }
@@ -479,7 +601,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
 		}

 		// If it matches - have found the precision
-		// fmt.Println("compare", fi.ModTime(), t)
+		// fmt.Println("compare", fi.ModTime(ctx), t)
 		if fi.ModTime().Equal(t) {
 			// fmt.Println("Precision detected as", duration)
 			return duration
@@ -493,7 +615,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	fi, err := f.lstat(f.root)
 	if err != nil {
 		return err
@@ -513,7 +635,7 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
@@ -529,7 +651,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 		// OK
 	} else if err != nil {
 		return nil, err
-	} else if !dstObj.mode.IsRegular() {
+	} else if !dstObj.fs.isRegular(dstObj.mode) {
 		// It isn't a file
 		return nil, errors.New("can't move file onto non-file")
 	}
@@ -572,7 +694,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -637,7 +759,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the requested hash of a file as a lowercase hex string
-func (o *Object) Hash(r hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 	// Check that the underlying file hasn't changed
 	oldtime := o.modTime
 	oldsize := o.size
@@ -648,14 +770,21 @@ func (o *Object) Hash(r hash.Type) (string, error) {

 	o.fs.objectHashesMu.Lock()
 	hashes := o.hashes
+	hashValue, hashFound := o.hashes[r]
 	o.fs.objectHashesMu.Unlock()

-	if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
-		in, err := os.Open(o.path)
+	if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil || !hashFound {
+		var in io.ReadCloser
+
+		if !o.translatedLink {
+			in, err = file.Open(o.path)
+		} else {
+			in, err = o.openTranslatedLink(0, -1)
+		}
 		if err != nil {
 			return "", errors.Wrap(err, "hash: failed to open")
 		}
-		hashes, err = hash.Stream(in)
+		hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
 		closeErr := in.Close()
 		if err != nil {
 			return "", errors.Wrap(err, "hash: failed to read")
@@ -663,11 +792,16 @@ func (o *Object) Hash(r hash.Type) (string, error) {
 		if closeErr != nil {
 			return "", errors.Wrap(closeErr, "hash: failed to close")
 		}
+		hashValue = hashes[r]
 		o.fs.objectHashesMu.Lock()
-		o.hashes = hashes
+		if o.hashes == nil {
+			o.hashes = hashes
+		} else {
+			o.hashes[r] = hashValue
+		}
 		o.fs.objectHashesMu.Unlock()
 	}
-	return hashes[r], nil
+	return hashValue, nil
 }

 // Size returns the size of an object in bytes
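
The reworked Hash computes only the hash type the caller asked for and caches it per type, rather than computing every supported hash on each miss. A rough standalone equivalent using only the standard library (the per-type cache keying is the point; hash.StreamTypes itself is rclone's helper):

    package main

    import (
    	"crypto/md5"
    	"crypto/sha1"
    	"encoding/hex"
    	"fmt"
    	"hash"
    	"io"
    	"strings"
    	"sync"
    )

    var (
    	mu    sync.Mutex
    	cache = map[string]string{} // hash type -> hex digest
    )

    func hashOf(kind string, open func() io.Reader) (string, error) {
    	mu.Lock()
    	v, found := cache[kind]
    	mu.Unlock()
    	if found {
    		return v, nil // cache hit: nothing to read
    	}
    	var h hash.Hash
    	switch kind {
    	case "md5":
    		h = md5.New()
    	case "sha1":
    		h = sha1.New()
    	default:
    		return "", fmt.Errorf("unsupported hash %q", kind)
    	}
    	if _, err := io.Copy(h, open()); err != nil {
    		return "", err
    	}
    	v = hex.EncodeToString(h.Sum(nil))
    	mu.Lock()
    	cache[kind] = v // cache only the type we actually computed
    	mu.Unlock()
    	return v, nil
    }

    func main() {
    	open := func() io.Reader { return strings.NewReader("hello") }
    	fmt.Println(hashOf("md5", open))
    	fmt.Println(hashOf("md5", open)) // second call served from the cache
    }
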
@@ -676,13 +810,18 @@ func (o *Object) Size() int64 {
 }

 // ModTime returns the modification time of the object
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 	return o.modTime
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
-	err := os.Chtimes(o.path, modTime, modTime)
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+	var err error
+	if o.translatedLink {
+		err = lChtimes(o.path, modTime, modTime)
+	} else {
+		err = os.Chtimes(o.path, modTime, modTime)
+	}
 	if err != nil {
 		return err
 	}
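
lChtimes is rclone's platform-specific helper for setting times on the symlink itself, since os.Chtimes follows the link. A sketch of the portable shape only, where the no-follow variant is stubbed out on platforms lacking one (haveLchtimes, lchtimes and setModTime here are illustrative stand-ins, not rclone's implementation):

    package main

    import (
    	"errors"
    	"fmt"
    	"io/ioutil"
    	"os"
    	"time"
    )

    // haveLchtimes would be set true by a platform-specific file providing a
    // real no-follow implementation; stubbed false here for portability.
    var haveLchtimes = false

    func lchtimes(path string, atime, mtime time.Time) error {
    	return errors.New("lchtimes not supported on this platform (stub)")
    }

    // setModTime mirrors the branch in SetModTime above: symlinks need the
    // no-follow call, regular files can use os.Chtimes.
    func setModTime(path string, isLink bool, t time.Time) error {
    	if isLink {
    		if !haveLchtimes {
    			return errors.New("can't set mod time on symlink here")
    		}
    		return lchtimes(path, t, t)
    	}
    	return os.Chtimes(path, t, t)
    }

    func main() {
    	f, err := ioutil.TempFile("", "modtime")
    	if err != nil {
    		panic(err)
    	}
    	defer os.Remove(f.Name())
    	f.Close()
    	fmt.Println(setModTime(f.Name(), false, time.Unix(0, 0)))
    }
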
@@ -700,7 +839,7 @@ func (o *Object) Storable() bool {
 		}
 	}
 	mode := o.mode
-	if mode&os.ModeSymlink != 0 {
+	if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
 		if !o.fs.opt.SkipSymlinks {
 			fs.Logf(o, "Can't follow symlink without -L/--copy-links")
 		}
@@ -761,8 +900,18 @@ func (file *localOpenFile) Close() (err error) {
 	return err
 }

+// openTranslatedLink returns a ReadCloser with the contents of a symbolic link
+func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
+	// Read the link and return its destination as the contents of the object
+	linkdst, err := os.Readlink(o.path)
+	if err != nil {
+		return nil, err
+	}
+	return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
+}
+
 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	var offset, limit int64 = 0, -1
 	hashes := hash.Supported
 	for _, option := range options {
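
openTranslatedLink serves the link target text as the object's contents, honouring offset and limit. readers.NewLimitedReadCloser is rclone's helper; the same behaviour can be sketched with the standard library alone (the demo link name is illustrative):

    package main

    import (
    	"fmt"
    	"io"
    	"io/ioutil"
    	"os"
    	"strings"
    )

    // openLinkContents returns a ReadCloser over a symlink's target string,
    // sliced by offset and capped by limit (limit < 0 means unlimited),
    // mirroring the offset/limit handling in the hunk above.
    func openLinkContents(path string, offset, limit int64) (io.ReadCloser, error) {
    	linkdst, err := os.Readlink(path)
    	if err != nil {
    		return nil, err
    	}
    	r := io.Reader(strings.NewReader(linkdst[offset:]))
    	if limit >= 0 {
    		r = io.LimitReader(r, limit)
    	}
    	return ioutil.NopCloser(r), nil
    }

    func main() {
    	if err := os.Symlink("file.txt", "demo-link"); err != nil {
    		panic(err)
    	}
    	defer os.Remove("demo-link")
    	in, err := openLinkContents("demo-link", 2, 4) // bytes 2..5 of "file.txt"
    	if err != nil {
    		panic(err)
    	}
    	defer in.Close()
    	b, _ := ioutil.ReadAll(in)
    	fmt.Printf("%q\n", b) // "le.t"
    }
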
@@ -780,7 +929,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 		}
 	}

-	fd, err := os.Open(o.path)
+	// Handle a translated link
+	if o.translatedLink {
+		return o.openTranslatedLink(offset, limit)
+	}
+
+	fd, err := file.Open(o.path)
 	if err != nil {
 		return
 	}
@@ -811,8 +965,19 @@ func (o *Object) mkdirAll() error {
 	return os.MkdirAll(dir, 0777)
 }

+type nopWriterCloser struct {
+	*bytes.Buffer
+}
+
+func (nwc nopWriterCloser) Close() error {
+	// noop
+	return nil
+}
+
 // Update the object from in with modTime and size
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	var out io.WriteCloser
+
 	hashes := hash.Supported
 	for _, option := range options {
 		switch x := option.(type) {
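
nopWriterCloser exists so the code below can treat "write to a file" and "capture into memory" uniformly through one io.WriteCloser variable. The idiom in isolation (the write helper and data are illustrative):

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    )

    // nopWriterCloser adapts *bytes.Buffer (which has no Close method) to
    // io.WriteCloser so it can stand in for an *os.File.
    type nopWriterCloser struct {
    	*bytes.Buffer
    }

    func (nwc nopWriterCloser) Close() error {
    	return nil // nothing to release for an in-memory buffer
    }

    func write(out io.WriteCloser, data string) error {
    	if _, err := io.WriteString(out, data); err != nil {
    		return err
    	}
    	return out.Close()
    }

    func main() {
    	var buf bytes.Buffer
    	if err := write(nopWriterCloser{&buf}, "link target"); err != nil {
    		panic(err)
    	}
    	fmt.Println(buf.String()) // contents captured in memory, not on disk
    }
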
@@ -826,15 +991,23 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		return err
 	}

-	out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
-	if err != nil {
-		return err
-	}
-
-	// Pre-allocate the file for performance reasons
-	err = preAllocate(src.Size(), out)
-	if err != nil {
-		fs.Debugf(o, "Failed to pre-allocate: %v", err)
+	var symlinkData bytes.Buffer
+	// If the object is a regular file, create it.
+	// If it is a translated link, just read in the contents, and
+	// then create a symlink
+	if !o.translatedLink {
+		f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+		if err != nil {
+			return err
+		}
+		// Pre-allocate the file for performance reasons
+		err = preAllocate(src.Size(), f)
+		if err != nil {
+			fs.Debugf(o, "Failed to pre-allocate: %v", err)
+		}
+		out = f
+	} else {
+		out = nopWriterCloser{&symlinkData}
 	}

 	// Calculate the hash of the object we are reading as we go along
@@ -849,6 +1022,26 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	if err == nil {
 		err = closeErr
 	}
+
+	if o.translatedLink {
+		if err == nil {
+			// Remove any current symlink or file, if one exists
+			if _, err := os.Lstat(o.path); err == nil {
+				if removeErr := os.Remove(o.path); removeErr != nil {
+					fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
+					return removeErr
+				}
+			}
+			// Use the contents of the copied object to create a symlink
+			err = os.Symlink(symlinkData.String(), o.path)
+		}
+
+		// only continue if symlink creation succeeded
+		if err != nil {
+			return err
+		}
+	}
+
 	if err != nil {
 		fs.Logf(o, "Removing partially written file on error: %v", err)
 		if removeErr := os.Remove(o.path); removeErr != nil {
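
The translated-link branch replaces whatever is at the destination path with a fresh symlink, because os.Symlink fails if the name already exists. The remove-then-symlink sequence on its own (a sketch; file names are illustrative and rclone additionally reports failures through fs.Errorf):

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"os"
    )

    // replaceWithSymlink removes any existing file or link at path, then
    // creates a symlink pointing at target, as the hunk above does.
    func replaceWithSymlink(path, target string) error {
    	if _, err := os.Lstat(path); err == nil { // something is already there
    		if err := os.Remove(path); err != nil {
    			return fmt.Errorf("failed to remove previous file: %v", err)
    		}
    	}
    	return os.Symlink(target, path)
    }

    func main() {
    	// Overwrite an existing regular file with a link
    	if err := ioutil.WriteFile("demo.txt", []byte("old"), 0666); err != nil {
    		panic(err)
    	}
    	defer os.Remove("demo.txt")
    	if err := replaceWithSymlink("demo.txt", "target.txt"); err != nil {
    		panic(err)
    	}
    	dst, _ := os.Readlink("demo.txt")
    	fmt.Println("now links to", dst)
    }
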
@@ -863,7 +1056,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	o.fs.objectHashesMu.Unlock()

 	// Set the mtime
-	err = o.SetModTime(src.ModTime())
+	err = o.SetModTime(ctx, src.ModTime(ctx))
 	if err != nil {
 		return err
 	}
@@ -872,6 +1065,36 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	return o.lstat()
 }

+// OpenWriterAt opens with a handle for random access writes
+//
+// Pass in the remote desired and the size if known.
+//
+// It truncates any existing object
+func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
+	// Temporary Object under construction
+	o := f.newObject(remote, "")
+
+	err := o.mkdirAll()
+	if err != nil {
+		return nil, err
+	}
+
+	if o.translatedLink {
+		return nil, errors.New("can't open a symlink for random writing")
+	}
+
+	out, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+	if err != nil {
+		return nil, err
+	}
+	// Pre-allocate the file for performance reasons
+	err = preAllocate(size, out)
+	if err != nil {
+		fs.Debugf(o, "Failed to pre-allocate: %v", err)
+	}
+	return out, nil
+}
+
 // setMetadata sets the file info from the os.FileInfo passed in
 func (o *Object) setMetadata(info os.FileInfo) {
 	// Don't overwrite the info if we don't need to
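
OpenWriterAt is what lets multi-threaded copies write chunks of one destination file concurrently: *os.File already satisfies io.WriterAt, and the io.WriterAt contract permits parallel WriteAt calls on non-overlapping ranges. A sketch of that usage (chunk sizes and file names are illustrative):

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"os"
    	"sync"
    )

    func main() {
    	out, err := os.OpenFile("out.bin", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
    	if err != nil {
    		panic(err)
    	}
    	defer os.Remove("out.bin")

    	chunks := []string{"hello ", "world ", "again!"}
    	var wg sync.WaitGroup
    	for i, chunk := range chunks {
    		wg.Add(1)
    		go func(off int64, data string) {
    			defer wg.Done()
    			// WriteAt at disjoint offsets is safe on one shared *os.File
    			if _, err := out.WriteAt([]byte(data), off); err != nil {
    				fmt.Fprintln(os.Stderr, err)
    			}
    		}(int64(i*6), chunk) // each chunk is 6 bytes long
    	}
    	wg.Wait()
    	if err := out.Close(); err != nil {
    		panic(err)
    	}
    	b, _ := ioutil.ReadFile("out.bin")
    	fmt.Println(string(b)) // "hello world again!"
    }
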
@@ -897,7 +1120,7 @@ func (o *Object) lstat() error {
 }

 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	return remove(o.path)
 }
@@ -1013,10 +1236,11 @@ func cleanWindowsName(f *Fs, name string) string {

 // Check the interfaces are satisfied
 var (
-	_ fs.Fs          = &Fs{}
-	_ fs.Purger      = &Fs{}
-	_ fs.PutStreamer = &Fs{}
-	_ fs.Mover       = &Fs{}
-	_ fs.DirMover    = &Fs{}
-	_ fs.Object      = &Object{}
+	_ fs.Fs             = &Fs{}
+	_ fs.Purger         = &Fs{}
+	_ fs.PutStreamer    = &Fs{}
+	_ fs.Mover          = &Fs{}
+	_ fs.DirMover       = &Fs{}
+	_ fs.OpenWriterAter = &Fs{}
+	_ fs.Object         = &Object{}
 )
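
The var block above is the standard compile-time check that *Fs and *Object still satisfy every optional interface after the signature changes; it costs nothing at runtime. The idiom in miniature, applied here to *os.File for illustration:

    package main

    import (
    	"fmt"
    	"io"
    	"os"
    )

    // Compile-time assertions: if *os.File ever stopped implementing one of
    // these, the build would fail here instead of at a distant call site.
    var (
    	_ io.Reader   = (*os.File)(nil)
    	_ io.WriterAt = (*os.File)(nil)
    	_ io.Closer   = (*os.File)(nil)
    )

    func main() {
    	fmt.Println("interfaces satisfied at compile time")
    }
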
@@ -1,14 +1,21 @@
 package local

 import (
+	"context"
 	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
 	"runtime"
 	"testing"
 	"time"

-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/fstest"
-	"github.com/ncw/rclone/lib/readers"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/lib/file"
+	"github.com/rclone/rclone/lib/readers"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -38,10 +45,13 @@ func TestUpdatingCheck(t *testing.T) {
 	filePath := "sub dir/local test"
 	r.WriteFile(filePath, "content", time.Now())

-	fd, err := os.Open(path.Join(r.LocalName, filePath))
+	fd, err := file.Open(path.Join(r.LocalName, filePath))
 	if err != nil {
 		t.Fatalf("failed opening file %q: %v", filePath, err)
 	}
+	defer func() {
+		require.NoError(t, fd.Close())
+	}()

 	fi, err := fd.Stat()
 	require.NoError(t, err)
@@ -72,3 +82,109 @@ func TestUpdatingCheck(t *testing.T) {
 	require.NoError(t, err)

 }
+
+func TestSymlink(t *testing.T) {
+	ctx := context.Background()
+	r := fstest.NewRun(t)
+	defer r.Finalise()
+	f := r.Flocal.(*Fs)
+	dir := f.root
+
+	// Write a file
+	modTime1 := fstest.Time("2001-02-03T04:05:10.123123123Z")
+	file1 := r.WriteFile("file.txt", "hello", modTime1)
+
+	// Write a symlink
+	modTime2 := fstest.Time("2002-02-03T04:05:10.123123123Z")
+	symlinkPath := filepath.Join(dir, "symlink.txt")
+	require.NoError(t, os.Symlink("file.txt", symlinkPath))
+	require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
+
+	// Object viewed as symlink
+	file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
+	if runtime.GOOS == "windows" {
+		file2.Size = 0 // symlinks are 0 length under Windows
+	}
+
+	// Object viewed as destination
+	file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
+
+	// Check with no symlink flags
+	fstest.CheckItems(t, r.Flocal, file1)
+	fstest.CheckItems(t, r.Fremote)
+
+	// Set fs into "-L" mode
+	f.opt.FollowSymlinks = true
+	f.opt.TranslateSymlinks = false
+	f.lstat = os.Stat
+
+	fstest.CheckItems(t, r.Flocal, file1, file2d)
+	fstest.CheckItems(t, r.Fremote)
+
+	// Set fs into "-l" mode
+	f.opt.FollowSymlinks = false
+	f.opt.TranslateSymlinks = true
+	f.lstat = os.Lstat
+
+	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
+	if haveLChtimes {
+		fstest.CheckItems(t, r.Flocal, file1, file2)
+	}
+
+	// Create a symlink
+	modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
+	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
+	if runtime.GOOS == "windows" {
+		file3.Size = 0 // symlinks are 0 length under Windows
+	}
+	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
+	if haveLChtimes {
+		fstest.CheckItems(t, r.Flocal, file1, file2, file3)
+	}
+
+	// Check it got the correct contents
+	symlinkPath = filepath.Join(dir, "symlink2.txt")
+	fi, err := os.Lstat(symlinkPath)
+	require.NoError(t, err)
+	assert.False(t, fi.Mode().IsRegular())
+	linkText, err := os.Readlink(symlinkPath)
+	require.NoError(t, err)
+	assert.Equal(t, "file.txt", linkText)
+
+	// Check that NewObject gets the correct object
+	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
+	require.NoError(t, err)
+	assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
+	if runtime.GOOS != "windows" {
+		assert.Equal(t, int64(8), o.Size())
+	}
+
+	// Check that NewObject doesn't see the non suffixed version
+	_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
+	require.Equal(t, fs.ErrorObjectNotFound, err)
+
+	// Check reading the object
+	in, err := o.Open(ctx)
+	require.NoError(t, err)
+	contents, err := ioutil.ReadAll(in)
+	require.NoError(t, err)
+	require.Equal(t, "file.txt", string(contents))
+	require.NoError(t, in.Close())
+
+	// Check reading the object with range
+	in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
+	require.NoError(t, err)
+	contents, err = ioutil.ReadAll(in)
+	require.NoError(t, err)
+	require.Equal(t, "file.txt"[2:5+1], string(contents))
+	require.NoError(t, in.Close())
+}
+
+func TestSymlinkError(t *testing.T) {
+	m := configmap.Simple{
+		"links":      "true",
+		"copy_links": "true",
+	}
+	_, err := NewFs("local", "/", m)
+	assert.Equal(t, errLinksAndCopyLinks, err)
+}