Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: v1.19...azure-pipe (3081 commits)
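For reference, this comparison can be reproduced against a local clone of the mirror. A minimal sketch in Go, assuming `git` is on the PATH, the working directory is a clone of the repository above, and both the `v1.19` and `azure-pipe` refs have been fetched; the two-dot range lists commits reachable from `azure-pipe` but not from `v1.19`, which is what compare views typically show:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// List commits reachable from azure-pipe but not from v1.19,
	// one line per commit (abbreviated SHA1 plus subject).
	// Assumes a local clone of https://github.com/rclone/rclone.git
	// with both refs available.
	cmd := exec.Command("git", "log", "--oneline", "v1.19..azure-pipe")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "git log failed:", err)
		os.Exit(1)
	}
}
```

Piping the output through a line count (for example `git log --oneline v1.19..azure-pipe | wc -l` directly in a shell) should approximate the commit count shown above.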
|
|
3a900e5bb7 | ||
|
|
b4d7741611 | ||
|
|
95fd79faf9 | ||
|
|
b79dc01016 | ||
|
|
bf562d7373 | ||
|
|
2e9f2ea3d3 | ||
|
|
177dbbc29a | ||
|
|
4712043e26 | ||
|
|
852acd5e4e | ||
|
|
9f1daabb2c | ||
|
|
938dd24cc9 | ||
|
|
57aad81b68 | ||
|
|
a91bcaaeb0 | ||
|
|
d04c21b198 | ||
|
|
4a0a42c2f1 | ||
|
|
cc7b9af50e | ||
|
|
68fef49c55 | ||
|
|
5d4b149884 | ||
|
|
5f20ae707d | ||
|
|
e9c915e6fe | ||
|
|
2ed158aba3 | ||
|
|
05050d53ad | ||
|
|
e391311512 | ||
|
|
3234c28f7c | ||
|
|
6fbd9cf24b | ||
|
|
bc5b63ffef | ||
|
|
788ef76f1c | ||
|
|
0872ec3204 | ||
|
|
0a5870208e | ||
|
|
3219334c3e | ||
|
|
79fd662676 | ||
|
|
34193fd8d9 | ||
|
|
2203766f77 | ||
|
|
235cbe0e57 | ||
|
|
f50f353b5d | ||
|
|
00afe6cc96 | ||
|
|
dd48e62b7e | ||
|
|
a1a780e847 | ||
|
|
fa87077211 | ||
|
|
6ac7145d2d | ||
|
|
f1226f19b2 | ||
|
|
3ecbf2af25 | ||
|
|
79f2e95bf9 | ||
|
|
faee50b238 | ||
|
|
807d4a3c00 | ||
|
|
073d112204 | ||
|
|
14f814b806 | ||
|
|
a288c2b3a3 | ||
|
|
fec16b0ac8 | ||
|
|
dd8717797e | ||
|
|
7e7c239f09 | ||
|
|
edd0e8abb1 | ||
|
|
d2b537d9a1 | ||
|
|
8c3df224ef | ||
|
|
967fd2a778 | ||
|
|
ea12e446ca | ||
|
|
c8cd2b510f | ||
|
|
8b05a8322b | ||
|
|
c98a51b26c | ||
|
|
e2717a031e | ||
|
|
8d33ce0154 | ||
|
|
92745aa950 | ||
|
|
cbc6bf6a89 | ||
|
|
f72575e75f | ||
|
|
0168f55f3e | ||
|
|
8b60ab86a1 | ||
|
|
7463a7a509 |
.circleci/config.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
---
version: 2

jobs:
  build:
    machine: true
    working_directory: ~/.go_workspace/src/github.com/rclone/rclone
    steps:
      - checkout
      - run:
          name: Cross-compile rclone
          command: |
            docker pull rclone/xgo-cgofuse
            go get -v github.com/karalabe/xgo
            xgo \
                --image=rclone/xgo-cgofuse \
                --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
                -tags cmount \
                .
            xgo \
                --targets=android/*,ios/* \
                .
      - run:
          name: Prepare artifacts
          command: |
            mkdir -p /tmp/rclone.dist
            cp -R rclone-* /tmp/rclone.dist
            mkdir build
            cp -R rclone-* build/
      - run:
          name: Build rclone
          command: |
            go version
            go build
      - run:
          name: Upload artifacts
          command: |
            if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
              make circleci_upload
            fi
      - store_artifacts:
          path: /tmp/rclone.dist
.gitattributes (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true
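As a quick sanity check (not part of the file above), `git check-attr` shows whether the attribute applies to a given path; exact output can vary slightly by git version:

```
$ git check-attr linguist-generated -- MANUAL.html rclone.1
MANUAL.html: linguist-generated: true
rclone.1: linguist-generated: true
```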
.github/ISSUE_TEMPLATE.md (vendored, new file, 31 lines)
@@ -0,0 +1,31 @@
<!--

Welcome :-) We understand you are having a problem with rclone; we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue for a quick response.

If you are reporting a bug or asking for a new feature then please use one of the templates here:

https://github.com/rclone/rclone/issues/new

otherwise fill in the form below.

Thank you

The Rclone Developers

-->

#### Output of `rclone version`


#### Describe the issue

.github/ISSUE_TEMPLATE/Bug.md (vendored, new file, 50 lines)
@@ -0,0 +1,50 @@
---
name: Bug report
about: Report a problem with rclone
---

<!--

Welcome :-) We understand you are having a problem with rclone; we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue for a quick response.

If you think you might have found a bug, please can you try to replicate it with the latest beta?

https://beta.rclone.org/

If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)

Thank you

The Rclone Developers

-->

#### What is the problem you are having with rclone?


#### What is your rclone version (output from `rclone version`)


#### Which OS you are using and how many bits (eg Windows 7, 64 bit)


#### Which cloud storage system are you using? (eg Google Drive)


#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)


#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)

.github/ISSUE_TEMPLATE/Feature.md (vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
---
name: Feature request
about: Suggest a new feature or enhancement for rclone
---

<!--

Welcome :-)

So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.

Here is a checklist of things to do:

1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)

Looking forward to your great idea!

The Rclone Developers

-->

#### What is your current rclone version (output from `rclone version`)?


#### What problem are you trying to solve?


#### How do you think rclone should be changed to solve that?

.github/PULL_REQUEST_TEMPLATE.md (vendored, new file, 29 lines)
@@ -0,0 +1,29 @@
<!--
Thank you very much for contributing code or documentation to rclone! Please
fill out the following questions to make it easier for us to review your
changes.

You do not need to check all the boxes below all at once, feel free to take
your time and add more commits. If you're done and ready for review, please
check the last box.
-->

#### What is the purpose of this change?

<!--
Describe the changes here
-->

#### Was the change discussed in an issue or in the forum before?

<!--
Link issues and relevant forum posts here.
-->

#### Checklist

- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)
.gitignore (vendored, 10 lines changed)
@@ -1,10 +1,10 @@
*~
_junk/
rclone
rclonetest/rclonetest
build
docs/public
MANUAL.md
MANUAL.html
MANUAL.txt
rclone.1
rclone.iml
.idea
.history
*.test
*.log
.golangci.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
# golangci-lint configuration options

linters:
  enable:
    - deadcode
    - errcheck
    - goimports
    - golint
    - ineffassign
    - structcheck
    - varcheck
    - govet
    - unconvert
    #- prealloc
    #- maligned
  disable-all: true

issues:
  # Enable some lints excluded by default
  exclude-use-default: false

  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
  max-per-linter: 0

  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0
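The `check` target of the Makefile later in this diff drives this configuration via `golangci-lint run`; to invoke the same linters by hand from the repository root:

```
$ golangci-lint run ./...
```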
.travis.yml (deleted, 12 lines)
@@ -1,12 +0,0 @@
language: go
sudo: false

go:
  - 1.2.2
  - 1.3.3
  - 1.4
  - tip

script:
  - go get ./...
  - go test -v ./...
CONTRIBUTING.md (new file, 380 lines)
@@ -0,0 +1,380 @@
# Contributing to rclone #

This is a short guide on how to contribute things to rclone.

## Reporting a bug ##

If you've just got a question or aren't sure if you've found a bug
then please use the [rclone forum](https://forum.rclone.org/) instead
of filing an issue.

When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):

* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
  * if the log contains secrets then edit the file with a text editor first to obscure them

## Submitting a pull request ##

If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.

If it is a big feature then make an issue first so it can be discussed.

You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.

First in your web browser press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).

Now in your terminal

    go get -u github.com/rclone/rclone
    cd $GOPATH/src/github.com/rclone/rclone
    git remote rename origin upstream
    git remote add origin git@github.com:YOURUSER/rclone.git
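To double-check the remote wiring (a verification step, not in the original guide), `git remote -v` should now show something like the following, with `YOURUSER` standing in for your GitHub username:

```
$ git remote -v
origin    git@github.com:YOURUSER/rclone.git (fetch)
origin    git@github.com:YOURUSER/rclone.git (push)
upstream  https://github.com/rclone/rclone (fetch)
upstream  https://github.com/rclone/rclone (push)
```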
Make a branch to add your new feature

    git checkout -b my-new-feature

And get hacking.

When ready - run the unit tests for the code you changed

    go test -v

Note that you may need to make a test remote, eg `TestSwift` for some
of the unit tests.

Note the top level Makefile targets

* make check
* make test

Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too. These require some extra go
packages which you can install with

* make build_dep

Make sure you

* Add [documentation](#writing-documentation) for a new feature.
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* squash commits down to one per feature
* rebase to master with `git rebase master`

When you are done with that

    git push origin my-new-feature

Go to the GitHub website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).

Your patch will get reviewed and you might get asked to fix some stuff.

If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to GitHub with `--force` (a sketch of
those commands follows below).
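A minimal sketch of that fix-up cycle, assuming the branch is called `my-new-feature` as above:

```
$ git rebase -i master                    # squash the commits down to one
$ git push --force origin my-new-feature  # replace the branch on your fork
```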
## Enabling CI for your fork ##

The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.

rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
websites, find your fork of rclone, and enable building there.

## Testing ##

rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.

    go test -v ./...

rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
run against any of the backends. This is done by making specially
named remotes in the default config file.

If you wanted to test changes in the `drive` backend, then you would
need to make a remote called `TestDrive` (an illustrative config entry
is sketched below).
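For illustration, a hand-made config entry could look like the following; the section name `TestDrive` is what the tests look for, while the keys shown are placeholders normally filled in by `rclone config`:

```
# ~/.config/rclone/rclone.conf (default location; values are illustrative)
[TestDrive]
type = drive
client_id = ...
client_secret = ...
token = {"access_token":"..."}
```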
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.

    cd backend/drive
    go test -v

You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local filing system,
but they can be run against any of the remotes.

    cd fs/sync
    go test -v -remote TestDrive:
    go test -v -remote TestDrive: -subdir

    cd fs/operations
    go test -v -remote TestDrive:

If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:

    go install github.com/rclone/rclone/fstest/test_all
    test_all -backend drive

If you want to run all the integration tests against all the remotes,
then change into the project root and run

    make test

This command is run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/

## Code Organisation ##

Rclone code is organised into a small number of top level directories
with modules beneath.

* backend - the rclone backends for interfacing to cloud providers
  * all - import this to load all the cloud providers
  * ...providers
* bin - scripts for use while building or maintaining rclone
* cmd - the rclone commands
  * all - import this to load all the commands
  * ...commands
* docs - the documentation and website
  * content - adjust these docs only - everything else is autogenerated
* fs - main rclone definitions - minimal amount of code
  * accounting - bandwidth limiting and statistics
  * asyncreader - an io.Reader which reads ahead
  * config - manage the config file and flags
  * driveletter - detect if a name is a drive letter
  * filter - implements include/exclude filtering
  * fserrors - rclone specific error handling
  * fshttp - http handling for rclone
  * fspath - path handling for rclone
  * hash - defines rclone's hash types and functions
  * list - list a remote
  * log - logging facilities
  * march - iterates directories in lock step
  * object - in memory Fs objects
  * operations - primitives for sync, eg Copy, Move
  * sync - sync directories
  * walk - walk a directory
* fstest - provides integration test framework
  * fstests - integration tests for the backends
  * mockdir - mocks an fs.Directory
  * mockobject - mocks an fs.Object
  * test_all - Runs integration tests for everything
* graphics - the images used in the website etc
* lib - libraries used by the backend
  * atexit - register functions to run when rclone exits
  * dircache - directory ID to name caching
  * oauthutil - helpers for using oauth
  * pacer - retries with backoff and paces operations
  * readers - a selection of useful io.Readers
  * rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by `go mod`
* vfs - Virtual FileSystem layer for implementing rclone mount and similar

## Writing Documentation ##

If you are adding a new feature then please update the documentation.

If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order.

If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. The first line of this is used
for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.

The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.

Documentation for rclone sub commands is with their code, eg
`cmd/ls/ls.go`.

## Making a release ##

There are separate instructions for making a release in the RELEASE.md
file.

## Commit messages ##

Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and
prefix it with the directory of the change followed by a colon. The
changelog gets made by looking at just these first lines so make it
good!

If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.

Writing more is better than less. Comparing the behaviour before the
change to that after the change is very useful. Imagine you are
writing to yourself in 12 months time when you've forgotten everything
about what you just did and you need to get up to speed quickly.

If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
change will get linked into the issue.

Here is an example of a short commit message:

```
drive: add team drive support - fixes #885
```

And here is an example of a longer one:

```
mount: fix hang on errored upload

In certain circumstances if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.

Fixes #1498
```

## Adding a dependency ##

rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support in go1.11 and later to manage its dependencies.

**NB** you must be using go1.11 or above to add a dependency to
rclone. Rclone will still build with older versions of go, but we use
the `go mod` command for dependencies which is only in go1.11 and
above.

rclone can be built with modules outside of the GOPATH, but for
backwards compatibility with older go versions, rclone also maintains
a `vendor` directory with all the external code rclone needs for
building.

The `vendor` directory is entirely managed by the `go mod` tool, do
not add things manually.

To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency, add it to
`go.mod` and `go.sum` and vendor it for older go versions.

    GO111MODULE=on go get github.com/ncw/new_dependency
    GO111MODULE=on go mod vendor

You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.

Please check in the changes generated by `go mod` including the
`vendor` directory and `go.mod` and `go.sum` in a single commit
separate from any other code changes with the title "vendor: add
github.com/ncw/new_dependency". Remember to `git add` any new files
in `vendor`.
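Putting the last two paragraphs together, the whole sequence might look like this (a sketch, using the example dependency name from above):

```
$ GO111MODULE=on go get github.com/ncw/new_dependency
$ GO111MODULE=on go mod vendor
$ git add vendor go.mod go.sum
$ git commit -m "vendor: add github.com/ncw/new_dependency"
```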
## Updating a dependency ##

If you need to update a dependency then run

    GO111MODULE=on go get -u github.com/pkg/errors
    GO111MODULE=on go mod vendor

Check in the changes in a single commit as above.

## Updating all the dependencies ##

In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
stable release. Check in the changes in a single commit as above.

This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.

## Updating a backend ##

If you update a backend then please run the unit tests and the
integration tests for that backend.

Assuming the backend is called `remote`, create a config entry
called `TestRemote` for the tests to use.

Now `cd remote` and run `go test -v` to run the unit tests.

Then `cd fs` and run `go test -v -remote TestRemote:` to run the
integration tests.

The next section goes into more detail about the tests.

## Writing a new backend ##

Choose a name. The docs here will use `remote` as an example.

Note that in rclone terminology a file system backend is called a
remote or an fs.

Research

* Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes

Getting going

* Create `backend/remote/remote.go` (copy this from a similar remote)
  * box is a good one to start from if you have a directory based remote
  * b2 is a good one to start from if you have a bucket based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.

Unit tests

* Create a config entry called `TestRemote` for the unit tests to use
* Create a `backend/remote/remote_test.go` - copy and adjust your example remote
* Make sure all tests pass with `go test -v`

Integration tests

* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
  * go install ./...
  * test_all -backend remote

Or if you want to run the integration tests manually:

* Make sure integration tests pass with
  * `cd fs/operations`
  * `go test -v -remote TestRemote:`
  * `cd fs/sync`
  * `go test -v -remote TestRemote:`
* If you are making a bucket based remote, then check with this also
  * `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
  * `go test -v -remote TestRemote: -fast-list`

See the [testing](#testing) section for more information on integration tests.

Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.

* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
DISABLED.appveyor.yml (new file, 49 lines)
@@ -0,0 +1,49 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\rclone\rclone

cache:
  - '%LocalAppData%\go-build'

environment:
  GOPATH: C:\gopath
  CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
  ORIGPATH: '%PATH%'
  NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
  PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
  PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
  PATH: '%PATHCC64%'
  RCLONE_CONFIG_PASS:
    secure: sq9CPBbwaeKJv+yd24U44neORYPQVy6jsjnQptC+5yk=

install:
  - choco install winfsp -y
  - choco install zip -y
  - copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe

build_script:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go install
  - go build
  - make log_since_last_release > %TEMP%\git-log.txt
  - make version > %TEMP%\version
  - set /p RCLONE_VERSION=<%TEMP%\version
  - set PATH=%PATHCC32%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
  - set PATH=%PATHCC64%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%

test_script:
  - make GOTAGS=cmount quicktest

artifacts:
  - path: rclone.exe
  - path: build/*-v*.zip

deploy_script:
  - IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
DISABLED.travis.yml (new file, 128 lines)
@@ -0,0 +1,128 @@
---
language: go
sudo: required
dist: xenial
os:
  - linux
go_import_path: github.com/rclone/rclone
before_install:
  - git fetch --unshallow --tags
  - |
    if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
      sudo modprobe fuse
      sudo chmod 666 /dev/fuse
      sudo chown root:$USER /etc/fuse.conf
    fi
    if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
      brew update
      brew tap caskroom/cask
      brew cask install osxfuse
    fi
    if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
      choco install -y winfsp zip make
      cd ../.. # fix crlf in git checkout
      mv $TRAVIS_REPO_SLUG _old
      git config --global core.autocrlf false
      git clone _old $TRAVIS_REPO_SLUG
      cd $TRAVIS_REPO_SLUG
    fi
install:
  - make vars
env:
  global:
    - GOTAGS=cmount
    - GOMAXPROCS=8 # workaround for cmd/mount tests locking up - see #3154
    - GO111MODULE=off
    - GITHUB_USER=ncw
    - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
    - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
  apt:
    packages:
      - fuse
      - libfuse-dev
      - rpm
      - pkg-config
cache:
  directories:
    - $HOME/.cache/go-build
matrix:
  allow_failures:
    - go: tip
  include:
    - go: 1.9.x
      script:
        - make quicktest
    - go: 1.10.x
      script:
        - make quicktest
    - go: 1.11.x
      script:
        - make quicktest
    - go: 1.12.x
      name: Linux
      env:
        - GOTAGS=cmount
        - BUILD_FLAGS='-include "^linux/"'
        - DEPLOY=true
      script:
        - make build_dep
        - make check
        - make quicktest
    - go: 1.12.x
      name: Go Modules / Race
      env:
        - GO111MODULE=on
        - GOPROXY=https://proxy.golang.org
      script:
        - make quicktest
        - make racequicktest
    - go: 1.12.x
      name: Other OS
      env:
        - DEPLOY=true
        - BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
      script:
        - make
        - make compile_all
    - go: 1.12.x
      name: macOS
      os: osx
      env:
        - GOTAGS= # cmount doesn't work on osx travis for some reason
        - BUILD_FLAGS='-include "^darwin/" -cgo'
        - DEPLOY=true
      cache:
        directories:
          - $HOME/Library/Caches/go-build
      script:
        - make
        - make quicktest
        - make racequicktest
    # - os: windows
    #   name: Windows
    #   go: 1.12.x
    #   env:
    #     - GOTAGS=cmount
    #     - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
    #     - BUILD_FLAGS='-include "^windows/amd64" -cgo' # 386 doesn't build yet
    #   #filter_secrets: false # works around a problem with secrets under windows
    #   cache:
    #     directories:
    #       - ${LocalAppData}/go-build
    #   script:
    #     - make
    #     - make quicktest
    #     - make racequicktest
    - go: tip
      script:
        - make quicktest

deploy:
  provider: script
  script: make travis_beta
  skip_cleanup: true
  on:
    repo: rclone/rclone
    all_branches: true
    condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true
MAINTAINERS.md (new file, 92 lines)
@@ -0,0 +1,92 @@
# Maintainers guide for rclone #

Current active maintainers of rclone are:

| Name             | GitHub ID   | Specific Responsibilities    |
| :--------------- | :---------- | :--------------------------- |
| Nick Craig-Wood  | @ncw        | overall project health       |
| Stefan Breunig   | @breunigs   |                              |
| Ishuah Kariuki   | @ishuah     |                              |
| Remus Bunduc     | @remusb     | cache backend                |
| Fabian Möller    | @B4dM4n     |                              |
| Alex Chen        | @Cnly       | onedrive backend             |
| Sandeep Ummadi   | @sandeepkru | azureblob backend            |
| Sebastian Bünger | @buengese   | jottacloud & yandex backends |

**This is a work in progress Draft**

This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.

## Triaging Tickets ##

When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket so tickets may remain without labels or milestone for a while.

Rclone uses the labels like this:

* `bug` - a definite verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with the `rclone mount` command
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help wanted` - mark these if you find a self contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation etc
* `Needs Go 1.XX` - waiting for that version of Go to be released
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
* `Remote: XXX` - which rclone backend this affects
* `thinking` - not decided on the course of action yet

If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.

When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).

The milestones have these meanings:

* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled to a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment

Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.

## Closing Tickets ##

Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta in the ticket with the fix in, asking for feedback.

## Pull requests ##

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.

Sometimes pull requests need to be left open for a while - this is especially true of contributions of new backends which take a long time to get right.

## Merges ##

If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly (see the sketch below).
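A sketch of that rebase-then-fast-forward flow, with `branch-name` standing in for the branch being merged:

```
$ git checkout branch-name
$ git rebase master                 # replay the branch on top of master
$ git checkout master
$ git merge --ff-only branch-name   # now guaranteed to fast-forward
```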
## Release cycle ##

Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly or for personal reasons.

High impact regressions should be fixed before the next release.

Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big to let things settle down.

Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming, often needing several rounds of test and fix depending on exactly how many new features rclone has gained.

## Mailing list ##

There is now an invite only mailing list for rclone developers `rclone-dev` on google groups.

## TODO ##

I should probably make a dev@rclone.org to register with cloud providers.
MANUAL.html (generated, new file, 14673 lines)
File diff suppressed because it is too large

MANUAL.txt (generated, new file, 19541 lines)
File diff suppressed because it is too large
Makefile (189 lines)
@@ -1,22 +1,92 @@
TAG := $(shell git describe --tags)
SHELL = bash
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(shell git rev-parse --abbrev-ref HEAD))
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
ifeq ($(BRANCH),$(LAST_TAG))
    BRANCH := master
endif
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
    TAG_BRANCH :=
    BRANCH_PATH :=
endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifneq ($(TAG),$(LAST_TAG))
    TAG := $(TAG)-beta
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif

.PHONY: rclone vars version

rclone:
    @go version
    go install -v ./...
    touch fs/version.go
    go install -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
    cp -av `go env GOPATH`/bin/rclone .

vars:
    @echo SHELL="'$(SHELL)'"
    @echo BRANCH="'$(BRANCH)'"
    @echo TAG="'$(TAG)'"
    @echo LAST_TAG="'$(LAST_TAG)'"
    @echo NEW_TAG="'$(NEW_TAG)'"
    @echo GO_VERSION="'$(GO_VERSION)'"
    @echo BETA_URL="'$(BETA_URL)'"

version:
    @echo '$(TAG)'

# Full suite of integration tests
test: rclone
    go test ./...
    cd fs && ./test_all.sh
    go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
    -test_all 2>&1 | tee test_all.log
    @echo "Written logs in test_all.log"

doc: rclone.1 MANUAL.html MANUAL.txt
# Quick test
quicktest:
    RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)

racequicktest:
    RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)

# Do source code quality checks
check: rclone
    @echo "-- START CODE QUALITY REPORT -------------------------------"
    @golangci-lint run $(LINTTAGS) ./...
    @echo "-- END CODE QUALITY REPORT ---------------------------------"

# Get the build dependencies
build_dep:
    go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'

# Get the release dependencies
release_dep:
    go get -u github.com/goreleaser/nfpm/...
    go get -u github.com/aktau/github-release

# Update dependencies
update:
    GO111MODULE=on go get -u ./...
    GO111MODULE=on go mod tidy
    GO111MODULE=on go mod vendor

doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs

rclone.1: MANUAL.md
    pandoc -s --from markdown --to man MANUAL.md -o rclone.1

MANUAL.md: make_manual.py docs/content/*.md
    ./make_manual.py
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
    ./bin/make_manual.py

MANUAL.html: MANUAL.md
    pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
@@ -24,6 +94,15 @@ MANUAL.html: MANUAL.md
MANUAL.txt: MANUAL.md
    pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
    XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/

backenddocs: rclone bin/make_backend_docs.py
    XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py

rcdocs: rclone
    bin/make_rc_docs.sh

install: rclone
    install -d ${DESTDIR}/usr/bin
    install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
@@ -32,7 +111,7 @@ clean:
    go clean ./...
    find . -name \*~ | xargs -r rm -f
    rm -rf build docs/public
    rm -f rclone rclonetest/rclonetest rclone.1 MANUAL.md MANUAL.html MANUAL.txt
    rm -f rclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log

website:
    cd docs && hugo
@@ -40,30 +119,92 @@ website:
upload_website: website
    rclone -v sync docs/public memstore:www-rclone-org

tarball:
    git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)

sign_upload:
    cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
    cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
    cd build && sha256sum rclone-v* | gpg --clearsign > SHA256SUMS

check_sign:
    cd build && gpg --verify MD5SUMS && gpg --decrypt MD5SUMS | md5sum -c
    cd build && gpg --verify SHA1SUMS && gpg --decrypt SHA1SUMS | sha1sum -c
    cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c

upload:
    rclone -v copy build/ memstore:downloads-rclone-org
    rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
    rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'

upload_github:
    ./bin/upload-github $(TAG)

cross: doc
    ./cross-compile $(TAG)
    go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)

serve:
beta:
    go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
    rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
    @echo Beta release ready at https://pub.rclone.org/$(TAG)/

log_since_last_release:
    git log $(LAST_TAG)..

compile_all:
    go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)

appveyor_upload:
    rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
    rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
    @echo Beta release ready at $(BETA_URL)

circleci_upload:
    ./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
    ./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
    @echo Beta release ready at $(BETA_URL)/testbuilds

travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
    go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
    git log $(LAST_TAG).. > /tmp/git-log.txt
    go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
    rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
    rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
    @echo Beta release ready at $(BETA_URL)

# Fetch the binary builds from travis and appveyor
fetch_binaries:
    rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/

serve: website
    cd docs && hugo server -v -w

tag:
tag: doc
    @echo "Old tag is $(LAST_TAG)"
    @echo "New tag is $(NEW_TAG)"
    echo -e "package fs\n const Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
    perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
    git tag $(NEW_TAG)
    @echo "Add this to changelog in docs/content/changelog.md"
    @echo "  * $(NEW_TAG) -" `date -I`
    @git log $(LAST_TAG)..$(NEW_TAG) --oneline
    @echo "Then commit the changes"
    echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
    echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
    git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
    bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
    mv docs/content/changelog.md.new docs/content/changelog.md
    @echo "Edit the new changelog in docs/content/changelog.md"
    @echo "Then commit all the changes"
    @echo git commit -m \"Version $(NEW_TAG)\" -a -v
    @echo "And finally run make retag before make cross etc"

retag:
    git tag -f $(LAST_TAG)
    git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)

gen_tests:
    cd fstest/fstests && go generate
startdev:
    echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
    git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go

winzip:
    zip -9 rclone-$(TAG).zip rclone.exe
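For orientation (not part of the diff itself), the two everyday targets defined above can be run like so; `quicktest` deliberately points `RCLONE_CONFIG` at a missing file so no remotes are needed:

```
$ make quicktest   # unit tests only
$ make check       # golangci-lint source code quality checks
```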
README.md (110 lines)
@@ -1,39 +1,101 @@
[](http://rclone.org/)
[](https://rclone.org/)

[Website](http://rclone.org) |
[Documentation](http://rclone.org/docs/) |
[Changelog](http://rclone.org/changelog/) |
[Installation](http://rclone.org/install/) |
[G+](https://google.com/+RcloneOrg)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/) |

[](https://travis-ci.org/ncw/rclone) [](https://godoc.org/github.com/ncw/rclone)
[](https://travis-ci.org/rclone/rclone)
[](https://ci.appveyor.com/project/rclone/rclone)
[](https://circleci.com/gh/rclone/rclone/tree/master)
[](https://goreportcard.com/report/github.com/rclone/rclone)
[](https://godoc.org/github.com/rclone/rclone)

Rclone is a command line program to sync files and directories to and from
# Rclone

* Google Drive
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* The local filesystem
Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.

Features
## Storage providers

* MD5SUMs checked at all times for file integrity
* 1Fichier [:page_facing_up:](https://rclone.org/ficher/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)

Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

## Features

* MD5/SHA-1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync mode to make a directory identical
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna

See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
## Installation & documentation

* http://rclone.org/
Please see the [rclone website](https://rclone.org/) for:

* [Installation](https://rclone.org/install/)
* [Documentation & configuration](https://rclone.org/docs/)
* [Changelog](https://rclone.org/changelog/)
* [FAQ](https://rclone.org/faq/)
* [Storage providers](https://rclone.org/overview/)
* [Forum](https://forum.rclone.org/)
* ...and more

## Downloads

* https://rclone.org/downloads/

License
-------

This is free software under the terms of the MIT license (check the
COPYING file included in this package).
[COPYING file](/COPYING) included in this package).
RELEASE.md (72 lines)
@@ -1,19 +1,71 @@
Required software for making a release
Extra required software for making a release
  * [github-release](https://github.com/aktau/github-release) for uploading packages
  * [gox](https://github.com/mitchellh/gox) for cross compiling
    * Run `gox -build-toolchain`
    * This assumes you have your own source checkout
  * pandoc for making the html and man pages

Making a release
  * go get -u -f -v ./...
  * make test
  * git status - make sure everything is checked in
  * Check travis & appveyor builds are green
  * make check
  * make test # see integration test server or run locally
  * make tag
  * edit docs/content/changelog.md
  * git commit -a -v
  * make doc
  * git status - to check for new man pages - git add them
  * git commit -a -v -m "Version v1.XX.0"
  * make retag
  * # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross - not required for go >= 1.5
  * make cross
  * git push --tags origin master
  * # Wait for the appveyor and travis builds to complete then...
  * make fetch_binaries
  * make tarball
  * make sign_upload
  * make check_sign
  * make upload
  * make upload_website
  * git push --tags origin master
  * make upload_github
  * make startdev
  * # announce with forum post, twitter post, G+ post

Early in the next release cycle update the vendored dependencies
  * Review any pinned packages in go.mod and remove if possible
  * GO111MODULE=on go get -u github.com/spf13/cobra@master
  * make update
  * git status
  * git add new files
  * git commit -a -v

If `make update` fails with errors like this:

```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```

Can be fixed with

  * GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
  * GO111MODULE=on go mod tidy
  * GO111MODULE=on go mod vendor

Making a point release. If rclone needs a point release due to some
horrendous bug, then
  * git branch v1.XX v1.XX-fixes
  * git cherry-pick any fixes
  * Test (see above)
  * make NEW_TAG=v1.XX.1 tag
  * edit docs/content/changelog.md
  * make TAG=v1.43.1 doc
  * git commit -a -v -m "Version v1.XX.1"
  * git tag -d v1.XX.1
  * git tag -s -m "Version v1.XX.1" v1.XX.1
  * git push --tags -u origin v1.XX-fixes
  * make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
  * make TAG=v1.43.1 tarball
  * make TAG=v1.43.1 sign_upload
  * make TAG=v1.43.1 check_sign
  * make TAG=v1.43.1 upload
  * make TAG=v1.43.1 upload_website
  * make TAG=v1.43.1 upload_github
  * NB this overwrites the current beta so after the release, rebuild the last travis build
  * Announce!

189  azure-pipelines.yml
@@ -0,0 +1,189 @@
---
# Azure pipelines build for rclone
# Parts stolen shamelessly from all round the Internet, especially Caddy

trigger:
  tags:
    include:
      - '*'

strategy:
  matrix:
    go1.9:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: go1.9.7
      MAKE_QUICKTEST: true
    go1.10:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: go1.10.8
      MAKE_QUICKTEST: true
    go1.11:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: go1.11.8
      MAKE_QUICKTEST: true
    linux:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: latest
      GOTAGS: cmount
      BUILD_FLAGS: '-include "^linux/"'
      MAKE_CHECK: true
      MAKE_QUICKTEST: true
      DEPLOY: true
    other_os:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: latest
      BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
      MAKE_COMPILE_ALL: true
      DEPLOY: true
    modules_race:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: latest
      GO111MODULE: on
      GOPROXY: https://proxy.golang.org
      MAKE_QUICKTEST: true
      RACEMAKE_QUICKTEST: true
    mac:
      imageName: macos-10.13
      gorootDir: /usr/local
      GO_VERSION: latest
      GOTAGS: "" # cmount doesn't work on osx travis for some reason
      BUILD_FLAGS: '-include "^darwin/" -cgo'
      MAKE_QUICKTEST: true
      RACEMAKE_QUICKTEST: true
      DEPLOY: true
    windows:
      imageName: windows-2019
      gorootDir: C:\
      GO_VERSION: latest
      BUILD_FLAGS: '-include "^windows/amd64" -cgo' # 386 doesn't build yet
      MAKE_QUICKTEST: true
      DEPLOY: true

pool:
  vmImage: $(imageName)

variables:
  GOROOT: $(gorootDir)/go
  GOPATH: $(system.defaultWorkingDirectory)/gopath
  GOBIN: $(GOPATH)/bin
  modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
  GO111MODULE: 'off'
  GOTAGS: cmount
  GO_LATEST: false
  CPATH: ''

steps:
- bash: |
    latestGo=$(curl "https://golang.org/VERSION?m=text")
    echo "##vso[task.setvariable variable=GO_VERSION]$latestGo"
    echo "##vso[task.setvariable variable=GO_LATEST]true"
    echo "Latest Go version: $latestGo"
  condition: eq( variables['GO_VERSION'], 'latest' )
  displayName: "Get latest Go version"

- bash: |
    sudo rm -f $(which go)
    echo '##vso[task.prependpath]$(GOBIN)'
    echo '##vso[task.prependpath]$(GOROOT)/bin'
    mkdir -p '$(modulePath)'
    shopt -s extglob
    shopt -s dotglob
    mv !(gopath) '$(modulePath)'
  displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH

# Install Libraries (varies by platform)

- bash: |
    sudo modprobe fuse
    sudo chmod 666 /dev/fuse
    sudo chown root:$USER /etc/fuse.conf
    sudo apt-get install fuse libfuse-dev rpm pkg-config
  condition: eq( variables['Agent.OS'], 'Linux' )
  displayName: Install Libraries on Linux

- bash: |
    brew update
    brew tap caskroom/cask
    brew cask install osxfuse
  condition: eq( variables['Agent.OS'], 'Darwin' )
  displayName: Install Libraries on macOS

- powershell: |
    choco install -y winfsp zip make
    Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
  condition: eq( variables['Agent.OS'], 'Windows_NT' )
  displayName: Install Libraries on Windows

# Install Go (this varies by platform)

- bash: |
    wget "https://dl.google.com/go/$(GO_VERSION).linux-amd64.tar.gz"
    sudo mkdir $(gorootDir)
    sudo chown ${USER}:${USER} $(gorootDir)
    tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-amd64.tar.gz"
  condition: eq( variables['Agent.OS'], 'Linux' )
  displayName: Install Go on Linux

- bash: |
    wget "https://dl.google.com/go/$(GO_VERSION).darwin-amd64.tar.gz"
    sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-amd64.tar.gz"
  condition: eq( variables['Agent.OS'], 'Darwin' )
  displayName: Install Go on macOS

- powershell: |
    Write-Host "Downloading Go... (please be patient, I am very slow)"
    (New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-amd64.zip", "$(GO_VERSION).windows-amd64.zip")
    Write-Host "Extracting Go... (I'm slow too)"
    Expand-Archive "$(GO_VERSION).windows-amd64.zip" -DestinationPath "$(gorootDir)"
  condition: eq( variables['Agent.OS'], 'Windows_NT' )
  displayName: Install Go on Windows

- bash: |
    printf "Using go at: $(which go)\n"
    printf "Go version: $(go version)\n"
    printf "\n\nGo environment:\n\n"
    go env
    printf "\n\nSystem environment:\n\n"
    env
    printf "\n\nRclone environment:\n\n"
    make vars
  workingDirectory: '$(modulePath)'
  displayName: Print Go version and environment

- script: |
    make
    make quicktest
  workingDirectory: '$(modulePath)'
  displayName: Run tests
  condition: eq( variables['MAKE_QUICKTEST'], 'true' )

- bash: |
    make racequicktest
  workingDirectory: '$(modulePath)'
  displayName: Race test
  condition: eq( variables['RACEMAKE_QUICKTEST'], 'true' )

- bash: |
    make build_dep
    make check
  workingDirectory: '$(modulePath)'
  displayName: Code quality test
  condition: eq( variables['MAKE_CHECK'], 'true' )

- bash: |
    make compile_all
  workingDirectory: '$(modulePath)'
  displayName: Compile all architectures test
  condition: eq( variables['MAKE_COMPILE_ALL'], 'true' )

- bash: |
    make vars # FIXME travis_beta
  workingDirectory: '$(modulePath)'
  displayName: Deploy built binaries
  condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
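
The "Get latest Go version" step above discovers the current Go release with a plain HTTP GET. A minimal standalone sketch of the same lookup in Go (only the URL comes from the pipeline; everything else is illustrative, and the endpoint returns plain text like `go1.12.7` when queried with `?m=text`):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Same endpoint the pipeline's first bash step curls with ?m=text
	resp, err := http.Get("https://golang.org/VERSION?m=text")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	version, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("latest Go version: %s\n", version)
}
```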

54  backend/alias/alias.go
@@ -0,0 +1,54 @@
package alias

import (
	"errors"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fspath"
)

// Register with Fs
func init() {
	fsi := &fs.RegInfo{
		Name:        "alias",
		Description: "Alias for an existing remote",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "remote",
			Help:     "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
			Required: true,
		}},
	}
	fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
	Remote string `config:"remote"`
}

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.Remote == "" {
		return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
	}
	if strings.HasPrefix(opt.Remote, name+":") {
		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
	}
	fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
	if err != nil {
		return nil, err
	}
	return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
}
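
The `remote` option is normally set via `rclone config`, but the backend registered above can also be driven straight from Go. A minimal sketch, in the spirit of the internal test that follows (the remote name `myalias` and the path `/tmp/data` are invented for illustration):

```go
package main

import (
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/alias" // register the alias backend
	_ "github.com/rclone/rclone/backend/local" // register the backend it points at
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
)

func main() {
	config.LoadConfig()
	// Point an alias remote at a local directory (hypothetical values)
	config.FileSet("myalias", "type", "alias")
	config.FileSet("myalias", "remote", "/tmp/data")
	// fs.NewFs resolves "myalias:" through alias.NewFs to the local backend
	f, err := fs.NewFs("myalias:")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("alias resolved to:", f.Name(), f.Root())
}
```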

105  backend/alias/alias_internal_test.go
@@ -0,0 +1,105 @@
package alias

import (
	"context"
	"fmt"
	"path"
	"path/filepath"
	"sort"
	"testing"

	_ "github.com/rclone/rclone/backend/local" // pull in test backend
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/stretchr/testify/require"
)

var (
	remoteName = "TestAlias"
)

func prepare(t *testing.T, root string) {
	config.LoadConfig()

	// Configure the remote
	config.FileSet(remoteName, "type", "alias")
	config.FileSet(remoteName, "remote", root)
}

func TestNewFS(t *testing.T) {
	type testEntry struct {
		remote string
		size   int64
		isDir  bool
	}
	for testi, test := range []struct {
		remoteRoot string
		fsRoot     string
		fsList     string
		wantOK     bool
		entries    []testEntry
	}{
		{"", "", "", true, []testEntry{
			{"four", -1, true},
			{"one%.txt", 6, false},
			{"three", -1, true},
			{"two.html", 7, false},
		}},
		{"", "four", "", true, []testEntry{
			{"five", -1, true},
			{"under four.txt", 9, false},
		}},
		{"", "", "four", true, []testEntry{
			{"four/five", -1, true},
			{"four/under four.txt", 9, false},
		}},
		{"four", "..", "", true, []testEntry{
			{"four", -1, true},
			{"one%.txt", 6, false},
			{"three", -1, true},
			{"two.html", 7, false},
		}},
		{"four", "../three", "", true, []testEntry{
			{"underthree.txt", 9, false},
		}},
	} {
		what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)

		remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
		require.NoError(t, err, what)
		prepare(t, remoteRoot)
		f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
		require.NoError(t, err, what)
		gotEntries, err := f.List(context.Background(), test.fsList)
		require.NoError(t, err, what)

		sort.Sort(gotEntries)

		require.Equal(t, len(test.entries), len(gotEntries), what)
		for i, gotEntry := range gotEntries {
			what := fmt.Sprintf("%s, entry=%d", what, i)
			wantEntry := test.entries[i]

			require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
			require.Equal(t, wantEntry.size, gotEntry.Size(), what)
			_, isDir := gotEntry.(fs.Directory)
			require.Equal(t, wantEntry.isDir, isDir, what)
		}
	}
}

func TestNewFSNoRemote(t *testing.T) {
	prepare(t, "")
	f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))

	require.Error(t, err)
	require.Nil(t, f)
}

func TestNewFSInvalidRemote(t *testing.T) {
	prepare(t, "not_existing_test_remote:")
	f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))

	require.Error(t, err)
	require.Nil(t, f)
}

1  backend/alias/test/files/four/five/underfive.txt
@@ -0,0 +1 @@
apple

1  backend/alias/test/files/four/under four.txt
@@ -0,0 +1 @@
beetroot

1  backend/alias/test/files/one%.txt
@@ -0,0 +1 @@
hello

1  backend/alias/test/files/three/underthree.txt
@@ -0,0 +1 @@
rutabaga

1  backend/alias/test/files/two.html
@@ -0,0 +1 @@
potato

34  backend/all/all.go
@@ -0,0 +1,34 @@
package all

import (
	// Active file systems
	_ "github.com/rclone/rclone/backend/alias"
	_ "github.com/rclone/rclone/backend/amazonclouddrive"
	_ "github.com/rclone/rclone/backend/azureblob"
	_ "github.com/rclone/rclone/backend/b2"
	_ "github.com/rclone/rclone/backend/box"
	_ "github.com/rclone/rclone/backend/cache"
	_ "github.com/rclone/rclone/backend/crypt"
	_ "github.com/rclone/rclone/backend/drive"
	_ "github.com/rclone/rclone/backend/dropbox"
	_ "github.com/rclone/rclone/backend/fichier"
	_ "github.com/rclone/rclone/backend/ftp"
	_ "github.com/rclone/rclone/backend/googlecloudstorage"
	_ "github.com/rclone/rclone/backend/googlephotos"
	_ "github.com/rclone/rclone/backend/http"
	_ "github.com/rclone/rclone/backend/hubic"
	_ "github.com/rclone/rclone/backend/jottacloud"
	_ "github.com/rclone/rclone/backend/koofr"
	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/mega"
	_ "github.com/rclone/rclone/backend/onedrive"
	_ "github.com/rclone/rclone/backend/opendrive"
	_ "github.com/rclone/rclone/backend/pcloud"
	_ "github.com/rclone/rclone/backend/qingstor"
	_ "github.com/rclone/rclone/backend/s3"
	_ "github.com/rclone/rclone/backend/sftp"
	_ "github.com/rclone/rclone/backend/swift"
	_ "github.com/rclone/rclone/backend/union"
	_ "github.com/rclone/rclone/backend/webdav"
	_ "github.com/rclone/rclone/backend/yandex"
)
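
Each blank import above runs that backend's init(), which calls fs.Register as seen in alias.go earlier. A minimal sketch of inspecting the result (this assumes fs.Registry is the exported slice that fs.Register appends to in this source tree):

```go
package main

import (
	"fmt"

	_ "github.com/rclone/rclone/backend/all" // side effect: register every backend
	"github.com/rclone/rclone/fs"
)

func main() {
	// Each backend's init() appended a *fs.RegInfo via fs.Register
	for _, ri := range fs.Registry {
		fmt.Printf("%-20s %s\n", ri.Name, ri.Description)
	}
}
```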

1420  backend/amazonclouddrive/amazonclouddrive.go
File diff suppressed because it is too large

20  backend/amazonclouddrive/amazonclouddrive_test.go
@@ -0,0 +1,20 @@
// Test AmazonCloudDrive filesystem interface

// +build acd

package amazonclouddrive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/amazonclouddrive"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
	fstests.RemoteName = "TestAmazonCloudDrive:"
	fstests.Run(t)
}

1532  backend/azureblob/azureblob.go
File diff suppressed because it is too large

18  backend/azureblob/azureblob_internal_test.go
@@ -0,0 +1,18 @@
// +build !plan9,!solaris

package azureblob

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func (f *Fs) InternalTest(t *testing.T) {
	// Check first that the feature flags are set on this remote
	enabled := f.Features().SetTier
	assert.True(t, enabled)
	enabled = f.Features().GetTier
	assert.True(t, enabled)
}

37  backend/azureblob/azureblob_test.go
@@ -0,0 +1,37 @@
// Test AzureBlob filesystem interface

// +build !plan9,!solaris

package azureblob

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:  "TestAzureBlob:",
		NilObject:   (*Object)(nil),
		TiersToTest: []string{"Hot", "Cool"},
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MaxChunkSize: maxChunkSize,
		},
	})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadCutoff(cs)
}

var (
	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
)
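
The trailing `var _ fstests.SetUploadChunkSizer = (*Fs)(nil)` lines above are compile-time assertions rather than runtime values. A self-contained sketch of the idiom, with invented types for illustration:

```go
package main

import "fmt"

// Sizer stands in for an interface like fstests.SetUploadChunkSizer.
type Sizer interface {
	Size() int
}

type thing struct{}

func (t *thing) Size() int { return 42 }

// The typed nil conversion compiles only if *thing satisfies Sizer, so a
// missing or mistyped method becomes a build error instead of a failure
// discovered at run time.
var _ Sizer = (*thing)(nil)

func main() {
	var s Sizer = &thing{}
	fmt.Println(s.Size())
}
```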

6  backend/azureblob/azureblob_unsupported.go
@@ -0,0 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files"

// +build plan9 solaris

package azureblob

339  backend/b2/api/types.go
@@ -0,0 +1,339 @@
package api

import (
	"fmt"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs/fserrors"
)

// Error describes a B2 error response
type Error struct {
	Status  int    `json:"status"`  // The numeric HTTP status code. Always matches the status in the HTTP response.
	Code    string `json:"code"`    // A single-identifier code that identifies the error.
	Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}

// Error satisfies the error interface
func (e *Error) Error() string {
	return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}

// Fatal satisfies the Fatal interface
//
// It indicates which errors should be treated as fatal
func (e *Error) Fatal() bool {
	return e.Status == 403 // 403 errors shouldn't be retried
}

var _ fserrors.Fataler = (*Error)(nil)

// Bucket describes a B2 bucket
type Bucket struct {
	ID        string `json:"bucketId"`
	AccountID string `json:"accountId"`
	Name      string `json:"bucketName"`
	Type      string `json:"bucketType"`
}

// Timestamp is a UTC time when this file was uploaded. It is a base
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
// fits in a 64 bit integer such as the type "long" in the programming
// language Java. It is intended to be compatible with Java's time
// long. For example, it can be passed directly into the Java call
// Date.setTime(long time).
type Timestamp time.Time

// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
	timestamp := (*time.Time)(t).UTC().UnixNano()
	return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
}

// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
	timestamp, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return err
	}
	*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
	return nil
}

const versionFormat = "-v2006-01-02-150405.000"

// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := time.Time(t).Format(versionFormat)
	// Replace the '.' with a '-'
	s = strings.Replace(s, ".", "-", -1)
	return base + s + ext
}

// RemoveVersion removes the timestamp from a filename as a version string.
//
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
	newRemote = remote
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	if len(base) < len(versionFormat) {
		return
	}
	versionStart := len(base) - len(versionFormat)
	// Check it ends in -xxx
	if base[len(base)-4] != '-' {
		return
	}
	// Replace with .xxx for parsing
	base = base[:len(base)-4] + "." + base[len(base)-3:]
	newT, err := time.Parse(versionFormat, base[versionStart:])
	if err != nil {
		return
	}
	return Timestamp(newT), base[:versionStart] + ext
}

// IsZero returns true if the timestamp is uninitialized
func (t Timestamp) IsZero() bool {
	return time.Time(t).IsZero()
}

// Equal compares two timestamps
//
// If either is zero it returns false
func (t Timestamp) Equal(s Timestamp) bool {
	if time.Time(t).IsZero() {
		return false
	}
	if time.Time(s).IsZero() {
		return false
	}
	return time.Time(t).Equal(time.Time(s))
}

// File is info about a file
type File struct {
	ID              string            `json:"fileId"`          // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name            string            `json:"fileName"`        // The name of this file, which can be used with b2_download_file_by_name.
	Action          string            `json:"action"`          // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
	Size            int64             `json:"size"`            // The number of bytes in the file.
	UploadTimestamp Timestamp         `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
	SHA1            string            `json:"contentSha1"`     // The SHA1 of the bytes stored in the file.
	ContentType     string            `json:"contentType"`     // The MIME type of the file.
	Info            map[string]string `json:"fileInfo"`        // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}

// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
	AbsoluteMinimumPartSize int    `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
	AccountID               string `json:"accountId"`               // The identifier for the account.
	Allowed                 struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
		BucketID     string      `json:"bucketId"`     // When present, access is restricted to one bucket.
		BucketName   string      `json:"bucketName"`   // When present, name of bucket - may be empty
		Capabilities []string    `json:"capabilities"` // A list of strings, each one naming a capability the key has.
		NamePrefix   interface{} `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
	} `json:"allowed"`
	APIURL              string `json:"apiUrl"`              // The base URL to use for all API calls except for uploading and downloading files.
	AuthorizationToken  string `json:"authorizationToken"`  // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
	DownloadURL         string `json:"downloadUrl"`         // The base URL to use for downloading files.
	MinimumPartSize     int    `json:"minimumPartSize"`     // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
	RecommendedPartSize int    `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}

// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
	AccountID   string   `json:"accountId"`             // The identifier for the account.
	BucketID    string   `json:"bucketId,omitempty"`    // When specified, the result will be a list containing just this bucket.
	BucketName  string   `json:"bucketName,omitempty"`  // When specified, the result will be a list containing just this bucket.
	BucketTypes []string `json:"bucketTypes,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the list buckets response.
}

// ListBucketsResponse is as returned from the b2_list_buckets call
type ListBucketsResponse struct {
	Buckets []Bucket `json:"buckets"`
}

// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
type ListFileNamesRequest struct {
	BucketID      string `json:"bucketId"`                // required - The bucket to look for file names in.
	StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this one will be returned.
	MaxFileCount  int    `json:"maxFileCount,omitempty"`  // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
	StartFileID   string `json:"startFileId,omitempty"`   // optional - What to pass in to startFileId for the next search to continue where this one left off.
	Prefix        string `json:"prefix,omitempty"`        // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files.
	Delimiter     string `json:"delimiter,omitempty"`     // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
}

// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
type ListFileNamesResponse struct {
	Files        []File  `json:"files"`        // An array of objects, each one describing one file.
	NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
	NextFileID   *string `json:"nextFileId"`   // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
}

// GetUploadURLRequest is passed to b2_get_upload_url
type GetUploadURLRequest struct {
	BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
}

// GetUploadURLResponse is received from b2_get_upload_url
type GetUploadURLResponse struct {
	BucketID           string `json:"bucketId"`           // The unique ID of the bucket.
	UploadURL          string `json:"uploadUrl"`          // The URL that can be used to upload files to this bucket, see b2_upload_file.
	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}

// GetDownloadAuthorizationRequest is passed to b2_get_download_authorization
type GetDownloadAuthorizationRequest struct {
	BucketID               string `json:"bucketId"`                       // The ID of the bucket that you want to download from.
	FileNamePrefix         string `json:"fileNamePrefix"`                 // The file name prefix of files the download authorization token will allow access to.
	ValidDurationInSeconds int64  `json:"validDurationInSeconds"`         // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds.
	B2ContentDisposition   string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition.
}

// GetDownloadAuthorizationResponse is received from b2_get_download_authorization
type GetDownloadAuthorizationResponse struct {
	BucketID           string `json:"bucketId"`           // The unique ID of the bucket.
	FileNamePrefix     string `json:"fileNamePrefix"`     // The file name prefix of files the download authorization token will allow access to.
	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name.
}

// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct {
	ID              string            `json:"fileId"`          // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name            string            `json:"fileName"`        // The name of this file, which can be used with b2_download_file_by_name.
	Action          string            `json:"action"`          // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
	AccountID       string            `json:"accountId"`       // Your account ID.
	BucketID        string            `json:"bucketId"`        // The bucket that the file is in.
	Size            int64             `json:"contentLength"`   // The number of bytes stored in the file.
	UploadTimestamp Timestamp         `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
	SHA1            string            `json:"contentSha1"`     // The SHA1 of the bytes stored in the file.
	ContentType     string            `json:"contentType"`     // The MIME type of the file.
	Info            map[string]string `json:"fileInfo"`        // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}

// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
	AccountID string `json:"accountId"`
	Name      string `json:"bucketName"`
	Type      string `json:"bucketType"`
}

// DeleteBucketRequest is used to delete a bucket
type DeleteBucketRequest struct {
	ID        string `json:"bucketId"`
	AccountID string `json:"accountId"`
}

// DeleteFileRequest is used to delete a file version
type DeleteFileRequest struct {
	ID   string `json:"fileId"`   // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
	Name string `json:"fileName"` // The name of this file.
}

// HideFileRequest is used to hide a file
type HideFileRequest struct {
	BucketID string `json:"bucketId"` // The bucket containing the file to hide.
	Name     string `json:"fileName"` // The name of the file to hide.
}

// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
type GetFileInfoRequest struct {
	ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
}

// StartLargeFileRequest (b2_start_large_file) prepares for uploading the parts of a large file.
//
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
// Java call Date.setTime(long time).
//
// If the caller knows the SHA1 of the entire large file being
// uploaded, Backblaze recommends using large_file_sha1 as the name,
// and a 40 byte hex string representing the SHA1.
//
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct {
	BucketID    string            `json:"bucketId"`    // The ID of the bucket that the file will go in.
	Name        string            `json:"fileName"`    // The name of the file. See Files for requirements on file names.
	ContentType string            `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
	Info        map[string]string `json:"fileInfo"`    // A JSON object holding the name/value pairs for the custom file info.
}

// StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct {
	ID              string            `json:"fileId"`          // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name            string            `json:"fileName"`        // The name of this file, which can be used with b2_download_file_by_name.
	AccountID       string            `json:"accountId"`       // The identifier for the account.
	BucketID        string            `json:"bucketId"`        // The unique ID of the bucket.
	ContentType     string            `json:"contentType"`     // The MIME type of the file.
	Info            map[string]string `json:"fileInfo"`        // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
	UploadTimestamp Timestamp         `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
}

// GetUploadPartURLRequest is passed to b2_get_upload_part_url
type GetUploadPartURLRequest struct {
	ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}

// GetUploadPartURLResponse is received from b2_get_upload_part_url
type GetUploadPartURLResponse struct {
	ID                 string `json:"fileId"`             // The unique identifier of the file being uploaded.
	UploadURL          string `json:"uploadUrl"`          // The URL that can be used to upload parts of this file, see b2_upload_part.
	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading parts of this file, see b2_upload_part.
}

// UploadPartResponse is the response to b2_upload_part
type UploadPartResponse struct {
	ID         string `json:"fileId"`        // The unique identifier of the file being uploaded.
	PartNumber int64  `json:"partNumber"`    // Which part this is (starting from 1)
	Size       int64  `json:"contentLength"` // The number of bytes stored in the file.
	SHA1       string `json:"contentSha1"`   // The SHA1 of the bytes stored in the file.
}

// FinishLargeFileRequest is passed to b2_finish_large_file
//
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
//
// Large files do not have a SHA1 checksum. The value will always be "none".
type FinishLargeFileRequest struct {
	ID    string   `json:"fileId"`        // The unique identifier of the file being uploaded.
	SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of the part 1 is the first string in the array, at index 0.
}

// CancelLargeFileRequest is passed to b2_cancel_large_file
//
// The response is a CancelLargeFileResponse
type CancelLargeFileRequest struct {
	ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}

// CancelLargeFileResponse is the response to CancelLargeFileRequest
type CancelLargeFileResponse struct {
	ID        string `json:"fileId"`    // The unique identifier of the file being uploaded.
	Name      string `json:"fileName"`  // The name of this file.
	AccountID string `json:"accountId"` // The identifier for the account.
	BucketID  string `json:"bucketId"`  // The unique ID of the bucket.
}

// CopyFileRequest is as passed to b2_copy_file
type CopyFileRequest struct {
	SourceID          string            `json:"sourceFileId"`                  // The ID of the source file being copied.
	Name              string            `json:"fileName"`                      // The name of the new file being created.
	Range             string            `json:"range,omitempty"`               // The range of bytes to copy. If not provided, the whole source file will be copied.
	MetadataDirective string            `json:"metadataDirective,omitempty"`   // The strategy for how to populate metadata for the new file: COPY or REPLACE
	ContentType       string            `json:"contentType,omitempty"`         // The MIME type of the content of the file (REPLACE only)
	Info              map[string]string `json:"fileInfo,omitempty"`            // This field stores the metadata that will be stored with the file. (REPLACE only)
	DestBucketID      string            `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}

87  backend/b2/api/types_test.go
@@ -0,0 +1,87 @@
package api_test

import (
	"testing"
	"time"

	"github.com/rclone/rclone/backend/b2/api"
	"github.com/rclone/rclone/fstest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	emptyT api.Timestamp
	t0     = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
	t0r    = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
	t1     = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)

func TestTimestampMarshalJSON(t *testing.T) {
	resB, err := t0.MarshalJSON()
	res := string(resB)
	require.NoError(t, err)
	assert.Equal(t, "3661123", res)

	resB, err = t1.MarshalJSON()
	res = string(resB)
	require.NoError(t, err)
	assert.Equal(t, "981173106123", res)
}

func TestTimestampUnmarshalJSON(t *testing.T) {
	var tActual api.Timestamp
	err := tActual.UnmarshalJSON([]byte("981173106123"))
	require.NoError(t, err)
	assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}

func TestTimestampAddVersion(t *testing.T) {
	for _, test := range []struct {
		t        api.Timestamp
		in       string
		expected string
	}{
		{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
		{t1, "potato", "potato-v2001-02-03-040506-123"},
		{t1, "", "-v2001-02-03-040506-123"},
	} {
		actual := test.t.AddVersion(test.in)
		assert.Equal(t, test.expected, actual, test.in)
	}
}

func TestTimestampRemoveVersion(t *testing.T) {
	for _, test := range []struct {
		in             string
		expectedT      api.Timestamp
		expectedRemote string
	}{
		{"potato.txt", emptyT, "potato.txt"},
		{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
		{"potato-v2001-02-03-040506-123", t1, "potato"},
		{"-v2001-02-03-040506-123", t1, ""},
		{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
		{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
	} {
		actualT, actualRemote := api.RemoveVersion(test.in)
		assert.Equal(t, test.expectedT, actualT, test.in)
		assert.Equal(t, test.expectedRemote, actualRemote, test.in)
	}
}

func TestTimestampIsZero(t *testing.T) {
	assert.True(t, emptyT.IsZero())
	assert.False(t, t0.IsZero())
	assert.False(t, t1.IsZero())
}

func TestTimestampEqual(t *testing.T) {
	assert.False(t, emptyT.Equal(emptyT))
	assert.False(t, t0.Equal(emptyT))
	assert.False(t, emptyT.Equal(t0))
	assert.False(t, t0.Equal(t1))
	assert.False(t, t1.Equal(t0))
	assert.True(t, t0.Equal(t0))
	assert.True(t, t1.Equal(t1))
}

1805  backend/b2/b2.go
File diff suppressed because it is too large

170  backend/b2/b2_internal_test.go
@@ -0,0 +1,170 @@
package b2

import (
	"testing"
	"time"

	"github.com/rclone/rclone/fstest"
)

// Test b2 string encoding
// https://www.backblaze.com/b2/docs/string_encoding.html

var encodeTest = []struct {
	fullyEncoded     string
	minimallyEncoded string
	plainText        string
}{
	{fullyEncoded: "%20", minimallyEncoded: "+", plainText: " "},
	{fullyEncoded: "%21", minimallyEncoded: "!", plainText: "!"},
	{fullyEncoded: "%22", minimallyEncoded: "%22", plainText: "\""},
	{fullyEncoded: "%23", minimallyEncoded: "%23", plainText: "#"},
	{fullyEncoded: "%24", minimallyEncoded: "$", plainText: "$"},
	{fullyEncoded: "%25", minimallyEncoded: "%25", plainText: "%"},
	{fullyEncoded: "%26", minimallyEncoded: "%26", plainText: "&"},
	{fullyEncoded: "%27", minimallyEncoded: "'", plainText: "'"},
	{fullyEncoded: "%28", minimallyEncoded: "(", plainText: "("},
	{fullyEncoded: "%29", minimallyEncoded: ")", plainText: ")"},
	{fullyEncoded: "%2A", minimallyEncoded: "*", plainText: "*"},
	{fullyEncoded: "%2B", minimallyEncoded: "%2B", plainText: "+"},
	{fullyEncoded: "%2C", minimallyEncoded: "%2C", plainText: ","},
	{fullyEncoded: "%2D", minimallyEncoded: "-", plainText: "-"},
	{fullyEncoded: "%2E", minimallyEncoded: ".", plainText: "."},
	{fullyEncoded: "%2F", minimallyEncoded: "/", plainText: "/"},
	{fullyEncoded: "%30", minimallyEncoded: "0", plainText: "0"},
	{fullyEncoded: "%31", minimallyEncoded: "1", plainText: "1"},
	{fullyEncoded: "%32", minimallyEncoded: "2", plainText: "2"},
	{fullyEncoded: "%33", minimallyEncoded: "3", plainText: "3"},
	{fullyEncoded: "%34", minimallyEncoded: "4", plainText: "4"},
	{fullyEncoded: "%35", minimallyEncoded: "5", plainText: "5"},
	{fullyEncoded: "%36", minimallyEncoded: "6", plainText: "6"},
	{fullyEncoded: "%37", minimallyEncoded: "7", plainText: "7"},
	{fullyEncoded: "%38", minimallyEncoded: "8", plainText: "8"},
	{fullyEncoded: "%39", minimallyEncoded: "9", plainText: "9"},
	{fullyEncoded: "%3A", minimallyEncoded: ":", plainText: ":"},
	{fullyEncoded: "%3B", minimallyEncoded: ";", plainText: ";"},
	{fullyEncoded: "%3C", minimallyEncoded: "%3C", plainText: "<"},
	{fullyEncoded: "%3D", minimallyEncoded: "=", plainText: "="},
	{fullyEncoded: "%3E", minimallyEncoded: "%3E", plainText: ">"},
	{fullyEncoded: "%3F", minimallyEncoded: "%3F", plainText: "?"},
	{fullyEncoded: "%40", minimallyEncoded: "@", plainText: "@"},
	{fullyEncoded: "%41", minimallyEncoded: "A", plainText: "A"},
	{fullyEncoded: "%42", minimallyEncoded: "B", plainText: "B"},
	{fullyEncoded: "%43", minimallyEncoded: "C", plainText: "C"},
	{fullyEncoded: "%44", minimallyEncoded: "D", plainText: "D"},
	{fullyEncoded: "%45", minimallyEncoded: "E", plainText: "E"},
	{fullyEncoded: "%46", minimallyEncoded: "F", plainText: "F"},
	{fullyEncoded: "%47", minimallyEncoded: "G", plainText: "G"},
	{fullyEncoded: "%48", minimallyEncoded: "H", plainText: "H"},
	{fullyEncoded: "%49", minimallyEncoded: "I", plainText: "I"},
	{fullyEncoded: "%4A", minimallyEncoded: "J", plainText: "J"},
	{fullyEncoded: "%4B", minimallyEncoded: "K", plainText: "K"},
	{fullyEncoded: "%4C", minimallyEncoded: "L", plainText: "L"},
	{fullyEncoded: "%4D", minimallyEncoded: "M", plainText: "M"},
	{fullyEncoded: "%4E", minimallyEncoded: "N", plainText: "N"},
	{fullyEncoded: "%4F", minimallyEncoded: "O", plainText: "O"},
	{fullyEncoded: "%50", minimallyEncoded: "P", plainText: "P"},
	{fullyEncoded: "%51", minimallyEncoded: "Q", plainText: "Q"},
	{fullyEncoded: "%52", minimallyEncoded: "R", plainText: "R"},
	{fullyEncoded: "%53", minimallyEncoded: "S", plainText: "S"},
	{fullyEncoded: "%54", minimallyEncoded: "T", plainText: "T"},
	{fullyEncoded: "%55", minimallyEncoded: "U", plainText: "U"},
	{fullyEncoded: "%56", minimallyEncoded: "V", plainText: "V"},
	{fullyEncoded: "%57", minimallyEncoded: "W", plainText: "W"},
	{fullyEncoded: "%58", minimallyEncoded: "X", plainText: "X"},
	{fullyEncoded: "%59", minimallyEncoded: "Y", plainText: "Y"},
	{fullyEncoded: "%5A", minimallyEncoded: "Z", plainText: "Z"},
	{fullyEncoded: "%5B", minimallyEncoded: "%5B", plainText: "["},
	{fullyEncoded: "%5C", minimallyEncoded: "%5C", plainText: "\\"},
	{fullyEncoded: "%5D", minimallyEncoded: "%5D", plainText: "]"},
	{fullyEncoded: "%5E", minimallyEncoded: "%5E", plainText: "^"},
	{fullyEncoded: "%5F", minimallyEncoded: "_", plainText: "_"},
	{fullyEncoded: "%60", minimallyEncoded: "%60", plainText: "`"},
	{fullyEncoded: "%61", minimallyEncoded: "a", plainText: "a"},
	{fullyEncoded: "%62", minimallyEncoded: "b", plainText: "b"},
	{fullyEncoded: "%63", minimallyEncoded: "c", plainText: "c"},
	{fullyEncoded: "%64", minimallyEncoded: "d", plainText: "d"},
	{fullyEncoded: "%65", minimallyEncoded: "e", plainText: "e"},
	{fullyEncoded: "%66", minimallyEncoded: "f", plainText: "f"},
	{fullyEncoded: "%67", minimallyEncoded: "g", plainText: "g"},
	{fullyEncoded: "%68", minimallyEncoded: "h", plainText: "h"},
	{fullyEncoded: "%69", minimallyEncoded: "i", plainText: "i"},
	{fullyEncoded: "%6A", minimallyEncoded: "j", plainText: "j"},
	{fullyEncoded: "%6B", minimallyEncoded: "k", plainText: "k"},
	{fullyEncoded: "%6C", minimallyEncoded: "l", plainText: "l"},
	{fullyEncoded: "%6D", minimallyEncoded: "m", plainText: "m"},
	{fullyEncoded: "%6E", minimallyEncoded: "n", plainText: "n"},
	{fullyEncoded: "%6F", minimallyEncoded: "o", plainText: "o"},
	{fullyEncoded: "%70", minimallyEncoded: "p", plainText: "p"},
	{fullyEncoded: "%71", minimallyEncoded: "q", plainText: "q"},
	{fullyEncoded: "%72", minimallyEncoded: "r", plainText: "r"},
	{fullyEncoded: "%73", minimallyEncoded: "s", plainText: "s"},
	{fullyEncoded: "%74", minimallyEncoded: "t", plainText: "t"},
	{fullyEncoded: "%75", minimallyEncoded: "u", plainText: "u"},
	{fullyEncoded: "%76", minimallyEncoded: "v", plainText: "v"},
	{fullyEncoded: "%77", minimallyEncoded: "w", plainText: "w"},
	{fullyEncoded: "%78", minimallyEncoded: "x", plainText: "x"},
	{fullyEncoded: "%79", minimallyEncoded: "y", plainText: "y"},
	{fullyEncoded: "%7A", minimallyEncoded: "z", plainText: "z"},
	{fullyEncoded: "%7B", minimallyEncoded: "%7B", plainText: "{"},
	{fullyEncoded: "%7C", minimallyEncoded: "%7C", plainText: "|"},
	{fullyEncoded: "%7D", minimallyEncoded: "%7D", plainText: "}"},
	{fullyEncoded: "%7E", minimallyEncoded: "~", plainText: "~"},
	{fullyEncoded: "%7F", minimallyEncoded: "%7F", plainText: "\u007f"},
	{fullyEncoded: "%E8%87%AA%E7%94%B1", minimallyEncoded: "%E8%87%AA%E7%94%B1", plainText: "自由"},
	{fullyEncoded: "%F0%90%90%80", minimallyEncoded: "%F0%90%90%80", plainText: "𐐀"},
}

func TestUrlEncode(t *testing.T) {
	for _, test := range encodeTest {
		got := urlEncode(test.plainText)
		if got != test.minimallyEncoded && got != test.fullyEncoded {
			t.Errorf("urlEncode(%q) got %q wanted %q or %q", test.plainText, got, test.minimallyEncoded, test.fullyEncoded)
		}
	}
}

func TestTimeString(t *testing.T) {
	for _, test := range []struct {
		in   time.Time
		want string
	}{
		{fstest.Time("1970-01-01T00:00:00.000000000Z"), "0"},
		{fstest.Time("2001-02-03T04:05:10.123123123Z"), "981173110123"},
		{fstest.Time("2001-02-03T05:05:10.123123123+01:00"), "981173110123"},
	} {
		got := timeString(test.in)
		if test.want != got {
			t.Logf("%v: want %v got %v", test.in, test.want, got)
		}
	}

}

func TestParseTimeString(t *testing.T) {
	for _, test := range []struct {
		in        string
		want      time.Time
		wantError string
	}{
		{"0", fstest.Time("1970-01-01T00:00:00.000000000Z"), ""},
		{"981173110123", fstest.Time("2001-02-03T04:05:10.123000000Z"), ""},
		{"", time.Time{}, ""},
		{"potato", time.Time{}, `strconv.ParseInt: parsing "potato": invalid syntax`},
	} {
		o := Object{}
		err := o.parseTimeString(test.in)
		got := o.modTime
		var gotError string
		if err != nil {
			gotError = err.Error()
		}
		if test.want != got {
			t.Logf("%v: want %v got %v", test.in, test.want, got)
		}
		if test.wantError != gotError {
			t.Logf("%v: want error %v got error %v", test.in, test.wantError, gotError)
		}
	}

}

34  backend/b2/b2_test.go
@@ -0,0 +1,34 @@
// Test B2 filesystem interface
package b2

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestB2:",
		NilObject:  (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize:       minChunkSize,
			NeedMultipleChunks: true,
		},
	})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadCutoff(cs)
}

var (
	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
)

436  backend/b2/upload.go
@@ -0,0 +1,436 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/b2/docs/large_files.html

package b2

import (
	"bytes"
	"context"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	gohash "hash"
	"io"
	"strings"
	"sync"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/b2/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/rest"
)

type hashAppendingReader struct {
	h         gohash.Hash
	in        io.Reader
	hexSum    string
	hexReader io.Reader
}

// Read returns all bytes from the original reader, then the hex sum
// of what was read so far, then EOF.
func (har *hashAppendingReader) Read(b []byte) (int, error) {
	if har.hexReader == nil {
		n, err := har.in.Read(b)
		if err == io.EOF {
			har.in = nil // allow GC
			err = nil    // allow reading hexSum before EOF

			har.hexSum = hex.EncodeToString(har.h.Sum(nil))
			har.hexReader = strings.NewReader(har.hexSum)
		}
		return n, err
	}
	return har.hexReader.Read(b)
}

// AdditionalLength returns how many bytes the appended hex sum will take up.
func (har *hashAppendingReader) AdditionalLength() int {
	return hex.EncodedLen(har.h.Size())
}

// HexSum returns the hash sum as hex. It's only available after the original
// reader has EOF'd. It's an empty string before that.
func (har *hashAppendingReader) HexSum() string {
	return har.hexSum
}

// newHashAppendingReader takes a Reader and a Hash and will append the hex sum
// after the original reader reaches EOF. The increased size depends on the
// given hash, which may be queried through AdditionalLength()
func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
	withHash := io.TeeReader(in, h)
	return &hashAppendingReader{h: h, in: withHash}
}
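// Illustrative sketch (not part of the original file): draining a
// hashAppendingReader yields the payload followed by its 40-character hex
// SHA1, which is what lets a part be sent with a trailing checksum instead
// of being buffered and hashed up front. Uses only this file's imports.
func exampleHashAppendingReader() {
	har := newHashAppendingReader(strings.NewReader("hello"), sha1.New())
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, har); err == nil {
		// buf now holds "hello" plus har.AdditionalLength() == 40 hex bytes;
		// HexSum() returns the same digest once the source has hit EOF.
		fmt.Printf("read %d bytes, sha1=%s\n", buf.Len(), har.HexSum())
	}
}
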
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
	f        *Fs                             // parent Fs
	o        *Object                         // object being uploaded
	in       io.Reader                       // read the data from here
	wrap     accounting.WrapFn               // account parts being transferred
	id       string                          // ID of the file being uploaded
	size     int64                           // total size
	parts    int64                           // calculated number of parts, if known
	sha1s    []string                        // slice of SHA1s for each part
	uploadMu sync.Mutex                      // lock for upload variable
	uploads  []*api.GetUploadPartURLResponse // result of get upload URL calls
}

// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
	remote := o.remote
	size := src.Size()
	parts := int64(0)
	sha1SliceSize := int64(maxParts)
	if size == -1 {
		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
	} else {
		parts = size / int64(o.fs.opt.ChunkSize)
		if size%int64(o.fs.opt.ChunkSize) != 0 {
			parts++
		}
		if parts > maxParts {
			return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
		}
		sha1SliceSize = parts
	}

	modTime := src.ModTime(ctx)
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_start_large_file",
	}
	bucketID, err := f.getBucketID()
	if err != nil {
		return nil, err
	}
	var request = api.StartLargeFileRequest{
		BucketID:    bucketID,
		Name:        o.fs.root + remote,
		ContentType: fs.MimeType(ctx, src),
		Info: map[string]string{
			timeKey: timeString(modTime),
		},
	}
	// Set the SHA1 if known
	if !o.fs.opt.DisableCheckSum {
		if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
			request.Info[sha1Key] = calculatedSha1
		}
	}
	var response api.StartLargeFileResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(&opts, &request, &response)
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	// unwrap the accounting from the input, we use wrap to put it
	// back on after the buffering
	in, wrap := accounting.UnWrap(in)
	up = &largeUpload{
		f:     f,
		o:     o,
		in:    in,
		wrap:  wrap,
		id:    response.ID,
		size:  size,
		parts: parts,
		sha1s: make([]string, sha1SliceSize),
	}
	return up, nil
}
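The part arithmetic above is plain ceiling division. A worked example, assuming a 96 MiB chunk size (rclone's documented b2 default) and a 250 MiB file:

// 250 MiB / 96 MiB = 2 remainder 58 MiB, so three parts in total:
// two full 96 MiB parts plus one short 58 MiB final part.
size, chunk := int64(250<<20), int64(96<<20)
parts := size / chunk // 2
if size%chunk != 0 {
	parts++ // 3
}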
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
	up.uploadMu.Lock()
	defer up.uploadMu.Unlock()
	if len(up.uploads) == 0 {
		opts := rest.Opts{
			Method: "POST",
			Path:   "/b2_get_upload_part_url",
		}
		var request = api.GetUploadPartURLRequest{
			ID: up.id,
		}
		err := up.f.pacer.Call(func() (bool, error) {
			resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
			return up.f.shouldRetry(resp, err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to get upload URL")
		}
	} else {
		upload, up.uploads = up.uploads[0], up.uploads[1:]
	}
	return upload, nil
}

// returnUploadURL returns the UploadURL to the cache
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
	if upload == nil {
		return
	}
	up.uploadMu.Lock()
	up.uploads = append(up.uploads, upload)
	up.uploadMu.Unlock()
}

// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
	up.uploadMu.Lock()
	up.uploads = nil
	up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(part int64, body []byte) error {
	err := up.f.pacer.Call(func() (bool, error) {
		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))

		// Get upload URL
		upload, err := up.getUploadURL()
		if err != nil {
			return false, err
		}

		in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
		size := int64(len(body)) + int64(in.AdditionalLength())

		// Authorization
		//
		// An upload authorization token, from b2_get_upload_part_url.
		//
		// X-Bz-Part-Number
		//
		// A number from 1 to 10000. The parts uploaded for one file
		// must have contiguous numbers, starting with 1.
		//
		// Content-Length
		//
		// The number of bytes in the file being uploaded. Note that
		// this header is required; you cannot leave it out and just
		// use chunked encoding. The minimum size of every part but
		// the last one is 100MB.
		//
		// X-Bz-Content-Sha1
		//
		// The SHA1 checksum of this part of the file. B2 will
		// check this when the part is uploaded, to make sure that the
		// data arrived correctly. The same SHA1 checksum must be
		// passed to b2_finish_large_file.
		opts := rest.Opts{
			Method:  "POST",
			RootURL: upload.UploadURL,
			Body:    up.wrap(in),
			ExtraHeaders: map[string]string{
				"Authorization":    upload.AuthorizationToken,
				"X-Bz-Part-Number": fmt.Sprintf("%d", part),
				sha1Header:         "hex_digits_at_end",
			},
			ContentLength: &size,
		}

		var response api.UploadPartResponse

		resp, err := up.f.srv.CallJSON(&opts, nil, &response)
		retry, err := up.f.shouldRetry(resp, err)
		if err != nil {
			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
		}
		// On retryable error clear PartUploadURL
		if retry {
			fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
			upload = nil
		}
		up.returnUploadURL(upload)
		up.sha1s[part-1] = in.HexSum()
		return retry, err
	})
	if err != nil {
		fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
	} else {
		fs.Debugf(up.o, "Done sending chunk %d", part)
	}
	return err
}
// finish closes off the large upload
func (up *largeUpload) finish() error {
	fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_finish_large_file",
	}
	var request = api.FinishLargeFileRequest{
		ID:    up.id,
		SHA1s: up.sha1s,
	}
	var response api.FileInfo
	err := up.f.pacer.Call(func() (bool, error) {
		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
		return up.f.shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}
	return up.o.decodeMetaDataFileInfo(&response)
}

// cancel aborts the large upload
func (up *largeUpload) cancel() error {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_cancel_large_file",
	}
	var request = api.CancelLargeFileRequest{
		ID: up.id,
	}
	var response api.CancelLargeFileResponse
	err := up.f.pacer.Call(func() (bool, error) {
		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
		return up.f.shouldRetry(resp, err)
	})
	return err
}

func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
	wg.Add(1)
	go func(part int64, buf []byte) {
		defer wg.Done()
		defer up.f.putUploadBlock(buf)
		err := up.transferChunk(part, buf)
		if err != nil {
			select {
			case errs <- err:
			default:
			}
		}
	}(part, buf)
}

func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
	if err == nil {
		select {
		case err = <-errs:
		default:
		}
	}
	if err != nil {
		fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
		cancelErr := up.cancel()
		if cancelErr != nil {
			fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
		}
		return err
	}
	return up.finish()
}

// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
	errs := make(chan error, 1)
	hasMoreParts := true
	var wg sync.WaitGroup

	// Transfer initial chunk
	up.size = int64(len(initialUploadBlock))
	up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)

outer:
	for part := int64(2); hasMoreParts; part++ {
		// Check any errors
		select {
		case err = <-errs:
			break outer
		default:
		}

		// Get a block of memory
		buf := up.f.getUploadBlock()

		// Read the chunk
		var n int
		n, err = io.ReadFull(up.in, buf)
		if err == io.ErrUnexpectedEOF {
			fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
			buf = buf[:n]
			hasMoreParts = false
			err = nil
		} else if err == io.EOF {
			fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
			up.f.putUploadBlock(buf)
			err = nil
			break outer
		} else if err != nil {
			// other kinds of errors indicate failure
			up.f.putUploadBlock(buf)
			break outer
		}

		// Keep stats up to date
		up.parts = part
		up.size += int64(n)
		if part > maxParts {
			err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
			break outer
		}

		// Transfer the chunk
		up.managedTransferChunk(&wg, errs, part, buf)
	}
	wg.Wait()
	up.sha1s = up.sha1s[:up.parts]

	return up.finishOrCancelOnError(err, errs)
}

// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
	fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
	remaining := up.size
	errs := make(chan error, 1)
	var wg sync.WaitGroup
	var err error
outer:
	for part := int64(1); part <= up.parts; part++ {
		// Check any errors
		select {
		case err = <-errs:
			break outer
		default:
		}

		reqSize := remaining
		if reqSize >= int64(up.f.opt.ChunkSize) {
			reqSize = int64(up.f.opt.ChunkSize)
		}

		// Get a block of memory
		buf := up.f.getUploadBlock()[:reqSize]

		// Read the chunk
		_, err = io.ReadFull(up.in, buf)
		if err != nil {
			up.f.putUploadBlock(buf)
			break outer
		}

		// Transfer the chunk
		up.managedTransferChunk(&wg, errs, part, buf)
		remaining -= reqSize
	}
	wg.Wait()

	return up.finishOrCancelOnError(err, errs)
}
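Taken together, the intended calling convention for this file is: build the upload with newLargeUpload, then drive it with Upload when the size is known up front, or Stream when it is not. A rough sketch of a call site follows, with error handling elided; f, o, in, src and initialUploadBlock stand in for values the backend's Put/Update path would already hold, and in real use the first chunk is read before deciding a large upload is needed at all:

up, err := f.newLargeUpload(ctx, o, in, src)
if err != nil {
	return err
}
if src.Size() < 0 {
	// size unknown: seed Stream with the chunk already read from the input
	err = up.Stream(initialUploadBlock)
} else {
	err = up.Upload()
}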
204  backend/box/api/types.go  Normal file
@@ -0,0 +1,204 @@
// Package api has type definitions for box
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
	"encoding/json"
	"fmt"
	"time"
)

const (
	// 2017-05-03T07:26:10-07:00
	timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the
// box API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
	timeString := (*time.Time)(t).Format(timeFormat)
	return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}
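Because timeFormat embeds the surrounding quotes in the layout string, MarshalJSON and UnmarshalJSON are exact inverses over a quoted RFC3339 string. A quick illustrative round-trip, assuming "fmt" is also imported:

orig := Time(time.Date(2017, 5, 3, 7, 26, 10, 0, time.UTC))
data, _ := orig.MarshalJSON() // `"2017-05-03T07:26:10Z"`, quotes included

var back Time
_ = back.UnmarshalJSON(data)
fmt.Println(time.Time(back).Equal(time.Time(orig))) // true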
// Error is returned from box when things go wrong
type Error struct {
	Type        string `json:"type"`
	Status      int    `json:"status"`
	Code        string `json:"code"`
	ContextInfo json.RawMessage
	HelpURL     string `json:"help_url"`
	Message     string `json:"message"`
	RequestID   string `json:"request_id"`
}

// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
	out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
	if e.Message != "" {
		out += ": " + e.Message
	}
	if e.ContextInfo != nil {
		out += fmt.Sprintf(" (%+v)", e.ContextInfo)
	}
	return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"

// Types of things in Item
const (
	ItemTypeFolder    = "folder"
	ItemTypeFile      = "file"
	ItemStatusActive  = "active"
	ItemStatusTrashed = "trashed"
	ItemStatusDeleted = "deleted"
)

// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
	Type              string  `json:"type"`
	ID                string  `json:"id"`
	SequenceID        string  `json:"sequence_id"`
	Etag              string  `json:"etag"`
	SHA1              string  `json:"sha1"`
	Name              string  `json:"name"`
	Size              float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
	CreatedAt         Time    `json:"created_at"`
	ModifiedAt        Time    `json:"modified_at"`
	ContentCreatedAt  Time    `json:"content_created_at"`
	ContentModifiedAt Time    `json:"content_modified_at"`
	ItemStatus        string  `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
	SharedLink        struct {
		URL    string `json:"url,omitempty"`
		Access string `json:"access,omitempty"`
	} `json:"shared_link"`
}

// ModTime returns the modification time of the item
func (i *Item) ModTime() (t time.Time) {
	t = time.Time(i.ContentModifiedAt)
	if t.IsZero() {
		t = time.Time(i.ModifiedAt)
	}
	return t
}

// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
	TotalCount int    `json:"total_count"`
	Entries    []Item `json:"entries"`
	Offset     int    `json:"offset"`
	Limit      int    `json:"limit"`
	Order      []struct {
		By        string `json:"by"`
		Direction string `json:"direction"`
	} `json:"order"`
}

// Parent defines the ID of the parent directory
type Parent struct {
	ID string `json:"id"`
}

// CreateFolder is the request for Create Folder
type CreateFolder struct {
	Name   string `json:"name"`
	Parent Parent `json:"parent"`
}

// UploadFile is the request for Upload File
type UploadFile struct {
	Name              string `json:"name"`
	Parent            Parent `json:"parent"`
	ContentCreatedAt  Time   `json:"content_created_at"`
	ContentModifiedAt Time   `json:"content_modified_at"`
}

// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
	ContentModifiedAt Time `json:"content_modified_at"`
}

// UpdateFileMove is the request for Upload File to change name and parent
type UpdateFileMove struct {
	Name   string `json:"name"`
	Parent Parent `json:"parent"`
}

// CopyFile is the request for Copy File
type CopyFile struct {
	Name   string `json:"name"`
	Parent Parent `json:"parent"`
}

// CreateSharedLink is the request for Public Link
type CreateSharedLink struct {
	SharedLink struct {
		URL    string `json:"url,omitempty"`
		Access string `json:"access,omitempty"`
	} `json:"shared_link"`
}

// UploadSessionRequest is used in Create Upload Session
type UploadSessionRequest struct {
	FolderID string `json:"folder_id,omitempty"` // don't pass for update
	FileSize int64  `json:"file_size"`
	FileName string `json:"file_name,omitempty"` // optional for update
}

// UploadSessionResponse is returned from Create Upload Session
type UploadSessionResponse struct {
	TotalParts       int   `json:"total_parts"`
	PartSize         int64 `json:"part_size"`
	SessionEndpoints struct {
		ListParts  string `json:"list_parts"`
		Commit     string `json:"commit"`
		UploadPart string `json:"upload_part"`
		Status     string `json:"status"`
		Abort      string `json:"abort"`
	} `json:"session_endpoints"`
	SessionExpiresAt  Time   `json:"session_expires_at"`
	ID                string `json:"id"`
	Type              string `json:"type"`
	NumPartsProcessed int    `json:"num_parts_processed"`
}

// Part defines the return from the upload part call, which is also passed to the commit upload call
type Part struct {
	PartID string `json:"part_id"`
	Offset int64  `json:"offset"`
	Size   int64  `json:"size"`
	Sha1   string `json:"sha1"`
}

// UploadPartResponse is returned from the upload part call
type UploadPartResponse struct {
	Part Part `json:"part"`
}

// CommitUpload is used in the Commit Upload call
type CommitUpload struct {
	Parts      []Part `json:"parts"`
	Attributes struct {
		ContentCreatedAt  Time `json:"content_created_at"`
		ContentModifiedAt Time `json:"content_modified_at"`
	} `json:"attributes"`
}
1143  backend/box/box.go  Normal file
File diff suppressed because it is too large
17  backend/box/box_test.go  Normal file
@@ -0,0 +1,17 @@
// Test Box filesystem interface
package box_test

import (
	"testing"

	"github.com/rclone/rclone/backend/box"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestBox:",
		NilObject:  (*box.Object)(nil),
	})
}
275  backend/box/upload.go  Normal file
@@ -0,0 +1,275 @@
// Multipart upload for box

package box

import (
	"bytes"
	"crypto/sha1"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/box/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/lib/rest"
)

// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
	opts := rest.Opts{
		Method:  "POST",
		Path:    "/files/upload_sessions",
		RootURL: uploadURL,
	}
	request := api.UploadSessionRequest{
		FileSize: size,
	}
	// If object has an ID then it is existing so create a new version
	if o.id != "" {
		opts.Path = "/files/" + o.id + "/upload_sessions"
	} else {
		opts.Path = "/files/upload_sessions"
		request.FolderID = directoryID
		request.FileName = replaceReservedChars(leaf)
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
		return shouldRetry(resp, err)
	})
	return
}

// sha1Digest produces a digest using sha1 as per RFC3230
func sha1Digest(digest []byte) string {
	return "sha=" + base64.StdEncoding.EncodeToString(digest)
}
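Note that RFC 3230 instance digests are base64 of the raw hash bytes, not the hex form box uses elsewhere. An illustrative example, assuming "fmt" is imported, using the well-known SHA-1 of an empty body:

sum := sha1.Sum(nil) // da39a3ee... in hex
fmt.Println(sha1Digest(sum[:]))
// sha=2jmj7l5rSw0yVb/vlWAYkK/YBwk=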
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
	chunkSize := int64(len(chunk))
	sha1sum := sha1.Sum(chunk)
	opts := rest.Opts{
		Method:        "PUT",
		Path:          "/files/upload_sessions/" + SessionID,
		RootURL:       uploadURL,
		ContentType:   "application/octet-stream",
		ContentLength: &chunkSize,
		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
		ExtraHeaders: map[string]string{
			"Digest": sha1Digest(sha1sum[:]),
		},
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		opts.Body = wrap(bytes.NewReader(chunk))
		resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	return response, nil
}

// commitUpload finishes an upload session
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
	opts := rest.Opts{
		Method:  "POST",
		Path:    "/files/upload_sessions/" + SessionID + "/commit",
		RootURL: uploadURL,
		ExtraHeaders: map[string]string{
			"Digest": sha1Digest(sha1sum),
		},
	}
	request := api.CommitUpload{
		Parts: parts,
	}
	request.Attributes.ContentModifiedAt = api.Time(modTime)
	request.Attributes.ContentCreatedAt = api.Time(modTime)
	var body []byte
	var resp *http.Response
	// For discussion of this value see:
	// https://github.com/rclone/rclone/issues/2054
	maxTries := o.fs.opt.CommitRetries
	const defaultDelay = 10
	var tries int
outer:
	for tries = 0; tries < maxTries; tries++ {
		err = o.fs.pacer.Call(func() (bool, error) {
			resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
			if err != nil {
				return shouldRetry(resp, err)
			}
			body, err = rest.ReadBody(resp)
			return shouldRetry(resp, err)
		})
		delay := defaultDelay
		var why string
		if err != nil {
			// Sometimes we get 400 Error with
			// parts_mismatch immediately after uploading
			// the last part. Ignore this error and wait.
			if boxErr, ok := err.(*api.Error); ok && boxErr.Code == "parts_mismatch" {
				why = err.Error()
			} else {
				return nil, err
			}
		} else {
			switch resp.StatusCode {
			case http.StatusOK, http.StatusCreated:
				break outer
			case http.StatusAccepted:
				why = "not ready yet"
				delayString := resp.Header.Get("Retry-After")
				if delayString != "" {
					delay, err = strconv.Atoi(delayString)
					if err != nil {
						fs.Debugf(o, "Couldn't decode Retry-After header %q: %v", delayString, err)
						delay = defaultDelay
					}
				}
			default:
				return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
			}
		}
		fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
		time.Sleep(time.Duration(delay) * time.Second)
	}
	if tries >= maxTries {
		return nil, errors.New("too many tries to commit multipart upload - increase --low-level-retries")
	}
	err = json.Unmarshal(body, &result)
	if err != nil {
		return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
	}
	return result, nil
}

// abortUpload cancels an upload session
func (o *Object) abortUpload(SessionID string) (err error) {
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/files/upload_sessions/" + SessionID,
		RootURL:    uploadURL,
		NoResponse: true,
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	return err
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
	// Create upload session
	session, err := o.createUploadSession(leaf, directoryID, size)
	if err != nil {
		return errors.Wrap(err, "multipart upload create session failed")
	}
	chunkSize := session.PartSize
	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))

	// Cancel the session if something went wrong
	defer func() {
		if err != nil {
			fs.Debugf(o, "Cancelling multipart upload: %v", err)
			cancelErr := o.abortUpload(session.ID)
			if cancelErr != nil {
				fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
			}
		}
	}()

	// unwrap the accounting from the input, we use wrap to put it
	// back on after the buffering
	in, wrap := accounting.UnWrap(in)

	// Upload the chunks
	remaining := size
	position := int64(0)
	parts := make([]api.Part, session.TotalParts)
	hash := sha1.New()
	errs := make(chan error, 1)
	var wg sync.WaitGroup
outer:
	for part := 0; part < session.TotalParts; part++ {
		// Check any errors
		select {
		case err = <-errs:
			break outer
		default:
		}

		reqSize := remaining
		if reqSize >= chunkSize {
			reqSize = chunkSize
		}

		// Make a block of memory
		buf := make([]byte, reqSize)

		// Read the chunk
		_, err = io.ReadFull(in, buf)
		if err != nil {
			err = errors.Wrap(err, "multipart upload failed to read source")
			break outer
		}

		// Make the global hash (must be done sequentially)
		_, _ = hash.Write(buf)

		// Transfer the chunk
		wg.Add(1)
		o.fs.uploadToken.Get()
		go func(part int, position int64) {
			defer wg.Done()
			defer o.fs.uploadToken.Put()
			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
			partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
			if err != nil {
				err = errors.Wrap(err, "multipart upload failed to upload part")
				select {
				case errs <- err:
				default:
				}
				return
			}
			parts[part] = partResponse.Part
		}(part, position)

		// ready for next block
		remaining -= chunkSize
		position += chunkSize
	}
	wg.Wait()
	if err == nil {
		select {
		case err = <-errs:
		default:
		}
	}
	if err != nil {
		return err
	}

	// Finalise the upload session
	result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
	if err != nil {
		return errors.Wrap(err, "multipart upload failed to finalize")
	}

	if result.TotalCount != 1 || len(result.Entries) != 1 {
		return errors.Errorf("multipart upload failed %v - not sure why", o)
	}
	return o.setMetaData(&result.Entries[0])
}
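The Content-Range values built in uploadPart are closed byte intervals over the whole file. With hypothetical figures of a 20 MiB file and box returning an 8 MiB part_size, the loop above would produce:

// Illustrative only; size and chunkSize are assumed example values.
size, chunkSize := int64(20<<20), int64(8<<20)
for position := int64(0); position < size; position += chunkSize {
	reqSize := chunkSize
	if size-position < chunkSize {
		reqSize = size - position
	}
	fmt.Printf("bytes %d-%d/%d\n", position, position+reqSize-1, size)
}
// bytes 0-8388607/20971520
// bytes 8388608-16777215/20971520
// bytes 16777216-20971519/20971520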
1882  backend/cache/cache.go  vendored  Normal file
File diff suppressed because it is too large
1664  backend/cache/cache_internal_test.go  vendored  Normal file
File diff suppressed because it is too large
78  backend/cache/cache_mount_unix_test.go  vendored  Normal file
@@ -0,0 +1,78 @@
// +build !plan9,!windows

package cache_test

import (
	"os"
	"testing"
	"time"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
	"github.com/rclone/rclone/cmd/mount"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/stretchr/testify/require"
)

func (r *run) mountFs(t *testing.T, f fs.Fs) {
	device := f.Name() + ":" + f.Root()
	var options = []fuse.MountOption{
		fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
		fuse.Subtype("rclone"),
		fuse.FSName(device), fuse.VolumeName(device),
		fuse.NoAppleDouble(),
		fuse.NoAppleXattr(),
		//fuse.AllowOther(),
	}
	err := os.MkdirAll(r.mntDir, os.ModePerm)
	require.NoError(t, err)
	c, err := fuse.Mount(r.mntDir, options...)
	require.NoError(t, err)
	filesys := mount.NewFS(f)
	server := fusefs.New(c, nil)

	// Serve the mount point in the background returning error to errChan
	r.unmountRes = make(chan error, 1)
	go func() {
		err := server.Serve(filesys)
		closeErr := c.Close()
		if err == nil {
			err = closeErr
		}
		r.unmountRes <- err
	}()

	// check if the mount process has an error to report
	<-c.Ready
	require.NoError(t, c.MountError)

	r.unmountFn = func() error {
		// Shutdown the VFS
		filesys.VFS.Shutdown()
		return fuse.Unmount(r.mntDir)
	}

	r.vfs = filesys.VFS
	r.isMounted = true
}

func (r *run) unmountFs(t *testing.T, f fs.Fs) {
	var err error

	for i := 0; i < 4; i++ {
		err = r.unmountFn()
		if err != nil {
			//log.Printf("signal to umount failed - retrying: %v", err)
			time.Sleep(3 * time.Second)
			continue
		}
		break
	}
	require.NoError(t, err)
	err = <-r.unmountRes
	require.NoError(t, err)
	err = r.vfs.CleanUp()
	require.NoError(t, err)
	r.isMounted = false
}
124  backend/cache/cache_mount_windows_test.go  vendored  Normal file
@@ -0,0 +1,124 @@
// +build windows

package cache_test

import (
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/billziss-gh/cgofuse/fuse"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/cmd/cmount"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/stretchr/testify/require"
)

// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
	const totalWait = 10 * time.Second
	const individualWait = 10 * time.Millisecond
	for i := 0; i < int(totalWait/individualWait); i++ {
		ok = fn()
		if ok {
			return ok
		}
		time.Sleep(individualWait)
	}
	return false
}

func (r *run) mountFs(t *testing.T, f fs.Fs) {
	// FIXME implement cmount
	t.Skip("windows not supported yet")

	device := f.Name() + ":" + f.Root()
	options := []string{
		"-o", "fsname=" + device,
		"-o", "subtype=rclone",
		"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
		"-o", "uid=-1",
		"-o", "gid=-1",
		"-o", "allow_other",
		// This causes FUSE to supply O_TRUNC with the Open
		// call which is more efficient for cmount. However
		// it does not work with cgofuse on Windows with
		// WinFSP so cmount must work with or without it.
		"-o", "atomic_o_trunc",
		"--FileSystemName=rclone",
	}

	fsys := cmount.NewFS(f)
	host := fuse.NewFileSystemHost(fsys)

	// Serve the mount point in the background returning error to errChan
	r.unmountRes = make(chan error, 1)
	go func() {
		var err error
		ok := host.Mount(r.mntDir, options)
		if !ok {
			err = errors.New("mount failed")
		}
		r.unmountRes <- err
	}()

	// unmount
	r.unmountFn = func() error {
		// Shutdown the VFS
		fsys.VFS.Shutdown()
		if host.Unmount() {
			if !waitFor(func() bool {
				_, err := os.Stat(r.mntDir)
				return err != nil
			}) {
				t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
			}
			return nil
		}
		return errors.New("host unmount failed")
	}

	// Wait for the filesystem to become ready, checking the file
	// system didn't blow up before starting
	select {
	case err := <-r.unmountRes:
		require.NoError(t, err)
	case <-time.After(time.Second * 3):
	}

	// Wait for the mount point to be available on Windows
	// On Windows the Init signal comes slightly before the mount is ready
	if !waitFor(func() bool {
		_, err := os.Stat(r.mntDir)
		return err == nil
	}) {
		t.Errorf("mountpoint %q didn't become available on mount", r.mntDir)
	}

	r.vfs = fsys.VFS
	r.isMounted = true
}

func (r *run) unmountFs(t *testing.T, f fs.Fs) {
	// FIXME implement cmount
	t.Skip("windows not supported yet")
	var err error

	for i := 0; i < 4; i++ {
		err = r.unmountFn()
		if err != nil {
			//log.Printf("signal to umount failed - retrying: %v", err)
			time.Sleep(3 * time.Second)
			continue
		}
		break
	}
	require.NoError(t, err)
	err = <-r.unmountRes
	require.NoError(t, err)
	err = r.vfs.CleanUp()
	require.NoError(t, err)
	r.isMounted = false
}
23  backend/cache/cache_test.go  vendored  Normal file
@@ -0,0 +1,23 @@
// Test Cache filesystem interface

// +build !plan9

package cache_test

import (
	"testing"

	"github.com/rclone/rclone/backend/cache"
	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   "TestCache:",
		NilObject:                    (*cache.Object)(nil),
		UnimplementableFsMethods:     []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
	})
}
6  backend/cache/cache_unsupported.go  vendored  Normal file
@@ -0,0 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files"

// +build plan9

package cache
455  backend/cache/cache_upload_test.go  vendored  Normal file
@@ -0,0 +1,455 @@
// +build !plan9

package cache_test

import (
	"context"
	"fmt"
	"math/rand"
	"os"
	"path"
	"strconv"
	"testing"
	"time"

	"github.com/rclone/rclone/backend/cache"
	_ "github.com/rclone/rclone/backend/drive"
	"github.com/rclone/rclone/fs"
	"github.com/stretchr/testify/require"
)

func TestInternalUploadTempDirCreated(t *testing.T) {
	id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
		nil,
		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
	require.NoError(t, err)
}

func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
	// create some rand test data
	testSize := int64(524288000)
	testReader := runInstance.randomReader(t, testSize)
	bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
	runInstance.writeRemoteReader(t, rootFs, "one", testReader)
	// validate that it exists in temp fs
	ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
	require.NoError(t, err)

	if runInstance.rootIsCrypt {
		require.Equal(t, int64(524416032), ti.Size())
	} else {
		require.Equal(t, testSize, ti.Size())
	}
	de1, err := runInstance.list(t, rootFs, "")
	require.NoError(t, err)
	require.Len(t, de1, 1)

	runInstance.completeBackgroundUpload(t, "one", bu)
	// check if it was removed from temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
	require.True(t, os.IsNotExist(err))

	// check if it can be read
	data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
	require.NoError(t, err)
	require.Len(t, data2, 1024)
}

func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
	id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}

func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
	id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}

func TestInternalUploadMoveExistingFile(t *testing.T) {
	id := fmt.Sprintf("tiumef%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	err := rootFs.Mkdir(context.Background(), "one")
	require.NoError(t, err)
	err = rootFs.Mkdir(context.Background(), "one/test")
	require.NoError(t, err)
	err = rootFs.Mkdir(context.Background(), "second")
	require.NoError(t, err)

	// create some rand test data
	testSize := int64(10485760)
	testReader := runInstance.randomReader(t, testSize)
	runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
	runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")

	de1, err := runInstance.list(t, rootFs, "one/test")
	require.NoError(t, err)
	require.Len(t, de1, 1)

	time.Sleep(time.Second * 5)
	//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
	//require.NoError(t, err)

	err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
	require.NoError(t, err)

	// check if it can be read
	de1, err = runInstance.list(t, rootFs, "second/test")
	require.NoError(t, err)
	require.Len(t, de1, 1)
}

func TestInternalUploadTempPathCleaned(t *testing.T) {
	id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	err := rootFs.Mkdir(context.Background(), "one")
	require.NoError(t, err)
	err = rootFs.Mkdir(context.Background(), "one/test")
	require.NoError(t, err)
	err = rootFs.Mkdir(context.Background(), "second")
	require.NoError(t, err)

	// create some rand test data
	testSize := int64(1048576)
	testReader := runInstance.randomReader(t, testSize)
	testReader2 := runInstance.randomReader(t, testSize)
	runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
	runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)

	runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
	require.False(t, os.IsNotExist(err))

	runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
	require.True(t, os.IsNotExist(err))

	de1, err := runInstance.list(t, rootFs, "one/test")
	require.NoError(t, err)
	require.Len(t, de1, 1)

	// check if it can be read
	de1, err = runInstance.list(t, rootFs, "second")
	require.NoError(t, err)
	require.Len(t, de1, 1)
}

func TestInternalUploadQueueMoreFiles(t *testing.T) {
	id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	err := rootFs.Mkdir(context.Background(), "test")
	require.NoError(t, err)
	minSize := 5242880
	maxSize := 10485760
	totalFiles := 10
	rand.Seed(time.Now().Unix())

	lastFile := ""
	for i := 0; i < totalFiles; i++ {
		size := int64(rand.Intn(maxSize-minSize) + minSize)
		testReader := runInstance.randomReader(t, size)
		remote := "test/" + strconv.Itoa(i) + ".bin"
		runInstance.writeRemoteReader(t, rootFs, remote, testReader)

		// validate that it exists in temp fs
		ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
		require.NoError(t, err)
		require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))

		if runInstance.wrappedIsExternal && i < totalFiles-1 {
			time.Sleep(time.Second * 3)
		}
		lastFile = remote
	}

	// check if cache lists all files, likely temp upload didn't finish yet
	de1, err := runInstance.list(t, rootFs, "test")
	require.NoError(t, err)
	require.Len(t, de1, totalFiles)

	// wait for background uploader to do its thing
	runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)

	// retry until we have no more temp files and fail if they don't go down to 0
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
	require.True(t, os.IsNotExist(err))

	// check if cache lists all files
	de1, err = runInstance.list(t, rootFs, "test")
	require.NoError(t, err)
	require.Len(t, de1, totalFiles)
}
func TestInternalUploadTempFileOperations(t *testing.T) {
	id := "tiutfo"
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	boltDb.PurgeTempUploads()

	// create some rand test data
	runInstance.mkdir(t, rootFs, "test")
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// check if it can be read
	data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
	require.NoError(t, err)
	require.Equal(t, []byte("one content"), data1)
	// validate that it exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)

	// test DirMove - allowed
	err = runInstance.dirMove(t, rootFs, "test", "second")
	if err != errNotSupported {
		require.NoError(t, err)
		_, err = rootFs.NewObject(context.Background(), "test/one")
		require.Error(t, err)
		_, err = rootFs.NewObject(context.Background(), "second/one")
		require.NoError(t, err)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.Error(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
		require.NoError(t, err)
		_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
		require.Error(t, err)
		var started bool
		started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
		require.NoError(t, err)
		require.False(t, started)
		runInstance.mkdir(t, rootFs, "test")
		runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
	}

	// test Rmdir - allowed
	err = runInstance.rm(t, rootFs, "test")
	require.Error(t, err)
	require.Contains(t, err.Error(), "directory not empty")
	_, err = rootFs.NewObject(context.Background(), "test/one")
	require.NoError(t, err)
	// validate that it exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)
	started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
	require.False(t, started)
	require.NoError(t, err)

	// test Move/Rename -- allowed
	err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
	if err != errNotSupported {
		require.NoError(t, err)
		// try to read from it
		_, err = rootFs.NewObject(context.Background(), "test/one")
		require.Error(t, err)
		_, err = rootFs.NewObject(context.Background(), "test/second")
		require.NoError(t, err)
		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
		require.NoError(t, err)
		require.Equal(t, []byte("one content"), data2)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.Error(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
		require.NoError(t, err)
		runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
	}

	// test Copy -- allowed
	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
	if err != errNotSupported {
		require.NoError(t, err)
		_, err = rootFs.NewObject(context.Background(), "test/one")
		require.NoError(t, err)
		_, err = rootFs.NewObject(context.Background(), "test/third")
		require.NoError(t, err)
		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
		require.NoError(t, err)
		require.Equal(t, []byte("one content"), data2)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
		require.NoError(t, err)
	}

	// test Remove -- allowed
	err = runInstance.rm(t, rootFs, "test/one")
	require.NoError(t, err)
	_, err = rootFs.NewObject(context.Background(), "test/one")
	require.Error(t, err)
	// validate that it doesn't exist in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.Error(t, err)
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// test Update -- allowed
	firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
	require.NoError(t, err)
	err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
	require.NoError(t, err)
	obj2, err := rootFs.NewObject(context.Background(), "test/one")
	require.NoError(t, err)
	data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
	require.Equal(t, "one content updated", string(data2))
	tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)
	if runInstance.rootIsCrypt {
		require.Equal(t, int64(67), tmpInfo.Size())
	} else {
		require.Equal(t, int64(len(data2)), tmpInfo.Size())
	}

	// test SetModTime -- allowed
	secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
	require.NoError(t, err)
	require.NotEqual(t, secondModTime, firstModTime)
	require.NotEqual(t, time.Time{}, firstModTime)
	require.NotEqual(t, time.Time{}, secondModTime)
}

func TestInternalUploadUploadingFileOperations(t *testing.T) {
	id := "tiuufo"
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	boltDb.PurgeTempUploads()

	// create some rand test data
	runInstance.mkdir(t, rootFs, "test")
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// check if it can be read
	data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
	require.NoError(t, err)
	require.Equal(t, []byte("one content"), data1)
	// validate that it exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)

	err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
	require.NoError(t, err)

	// test DirMove
	err = runInstance.dirMove(t, rootFs, "test", "second")
	if err != errNotSupported {
		require.Error(t, err)
		_, err = rootFs.NewObject(context.Background(), "test/one")
		require.NoError(t, err)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
		require.Error(t, err)
	}

	// test Rmdir
	err = runInstance.rm(t, rootFs, "test")
	require.Error(t, err)
	_, err = rootFs.NewObject(context.Background(), "test/one")
	require.NoError(t, err)
	// validate that it still exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)

	// test Move/Rename
	err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
	if err != errNotSupported {
		require.Error(t, err)
		// try to read from it
		_, err = rootFs.NewObject(context.Background(), "test/one")
		require.NoError(t, err)
		_, err = rootFs.NewObject(context.Background(), "test/second")
		require.Error(t, err)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
		require.Error(t, err)
	}

	// test Copy -- allowed
	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
	if err != errNotSupported {
		require.NoError(t, err)
		_, err = rootFs.NewObject(context.Background(), "test/one")
		require.NoError(t, err)
		_, err = rootFs.NewObject(context.Background(), "test/third")
		require.NoError(t, err)
		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
		require.NoError(t, err)
		require.Equal(t, []byte("one content"), data2)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
		require.NoError(t, err)
	}

	// test Remove
	err = runInstance.rm(t, rootFs, "test/one")
	require.Error(t, err)
	_, err = rootFs.NewObject(context.Background(), "test/one")
	require.NoError(t, err)
	// validate that it still exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// test Update - this seems to work. Why? FIXME
	//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
	//require.NoError(t, err)
	//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
	//	data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
	//	require.Equal(t, "one content", string(data2))
	//
	//	tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	//	require.NoError(t, err)
	//	if runInstance.rootIsCrypt {
	//		require.Equal(t, int64(67), tmpInfo.Size())
	//	} else {
	//		require.Equal(t, int64(len(data2)), tmpInfo.Size())
	//	}
	//})
	//require.Error(t, err)

	// test SetModTime -- seems to work because of the previous test
	//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
	//require.NoError(t, err)
	//require.Equal(t, secondModTime, firstModTime)
	//require.NotEqual(t, time.Time{}, firstModTime)
	//require.NotEqual(t, time.Time{}, secondModTime)
}
backend/cache/directory.go (vendored, new file, 138 lines)
@@ -0,0 +1,138 @@
// +build !plan9

package cache

import (
	"context"
	"path"
	"time"

	"github.com/rclone/rclone/fs"
)

// Directory is a generic dir that stores basic information about it
type Directory struct {
	Directory fs.Directory `json:"-"` // can be nil

	CacheFs      *Fs    `json:"-"`       // cache fs
	Name         string `json:"name"`    // name of the directory
	Dir          string `json:"dir"`     // abs path of the directory
	CacheModTime int64  `json:"modTime"` // modification or creation time - IsZero for unknown
	CacheSize    int64  `json:"size"`    // size of directory and contents or -1 if unknown

	CacheItems int64      `json:"items"`     // number of objects or -1 for unknown
	CacheType  string     `json:"cacheType"` // object type
	CacheTs    *time.Time `json:",omitempty"`
}

// NewDirectory builds an empty dir which will be used to unmarshal data into
func NewDirectory(f *Fs, remote string) *Directory {
	cd := ShallowDirectory(f, remote)
	t := time.Now()
	cd.CacheTs = &t

	return cd
}

// ShallowDirectory builds an empty dir (without a cache timestamp) which will be used to unmarshal data into
func ShallowDirectory(f *Fs, remote string) *Directory {
	var cd *Directory
	fullRemote := cleanPath(path.Join(f.Root(), remote))

	// build a new one
	dir := cleanPath(path.Dir(fullRemote))
	name := cleanPath(path.Base(fullRemote))
	cd = &Directory{
		CacheFs:      f,
		Name:         name,
		Dir:          dir,
		CacheModTime: time.Now().UnixNano(),
		CacheSize:    0,
		CacheItems:   0,
		CacheType:    "Directory",
	}

	return cd
}

// DirectoryFromOriginal builds one from a generic fs.Directory
func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory {
	var cd *Directory
	fullRemote := path.Join(f.Root(), d.Remote())

	dir := cleanPath(path.Dir(fullRemote))
	name := cleanPath(path.Base(fullRemote))
	t := time.Now()
	cd = &Directory{
		Directory:    d,
		CacheFs:      f,
		Name:         name,
		Dir:          dir,
		CacheModTime: d.ModTime(ctx).UnixNano(),
		CacheSize:    d.Size(),
		CacheItems:   d.Items(),
		CacheType:    "Directory",
		CacheTs:      &t,
	}

	return cd
}

// Fs returns its FS info
func (d *Directory) Fs() fs.Info {
	return d.CacheFs
}

// String returns a human friendly name for this object
func (d *Directory) String() string {
	if d == nil {
		return "<nil>"
	}
	return d.Remote()
}

// Remote returns the remote path
func (d *Directory) Remote() string {
	return d.CacheFs.cleanRootFromPath(d.abs())
}

// abs returns the absolute path to the dir
func (d *Directory) abs() string {
	return cleanPath(path.Join(d.Dir, d.Name))
}

// parentRemote returns the absolute path parent remote
func (d *Directory) parentRemote() string {
	absPath := d.abs()
	if absPath == "" {
		return ""
	}
	return cleanPath(path.Dir(absPath))
}

// ModTime returns the cached ModTime
func (d *Directory) ModTime(ctx context.Context) time.Time {
	return time.Unix(0, d.CacheModTime)
}

// Size returns the cached Size
func (d *Directory) Size() int64 {
	return d.CacheSize
}

// Items returns the cached Items
func (d *Directory) Items() int64 {
	return d.CacheItems
}

// ID returns the ID of the cached directory if known
func (d *Directory) ID() string {
	if d.Directory == nil {
		return ""
	}
	return d.Directory.ID()
}

// Check the interfaces are satisfied
var (
	_ fs.Directory = (*Directory)(nil)
)
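The Directory entry above persists only plain metadata: the live handles (the wrapped fs.Directory and the *Fs) carry a `json:"-"` tag, so they are skipped when entries are marshalled into and out of the persistent store. A minimal, self-contained sketch of the same pattern; the `dirEntry` struct here is a hypothetical mirror for illustration, not the cache package itself:

package main

import (
	"encoding/json"
	"fmt"
)

// dirEntry mirrors the tag layout used by cache.Directory:
// runtime-only fields carry `json:"-"` and survive neither
// Marshal nor Unmarshal, while plain metadata round-trips.
type dirEntry struct {
	Live         interface{} `json:"-"` // runtime handle, never persisted
	Name         string      `json:"name"`
	Dir          string      `json:"dir"`
	CacheModTime int64       `json:"modTime"`
}

func main() {
	in := dirEntry{Live: "open handle", Name: "one", Dir: "/test", CacheModTime: 42}
	data, _ := json.Marshal(in)
	fmt.Println(string(data)) // {"name":"one","dir":"/test","modTime":42}

	var out dirEntry
	_ = json.Unmarshal(data, &out)
	fmt.Printf("%+v\n", out) // Live is nil: it was never stored
}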
backend/cache/handle.go (vendored, new file, 638 lines)
@@ -0,0 +1,638 @@
// +build !plan9

package cache

import (
	"context"
	"fmt"
	"io"
	"path"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

var uploaderMap = make(map[string]*backgroundWriter)
var uploaderMapMx sync.Mutex

// initBackgroundUploader returns a single instance
func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
	// write lock to create one
	uploaderMapMx.Lock()
	defer uploaderMapMx.Unlock()
	if b, ok := uploaderMap[fs.String()]; ok {
		// if it was already started we close it so that it can be started again
		if b.running {
			b.close()
		} else {
			return b, nil
		}
	}

	bb := newBackgroundWriter(fs)
	uploaderMap[fs.String()] = bb
	return uploaderMap[fs.String()], nil
}

// Handle is managing the read/write/seek operations on an open handle
type Handle struct {
	ctx            context.Context
	cachedObject   *Object
	cfs            *Fs
	memory         *Memory
	preloadQueue   chan int64
	preloadOffset  int64
	offset         int64
	seenOffsets    map[int64]bool
	mu             sync.Mutex
	workersWg      sync.WaitGroup
	confirmReading chan bool
	workers        int
	maxWorkerID    int
	UseMemory      bool
	closed         bool
	reading        bool
}

// NewObjectHandle returns a new Handle for an existing Object
func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle {
	r := &Handle{
		ctx:           ctx,
		cachedObject:  o,
		cfs:           cfs,
		offset:        0,
		preloadOffset: -1, // -1 to trigger the first preload

		UseMemory: !cfs.opt.ChunkNoMemory,
		reading:   false,
	}
	r.seenOffsets = make(map[int64]bool)
	r.memory = NewMemory(-1)

	// create a larger buffer to queue up requests
	r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10)
	r.confirmReading = make(chan bool)
	r.startReadWorkers()
	return r
}

// cacheFs is a convenience method to get the parent cache FS of the object's manager
func (r *Handle) cacheFs() *Fs {
	return r.cfs
}

// storage is a convenience method to get the persistent storage of the object's manager
func (r *Handle) storage() *Persistent {
	return r.cacheFs().cache
}

// String representation of this reader
func (r *Handle) String() string {
	return r.cachedObject.abs()
}

// startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() {
	if r.workers > 0 {
		return
	}
	totalWorkers := r.cacheFs().opt.TotalWorkers

	if r.cacheFs().plexConnector.isConfigured() {
		if !r.cacheFs().plexConnector.isConnected() {
			err := r.cacheFs().plexConnector.authenticate()
			if err != nil {
				fs.Errorf(r, "failed to authenticate to Plex: %v", err)
			}
		}
		if r.cacheFs().plexConnector.isConnected() {
			totalWorkers = 1
		}
	}

	r.scaleWorkers(totalWorkers)
}

// scaleWorkers will scale the worker pool in or out to the desired count
func (r *Handle) scaleWorkers(desired int) {
	current := r.workers
	if current == desired {
		return
	}
	if current > desired {
		// scale in gracefully
		for r.workers > desired {
			r.preloadQueue <- -1
			r.workers--
		}
	} else {
		// scale out
		for r.workers < desired {
			w := &worker{
				r:  r,
				id: r.maxWorkerID,
			}
			r.workersWg.Add(1)
			r.workers++
			r.maxWorkerID++
			go w.run()
		}
	}
	// ignore first scale out from 0
	if current != 0 {
		fs.Debugf(r, "scale workers to %v", desired)
	}
}

func (r *Handle) confirmExternalReading() {
	// if we already have the maximum number of workers
	// then we skip this step
	if r.workers > 1 ||
		!r.cacheFs().plexConnector.isConfigured() {
		return
	}
	if !r.cacheFs().plexConnector.isPlaying(r.cachedObject) {
		return
	}
	fs.Infof(r, "confirmed reading by external reader")
	r.scaleWorkers(r.cacheFs().opt.TotalWorkers)
}

// queueOffset will send an offset to the workers if it's different from the last one
func (r *Handle) queueOffset(offset int64) {
	if offset != r.preloadOffset {
		// clean past in-memory chunks
		if r.UseMemory {
			go r.memory.CleanChunksByNeed(offset)
		}
		r.confirmExternalReading()
		r.preloadOffset = offset

		// clear the past seen chunks
		// they will remain in our persistent storage but will be removed from transient
		// so they need to be picked up by a worker
		for k := range r.seenOffsets {
			if k < offset {
				r.seenOffsets[k] = false
			}
		}

		for i := 0; i < r.workers; i++ {
			o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
			if o < 0 || o >= r.cachedObject.Size() {
				continue
			}
			if v, ok := r.seenOffsets[o]; ok && v {
				continue
			}

			r.seenOffsets[o] = true
			r.preloadQueue <- o
		}
	}
}

// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
	var data []byte
	var err error

	// we calculate the modulus of the requested offset with the size of a chunk
	offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)

	// we align the start offset of the first chunk to a likely chunk in the storage
	chunkStart = chunkStart - offset
	r.queueOffset(chunkStart)
	found := false

	if r.UseMemory {
		data, err = r.memory.GetChunk(r.cachedObject, chunkStart)
		if err == nil {
			found = true
		}
	}

	if !found {
		// give the workers a chance to pick up the chunk
		// and retry a couple of times
		for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
			data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
			if err == nil {
				found = true
				break
			}

			fs.Debugf(r, "%v: chunk retry storage: %v", chunkStart, i)
			time.Sleep(time.Millisecond * 500)
		}
	}

	// not found in ram or
	// the worker didn't manage to download the chunk in time so we abort and close the stream
	if err != nil || len(data) == 0 || !found {
		if r.workers == 0 {
			fs.Errorf(r, "out of workers")
			return nil, io.ErrUnexpectedEOF
		}

		return nil, errors.Errorf("chunk not found %v", chunkStart)
	}

	// first chunk will be aligned with the start
	if offset > 0 {
		if offset > int64(len(data)) {
			fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
				r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size())
			return nil, io.ErrUnexpectedEOF
		}
		data = data[int(offset):]
	}

	return data, nil
}
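getChunk first snaps the requested offset down to a chunk boundary with a modulus, then trims the leading bytes off the fetched chunk so the caller gets data starting exactly where it asked. A standalone sketch of that arithmetic; the chunk size here is chosen only for illustration:

package main

import "fmt"

// alignToChunk reproduces the alignment done at the top of getChunk:
// it returns the chunk-aligned start offset and the remainder inside
// that chunk which the handle later slices off with data[within:].
func alignToChunk(readOffset, chunkSize int64) (chunkStart, within int64) {
	within = readOffset % chunkSize
	chunkStart = readOffset - within
	return chunkStart, within
}

func main() {
	const chunkSize = 5 * 1024 * 1024 // assumed 5 MiB chunk size, example only
	for _, off := range []int64{0, 1, chunkSize, chunkSize + 123} {
		start, within := alignToChunk(off, chunkSize)
		fmt.Printf("read %-9d -> fetch chunk %-9d skip %d bytes\n", off, start, within)
	}
}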
// Read a chunk from storage or len(p)
func (r *Handle) Read(p []byte) (n int, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	var buf []byte

	// first reading
	if !r.reading {
		r.reading = true
	}
	// reached EOF
	if r.offset >= r.cachedObject.Size() {
		return 0, io.EOF
	}
	currentOffset := r.offset
	buf, err = r.getChunk(currentOffset)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		fs.Errorf(r, "(%v/%v) error (%v) response", currentOffset, r.cachedObject.Size(), err)
	}
	if len(buf) == 0 && err != io.ErrUnexpectedEOF {
		return 0, io.EOF
	}
	readSize := copy(p, buf)
	newOffset := currentOffset + int64(readSize)
	r.offset = newOffset

	return readSize, err
}

// Close will tell the workers to stop
func (r *Handle) Close() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.closed {
		return errors.New("file already closed")
	}

	close(r.preloadQueue)
	r.closed = true
	// wait for workers to complete their jobs before returning
	r.workersWg.Wait()
	r.memory.db.Flush()

	fs.Debugf(r, "cache reader closed %v", r.offset)
	return nil
}

// Seek will move the current offset based on whence and instruct the workers to move there too
func (r *Handle) Seek(offset int64, whence int) (int64, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	var err error
	switch whence {
	case io.SeekStart:
		fs.Debugf(r, "moving offset set from %v to %v", r.offset, offset)
		r.offset = offset
	case io.SeekCurrent:
		fs.Debugf(r, "moving offset cur from %v to %v", r.offset, r.offset+offset)
		r.offset += offset
	case io.SeekEnd:
		fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
		r.offset = r.cachedObject.Size() + offset
	default:
		err = errors.Errorf("cache: unimplemented seek whence %v", whence)
	}

	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
	if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
		chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
	}
	r.queueOffset(chunkStart)

	return r.offset, err
}

type worker struct {
	r  *Handle
	rc io.ReadCloser
	id int
}

// String is a representation of this worker
func (w *worker) String() string {
	return fmt.Sprintf("worker-%v <%v>", w.id, w.r.cachedObject.Name)
}

// reader will return a reader depending on the capabilities of the source reader:
//   - if it supports seeking it will seek to the desired offset and return the same reader
//   - if it doesn't support seeking it will close a possible existing one and open at the desired offset
//   - if there's no reader associated with this worker, it will create one
func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error) {
	var err error
	r := w.rc
	if w.rc == nil {
		r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
			return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
		})
		if err != nil {
			return nil, err
		}
		return r, nil
	}

	if !closeOpen {
		if do, ok := r.(fs.RangeSeeker); ok {
			_, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset)
			return r, err
		} else if do, ok := r.(io.Seeker); ok {
			_, err = do.Seek(offset, io.SeekStart)
			return r, err
		}
	}

	_ = w.rc.Close()
	return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
		r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
		if err != nil {
			return nil, err
		}
		return r, nil
	})
}

// run is the main loop for the worker which receives offsets to preload
func (w *worker) run() {
	var err error
	var data []byte
	defer func() {
		if w.rc != nil {
			_ = w.rc.Close()
		}
		w.r.workersWg.Done()
	}()

	for {
		chunkStart, open := <-w.r.preloadQueue
		if chunkStart < 0 || !open {
			break
		}

		// skip if it exists
		if w.r.UseMemory {
			if w.r.memory.HasChunk(w.r.cachedObject, chunkStart) {
				continue
			}

			// add it in ram if it's in the persistent storage
			data, err = w.r.storage().GetChunk(w.r.cachedObject, chunkStart)
			if err == nil {
				err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
				if err != nil {
					fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
				} else {
					continue
				}
			}
		} else {
			if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
				continue
			}
		}

		chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
		// TODO: Remove this comment if it proves to be reliable for #1896
		//if chunkEnd > w.r.cachedObject.Size() {
		//	chunkEnd = w.r.cachedObject.Size()
		//}

		w.download(chunkStart, chunkEnd, 0)
	}
}

func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
	var err error
	var data []byte

	// stop retries
	if retry >= w.r.cacheFs().opt.ReadRetries {
		return
	}
	// back-off between retries
	if retry > 0 {
		time.Sleep(time.Second * time.Duration(retry))
	}

	closeOpen := false
	if retry > 0 {
		closeOpen = true
	}
	w.rc, err = w.reader(chunkStart, chunkEnd, closeOpen)
	// we seem to be getting only errors so we abort
	if err != nil {
		fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
		err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
		if err != nil {
			fs.Errorf(w, "%v", err)
		}
		w.download(chunkStart, chunkEnd, retry+1)
		return
	}

	data = make([]byte, chunkEnd-chunkStart)
	var sourceRead int
	sourceRead, err = io.ReadFull(w.rc, data)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
		err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
		if err != nil {
			fs.Errorf(w, "%v", err)
		}
		w.download(chunkStart, chunkEnd, retry+1)
		return
	}
	data = data[:sourceRead] // reslice to remove extra garbage
	if err == io.ErrUnexpectedEOF {
		fs.Debugf(w, "partial downloaded chunk %v", fs.SizeSuffix(chunkStart))
	} else {
		fs.Debugf(w, "downloaded chunk %v", chunkStart)
	}

	if w.r.UseMemory {
		err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
		if err != nil {
			fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
		}
	}

	err = w.r.storage().AddChunk(w.r.cachedObject.abs(), data, chunkStart)
	if err != nil {
		fs.Errorf(w, "failed caching chunk in storage %v: %v", chunkStart, err)
	}
}

const (
	// BackgroundUploadStarted is a state for a temp file that has started upload
	BackgroundUploadStarted = iota
	// BackgroundUploadCompleted is a state for a temp file that has completed upload
	BackgroundUploadCompleted
	// BackgroundUploadError is a state for a temp file that has an error upload
	BackgroundUploadError
)

// BackgroundUploadState is an entity that maps to an existing file which is stored on the temp fs
type BackgroundUploadState struct {
	Remote string
	Status int
	Error  error
}

type backgroundWriter struct {
	fs       *Fs
	stateCh  chan int
	running  bool
	notifyCh chan BackgroundUploadState
	mu       sync.Mutex
}

func newBackgroundWriter(f *Fs) *backgroundWriter {
	b := &backgroundWriter{
		fs:       f,
		stateCh:  make(chan int),
		notifyCh: make(chan BackgroundUploadState),
	}

	return b
}

func (b *backgroundWriter) close() {
	b.stateCh <- 2 // 2 = stop the run loop
	b.mu.Lock()
	defer b.mu.Unlock()
	b.running = false
}

func (b *backgroundWriter) pause() {
	b.stateCh <- 1 // 1 = pause uploads
}

func (b *backgroundWriter) play() {
	b.stateCh <- 0 // 0 = resume uploads
}

func (b *backgroundWriter) isRunning() bool {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.running
}

func (b *backgroundWriter) notify(remote string, status int, err error) {
	state := BackgroundUploadState{
		Remote: remote,
		Status: status,
		Error:  err,
	}
	select {
	case b.notifyCh <- state:
		fs.Debugf(remote, "notified background upload state: %v", state.Status)
	default:
	}
}

func (b *backgroundWriter) run() {
	state := 0
	for {
		b.mu.Lock()
		b.running = true
		b.mu.Unlock()
		select {
		case s := <-b.stateCh:
			state = s
		default:
		}
		switch state {
		case 1: // paused
			runtime.Gosched()
			time.Sleep(time.Millisecond * 500)
			continue
		case 2: // stopped
			return
		}

		absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime))
		if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
			time.Sleep(time.Second)
			continue
		}

		remote := b.fs.cleanRootFromPath(absPath)
		b.notify(remote, BackgroundUploadStarted, nil)
		fs.Infof(remote, "background upload: started upload")
		err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote)
		if err != nil {
			b.notify(remote, BackgroundUploadError, err)
			_ = b.fs.cache.rollbackPendingUpload(absPath)
			fs.Errorf(remote, "background upload: %v", err)
			continue
		}
		// clean empty dirs up to root
		thisDir := cleanPath(path.Dir(remote))
		for thisDir != "" {
			thisList, err := b.fs.tempFs.List(context.TODO(), thisDir)
			if err != nil {
				break
			}
			if len(thisList) > 0 {
				break
			}
			err = b.fs.tempFs.Rmdir(context.TODO(), thisDir)
			fs.Debugf(thisDir, "cleaned from temp path")
			if err != nil {
				break
			}
			thisDir = cleanPath(path.Dir(thisDir))
		}
		fs.Infof(remote, "background upload: uploaded entry")
		err = b.fs.cache.removePendingUpload(absPath)
		if err != nil && !strings.Contains(err.Error(), "pending upload not found") {
			fs.Errorf(remote, "background upload: %v", err)
		}
		parentCd := NewDirectory(b.fs, cleanPath(path.Dir(remote)))
		err = b.fs.cache.ExpireDir(parentCd)
		if err != nil {
			fs.Errorf(parentCd, "background upload: cache expire error: %v", err)
		}
		b.fs.notifyChangeUpstream(remote, fs.EntryObject)
		fs.Infof(remote, "finished background upload")
		b.notify(remote, BackgroundUploadCompleted, nil)
	}
}

// Check the interfaces are satisfied
var (
	_ io.ReadCloser = (*Handle)(nil)
	_ io.Seeker     = (*Handle)(nil)
)
backend/cache/object.go (vendored, new file, 372 lines)
@@ -0,0 +1,372 @@
// +build !plan9

package cache

import (
	"context"
	"io"
	"path"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/readers"
)

const (
	objectInCache       = "Object"
	objectPendingUpload = "TempObject"
)

// Object is a generic file like object that stores basic information about it
type Object struct {
	fs.Object `json:"-"`

	ParentFs      fs.Fs     `json:"-"`        // parent fs
	CacheFs       *Fs       `json:"-"`        // cache fs
	Name          string    `json:"name"`     // name of the object
	Dir           string    `json:"dir"`      // abs path of the object
	CacheModTime  int64     `json:"modTime"`  // modification or creation time - IsZero for unknown
	CacheSize     int64     `json:"size"`     // size of the object or -1 if unknown
	CacheStorable bool      `json:"storable"` // says whether this object can be stored
	CacheType     string    `json:"cacheType"`
	CacheTs       time.Time `json:"cacheTs"`
	CacheHashes   map[hash.Type]string // all supported hashes cached

	refreshMutex sync.Mutex
}

// NewObject builds one from a remote path
func NewObject(f *Fs, remote string) *Object {
	fullRemote := path.Join(f.Root(), remote)
	dir, name := path.Split(fullRemote)

	cacheType := objectInCache
	parentFs := f.UnWrap()
	if f.opt.TempWritePath != "" {
		_, err := f.cache.SearchPendingUpload(fullRemote)
		if err == nil { // queued for upload
			cacheType = objectPendingUpload
			parentFs = f.tempFs
			fs.Debugf(fullRemote, "pending upload found")
		}
	}

	co := &Object{
		ParentFs:      parentFs,
		CacheFs:       f,
		Name:          cleanPath(name),
		Dir:           cleanPath(dir),
		CacheModTime:  time.Now().UnixNano(),
		CacheSize:     0,
		CacheStorable: false,
		CacheType:     cacheType,
		CacheTs:       time.Now(),
	}
	return co
}

// ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
	var co *Object
	fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
	dir, name := path.Split(fullRemote)

	cacheType := objectInCache
	parentFs := f.UnWrap()
	if f.opt.TempWritePath != "" {
		_, err := f.cache.SearchPendingUpload(fullRemote)
		if err == nil { // queued for upload
			cacheType = objectPendingUpload
			parentFs = f.tempFs
			fs.Debugf(fullRemote, "pending upload found")
		}
	}

	co = &Object{
		ParentFs:  parentFs,
		CacheFs:   f,
		Name:      cleanPath(name),
		Dir:       cleanPath(dir),
		CacheType: cacheType,
		CacheTs:   time.Now(),
	}
	co.updateData(ctx, o)
	return co
}

func (o *Object) updateData(ctx context.Context, source fs.Object) {
	o.Object = source
	o.CacheModTime = source.ModTime(ctx).UnixNano()
	o.CacheSize = source.Size()
	o.CacheStorable = source.Storable()
	o.CacheTs = time.Now()
	o.CacheHashes = make(map[hash.Type]string)
}

// Fs returns its FS info
func (o *Object) Fs() fs.Info {
	return o.CacheFs
}

// String returns a human friendly name for this object
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	p := path.Join(o.Dir, o.Name)
	return o.CacheFs.cleanRootFromPath(p)
}

// abs returns the absolute path to the object
func (o *Object) abs() string {
	return path.Join(o.Dir, o.Name)
}

// ModTime returns the cached ModTime
func (o *Object) ModTime(ctx context.Context) time.Time {
	_ = o.refresh(ctx)
	return time.Unix(0, o.CacheModTime)
}

// Size returns the cached Size
func (o *Object) Size() int64 {
	_ = o.refresh(context.TODO())
	return o.CacheSize
}

// Storable returns the cached Storable
func (o *Object) Storable() bool {
	_ = o.refresh(context.TODO())
	return o.CacheStorable
}

// refresh will check if the object info is expired and request the info from source if it is
// all these conditions must be true to skip a refresh:
// 1. cache ts didn't expire yet
// 2. is not pending a notification from the wrapped fs
func (o *Object) refresh(ctx context.Context) error {
	isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
	isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
	if !isExpired && !isNotified {
		return nil
	}

	return o.refreshFromSource(ctx, true)
}

// refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
	o.refreshMutex.Lock()
	defer o.refreshMutex.Unlock()
	var err error
	var liveObject fs.Object

	if o.Object != nil && !force {
		return nil
	}
	if o.isTempFile() {
		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
	} else {
		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
	}
	if err != nil {
		fs.Errorf(o, "error refreshing object: %v", err)
		return err
	}
	o.updateData(ctx, liveObject)
	o.persist()

	return nil
}

// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
	if err := o.refreshFromSource(ctx, false); err != nil {
		return err
	}

	err := o.Object.SetModTime(ctx, t)
	if err != nil {
		return err
	}

	o.CacheModTime = t.UnixNano()
	o.persist()
	fs.Debugf(o, "updated ModTime: %v", t)

	return nil
}

// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	var err error

	if o.Object == nil {
		err = o.refreshFromSource(ctx, true)
	} else {
		err = o.refresh(ctx)
	}
	if err != nil {
		return nil, err
	}

	cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		}
		_, err = cacheReader.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}
	}

	return readers.NewLimitedReadCloser(cacheReader, limit), nil
}
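Open translates fs.SeekOption and fs.RangeOption into an offset plus a byte limit for the limited reader, with -1 meaning "read to EOF". A standalone sketch of that decoding convention using plain integers rather than the rclone types; rclone's own RangeOption.Decode is the authoritative version:

package main

import "fmt"

// decodeRange sketches the offset/limit convention Open relies on:
// a non-negative start/end pair is an inclusive byte range, end < 0
// means "to EOF", and start < 0 with end = N requests the last N bytes.
func decodeRange(start, end, size int64) (offset, limit int64) {
	if start >= 0 {
		offset = start
		if end >= 0 {
			limit = end - start + 1 // range is inclusive of end
		} else {
			limit = -1 // read to EOF
		}
	} else {
		if end >= 0 {
			offset = size - end // suffix range: last end bytes
		}
		limit = -1
	}
	return offset, limit
}

func main() {
	const size = 100
	fmt.Println(decodeRange(0, 9, size))   // 0 10 : first ten bytes
	fmt.Println(decodeRange(40, -1, size)) // 40 -1 : from byte 40 to EOF
	fmt.Println(decodeRange(-1, 20, size)) // 80 -1 : the final twenty bytes
}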
// Update will change the object data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if err := o.refreshFromSource(ctx, false); err != nil {
		return err
	}
	// pause background uploads if active
	if o.CacheFs.opt.TempWritePath != "" {
		o.CacheFs.backgroundRunner.pause()
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
			return errors.Errorf("%v is currently uploading, can't update", o)
		}
	}
	fs.Debugf(o, "updating object contents with size %v", src.Size())

	// FIXME use reliable upload
	err := o.Object.Update(ctx, in, src, options...)
	if err != nil {
		fs.Errorf(o, "error updating source: %v", err)
		return err
	}

	// delete cached chunks and info to be replaced with new ones
	_ = o.CacheFs.cache.RemoveObject(o.abs())
	// advertise to ChangeNotify if wrapped doesn't do that
	o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)

	o.CacheModTime = src.ModTime(ctx).UnixNano()
	o.CacheSize = src.Size()
	o.CacheHashes = make(map[hash.Type]string)
	o.CacheTs = time.Now()
	o.persist()

	return nil
}

// Remove deletes the object from both the cache and the source
func (o *Object) Remove(ctx context.Context) error {
	if err := o.refreshFromSource(ctx, false); err != nil {
		return err
	}
	// pause background uploads if active
	if o.CacheFs.opt.TempWritePath != "" {
		o.CacheFs.backgroundRunner.pause()
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
			return errors.Errorf("%v is currently uploading, can't delete", o)
		}
	}
	err := o.Object.Remove(ctx)
	if err != nil {
		return err
	}

	fs.Debugf(o, "removing object")
	_ = o.CacheFs.cache.RemoveObject(o.abs())
	_ = o.CacheFs.cache.removePendingUpload(o.abs())
	parentCd := NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
	_ = o.CacheFs.cache.ExpireDir(parentCd)
	// advertise to ChangeNotify if wrapped doesn't do that
	o.CacheFs.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

	return nil
}

// Hash requests a hash of the object and stores it in the cache
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
	_ = o.refresh(ctx)
	if o.CacheHashes == nil {
		o.CacheHashes = make(map[hash.Type]string)
	}

	cachedHash, found := o.CacheHashes[ht]
	if found {
		return cachedHash, nil
	}
	if err := o.refreshFromSource(ctx, false); err != nil {
		return "", err
	}
	liveHash, err := o.Object.Hash(ctx, ht)
	if err != nil {
		return "", err
	}
	o.CacheHashes[ht] = liveHash

	o.persist()
	fs.Debugf(o, "object hash cached: %v", liveHash)

	return liveHash, nil
}

// persist adds this object to the persistent cache
func (o *Object) persist() *Object {
	err := o.CacheFs.cache.AddObject(o)
	if err != nil {
		fs.Errorf(o, "failed to cache object: %v", err)
	}
	return o
}

func (o *Object) isTempFile() bool {
	_, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
	if err != nil {
		o.CacheType = objectInCache
		return false
	}

	o.CacheType = objectPendingUpload
	return true
}

func (o *Object) tempFileStartedUpload() bool {
	started, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
	if err != nil {
		return false
	}
	return started
}

// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
	return o.Object
}

// Check the interfaces are satisfied
var (
	_ fs.Object          = (*Object)(nil)
	_ fs.ObjectUnWrapper = (*Object)(nil)
)
backend/cache/plex.go (vendored, new file, 298 lines)
@@ -0,0 +1,298 @@
// +build !plan9

package cache

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
	"sync"
	"time"

	cache "github.com/patrickmn/go-cache"
	"github.com/rclone/rclone/fs"
	"golang.org/x/net/websocket"
)

const (
	// defPlexLoginURL is the default URL for Plex login
	defPlexLoginURL        = "https://plex.tv/users/sign_in.json"
	defPlexNotificationURL = "%s/:/websockets/notifications?X-Plex-Token=%s"
)

// PlaySessionStateNotification is part of the API response of Plex
type PlaySessionStateNotification struct {
	SessionKey       string `json:"sessionKey"`
	GUID             string `json:"guid"`
	Key              string `json:"key"`
	ViewOffset       int64  `json:"viewOffset"`
	State            string `json:"state"`
	TranscodeSession string `json:"transcodeSession"`
}

// NotificationContainer is part of the API response of Plex
type NotificationContainer struct {
	Type             string                          `json:"type"`
	Size             int                             `json:"size"`
	PlaySessionState []PlaySessionStateNotification `json:"PlaySessionStateNotification"`
}

// PlexNotification is part of the API response of Plex
type PlexNotification struct {
	Container NotificationContainer `json:"NotificationContainer"`
}

// plexConnector is managing the cache integration with Plex
type plexConnector struct {
	url        *url.URL
	username   string
	password   string
	token      string
	insecure   bool
	f          *Fs
	mu         sync.Mutex
	running    bool
	runningMu  sync.Mutex
	stateCache *cache.Cache
	saveToken  func(string)
}

// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) {
	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
	if err != nil {
		return nil, err
	}

	pc := &plexConnector{
		f:          f,
		url:        u,
		username:   username,
		password:   password,
		token:      "",
		insecure:   insecure,
		stateCache: cache.New(time.Hour, time.Minute),
		saveToken:  saveToken,
	}

	return pc, nil
}

// newPlexConnectorWithToken connects to a Plex server using a previously generated token
func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) {
	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
	if err != nil {
		return nil, err
	}

	pc := &plexConnector{
		f:          f,
		url:        u,
		token:      token,
		insecure:   insecure,
		stateCache: cache.New(time.Hour, time.Minute),
	}
	pc.listenWebsocket()

	return pc, nil
}

func (p *plexConnector) closeWebsocket() {
	p.runningMu.Lock()
	defer p.runningMu.Unlock()
	fs.Infof("plex", "stopped Plex watcher")
	p.running = false
}

func (p *plexConnector) websocketDial() (*websocket.Conn, error) {
	u := strings.TrimRight(strings.Replace(strings.Replace(
		p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/")
	url := fmt.Sprintf(defPlexNotificationURL, u, p.token)

	config, err := websocket.NewConfig(url, "http://localhost")
	if err != nil {
		return nil, err
	}
	if p.insecure {
		config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
	}
	return websocket.DialConfig(config)
}

func (p *plexConnector) listenWebsocket() {
	p.runningMu.Lock()
	defer p.runningMu.Unlock()

	conn, err := p.websocketDial()
	if err != nil {
		fs.Errorf("plex", "%v", err)
		return
	}

	p.running = true
	go func() {
		for {
			if !p.isConnected() {
				break
			}

			notif := &PlexNotification{}
			err := websocket.JSON.Receive(conn, notif)
			if err != nil {
				fs.Debugf("plex", "%v", err)
				p.closeWebsocket()
				break
			}
			// we're only interested in play events
			if notif.Container.Type == "playing" {
				// we loop through each of them
				for _, v := range notif.Container.PlaySessionState {
					// event type of playing
					if v.State == "playing" {
						// if it's not cached get the details and cache them
						if _, found := p.stateCache.Get(v.Key); !found {
							req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), v.Key), nil)
							if err != nil {
								continue
							}
							p.fillDefaultHeaders(req)
							resp, err := http.DefaultClient.Do(req)
							if err != nil {
								continue
							}
							var data []byte
							data, err = ioutil.ReadAll(resp.Body)
							if err != nil {
								continue
							}
							p.stateCache.Set(v.Key, data, cache.DefaultExpiration)
						}
					} else if v.State == "stopped" {
						p.stateCache.Delete(v.Key)
					}
				}
			}
		}
	}()
}

// fillDefaultHeaders will add common headers to requests
func (p *plexConnector) fillDefaultHeaders(req *http.Request) {
	req.Header.Add("X-Plex-Client-Identifier", fmt.Sprintf("rclone (%v)", p.f.String()))
	req.Header.Add("X-Plex-Product", fmt.Sprintf("rclone (%v)", p.f.Name()))
	req.Header.Add("X-Plex-Version", fs.Version)
	req.Header.Add("Accept", "application/json")
	if p.token != "" {
		req.Header.Add("X-Plex-Token", p.token)
	}
}

// authenticate will generate a token based on a username/password
func (p *plexConnector) authenticate() error {
	p.mu.Lock()
	defer p.mu.Unlock()

	form := url.Values{}
	form.Set("user[login]", p.username)
	form.Add("user[password]", p.password)
	req, err := http.NewRequest("POST", defPlexLoginURL, strings.NewReader(form.Encode()))
	if err != nil {
		return err
	}
	p.fillDefaultHeaders(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	var data map[string]interface{}
	err = json.NewDecoder(resp.Body).Decode(&data)
	if err != nil {
		return fmt.Errorf("failed to obtain token: %v", err)
	}
	tokenGen, ok := get(data, "user", "authToken")
	if !ok {
		return fmt.Errorf("failed to obtain token: %v", data)
	}
	token, ok := tokenGen.(string)
	if !ok {
		return fmt.Errorf("failed to obtain token: %v", data)
	}
	p.token = token
	if p.token != "" {
		if p.saveToken != nil {
			p.saveToken(p.token)
		}
		fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
	}
	p.listenWebsocket()

	return nil
}

// isConnected checks if this rclone is authenticated to Plex
func (p *plexConnector) isConnected() bool {
	p.runningMu.Lock()
	defer p.runningMu.Unlock()
	return p.running
}

// isConfigured checks if this rclone is configured to use a Plex server
func (p *plexConnector) isConfigured() bool {
	return p.url != nil
}

func (p *plexConnector) isPlaying(co *Object) bool {
	var err error
	if !p.isConnected() {
		p.listenWebsocket()
	}

	remote := co.Remote()
	if cr, yes := p.f.isWrappedByCrypt(); yes {
		remote, err = cr.DecryptFileName(co.Remote())
		if err != nil {
			fs.Debugf("plex", "can not decrypt wrapped file: %v", err)
			return false
		}
	}

	isPlaying := false
	for _, v := range p.stateCache.Items() {
		if bytes.Contains(v.Object.([]byte), []byte(remote)) {
			isPlaying = true
			break
		}
	}

	return isPlaying
}

// get walks a decoded JSON structure along the given path of map keys
// and slice indexes, returning the value found and whether it exists
// adapted from: https://stackoverflow.com/a/28878037 (credit)
func get(m interface{}, path ...interface{}) (interface{}, bool) {
	for _, p := range path {
		switch idx := p.(type) {
		case string:
			if mm, ok := m.(map[string]interface{}); ok {
				if val, found := mm[idx]; found {
					m = val
					continue
				}
			}
			return nil, false
		case int:
			if mm, ok := m.([]interface{}); ok {
				if len(mm) > idx {
					m = mm[idx]
					continue
				}
			}
			return nil, false
		}
	}
	return m, true
}
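The get helper saves a chain of type assertions when digging values out of json-decoded map[string]interface{} data, as authenticate does for "user.authToken". A small usage sketch reusing the get function defined above; the payload is invented for the illustration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// a payload shaped like the Plex sign-in response (values invented)
	raw := []byte(`{"user":{"authToken":"abc123","roles":["admin","user"]}}`)
	var data map[string]interface{}
	if err := json.Unmarshal(raw, &data); err != nil {
		panic(err)
	}

	// string path elements descend into objects, int elements index arrays
	if tok, ok := get(data, "user", "authToken"); ok {
		fmt.Println("token:", tok) // token: abc123
	}
	if role, ok := get(data, "user", "roles", 1); ok {
		fmt.Println("role:", role) // role: user
	}
	if _, ok := get(data, "user", "missing"); !ok {
		fmt.Println("missing path reported cleanly, no panic")
	}
}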
backend/cache/storage_memory.go (vendored, new file, 98 lines)
@@ -0,0 +1,98 @@
// +build !plan9

package cache

import (
	"strconv"
	"strings"
	"time"

	cache "github.com/patrickmn/go-cache"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
)

// Memory is a wrapper of transient storage for a go-cache store
type Memory struct {
	db *cache.Cache
}

// NewMemory builds this cache storage
// defaultExpiration will set the expiry time of chunks in this storage
func NewMemory(defaultExpiration time.Duration) *Memory {
	mem := &Memory{}
	err := mem.Connect(defaultExpiration)
	if err != nil {
		fs.Errorf("cache", "can't open ram connection: %v", err)
	}

	return mem
}

// Connect will create a connection for the storage
func (m *Memory) Connect(defaultExpiration time.Duration) error {
	m.db = cache.New(defaultExpiration, -1)
	return nil
}

// HasChunk confirms the existence of a single chunk of an object
func (m *Memory) HasChunk(cachedObject *Object, offset int64) bool {
	key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)
	_, found := m.db.Get(key)
	return found
}

// GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it
func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
	key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)
	var data []byte

	if x, found := m.db.Get(key); found {
		data = x.([]byte)
		return data, nil
	}

	return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
}

// AddChunk adds a new chunk of a cached object
func (m *Memory) AddChunk(fp string, data []byte, offset int64) error {
	return m.AddChunkAhead(fp, data, offset, time.Second)
}

// AddChunkAhead adds a new chunk of a cached object
// note: the chunk is stored with the cache's default expiration; the t parameter is currently unused
func (m *Memory) AddChunkAhead(fp string, data []byte, offset int64, t time.Duration) error {
	key := fp + "-" + strconv.FormatInt(offset, 10)
	m.db.Set(key, data, cache.DefaultExpiration)

	return nil
}

// CleanChunksByAge will cleanup on a cron basis
// note: it drops whatever has already expired; the chunkAge parameter is currently unused
func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
	m.db.DeleteExpired()
}

// CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
func (m *Memory) CleanChunksByNeed(offset int64) {
	items := m.db.Items()
	for key := range items {
		sepIdx := strings.LastIndex(key, "-")
		keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
		if err != nil {
			fs.Errorf("cache", "couldn't parse offset entry %v", key)
			continue
		}

		if keyOffset < offset {
			m.db.Delete(key)
		}
	}
}

// CleanChunksBySize will cleanup chunks after the total size passes a certain point
func (m *Memory) CleanChunksBySize(maxSize int64) {
	// NOOP
}
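Chunks in this store are keyed as "<absolute path>-<offset>", and CleanChunksByNeed recovers the offset by splitting on the last "-", since the path itself may contain dashes. A small sketch of that key round trip (values invented for the example):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// build a key exactly the way Memory does
	abs, offset := "/test/my-file.bin", int64(10485760)
	key := abs + "-" + strconv.FormatInt(offset, 10)
	fmt.Println(key) // /test/my-file.bin-10485760

	// recover the offset the way CleanChunksByNeed does: split on the
	// LAST dash so dashes inside the path don't break the parse
	sepIdx := strings.LastIndex(key, "-")
	keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(keyOffset == offset) // true
}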
backend/cache/storage_persistent.go (vendored, new file, 1098 lines; diff suppressed because it is too large)
backend/crypt/cipher.go (new file, 1090 lines; diff suppressed because it is too large)
backend/crypt/cipher_test.go (new file, 1296 lines; diff suppressed because it is too large)
backend/crypt/crypt.go (new file, 897 lines)
@@ -0,0 +1,897 @@
// Package crypt provides wrappers for Fs and Object which implement encryption
package crypt

import (
	"context"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
)

// Globals
// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "crypt",
		Description: "Encrypt/Decrypt a remote",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "remote",
			Help:     "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
			Required: true,
		}, {
			Name:    "filename_encryption",
			Help:    "How to encrypt the filenames.",
			Default: "standard",
			Examples: []fs.OptionExample{
				{
					Value: "off",
					Help:  "Don't encrypt the file names. Adds a \".bin\" extension only.",
				}, {
					Value: "standard",
					Help:  "Encrypt the filenames, see the docs for the details.",
				}, {
					Value: "obfuscate",
					Help:  "Very simple filename obfuscation.",
				},
			},
		}, {
			Name:    "directory_name_encryption",
			Help:    "Option to either encrypt directory names or leave them intact.",
			Default: true,
			Examples: []fs.OptionExample{
				{
					Value: "true",
					Help:  "Encrypt directory names.",
				},
				{
					Value: "false",
					Help:  "Don't encrypt directory names, leave them intact.",
				},
			},
		}, {
			Name:       "password",
			Help:       "Password or pass phrase for encryption.",
			IsPassword: true,
		}, {
			Name:       "password2",
			Help:       "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
			IsPassword: true,
		}, {
			Name: "show_mapping",
			Help: `For all files listed show how the names encrypt.

If this flag is set then for each file that the remote is asked to
list, it will log (at level INFO) a line stating the decrypted file
name and the encrypted file name.

This is so you can work out which encrypted names are which decrypted
names just in case you need to do something with the encrypted file
names, or for debugging purposes.`,
			Default:  false,
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}},
	})
}

// newCipherForConfig constructs a Cipher for the given config name
func newCipherForConfig(opt *Options) (Cipher, error) {
	mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
	if err != nil {
		return nil, err
	}
	if opt.Password == "" {
		return nil, errors.New("password not set in config file")
	}
	password, err := obscure.Reveal(opt.Password)
	if err != nil {
		return nil, errors.Wrap(err, "failed to decrypt password")
	}
	var salt string
	if opt.Password2 != "" {
		salt, err = obscure.Reveal(opt.Password2)
		if err != nil {
			return nil, errors.Wrap(err, "failed to decrypt password2")
		}
	}
	cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
	if err != nil {
		return nil, errors.Wrap(err, "failed to make cipher")
	}
	return cipher, nil
}
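newCipherForConfig expects the password fields to be stored in rclone's obscured form and calls obscure.Reveal to recover the plaintext before deriving keys. A sketch of that round trip using the real obscure package; note that obscuring is reversible encoding to keep casual eyes off the config file, not a security boundary:

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/obscure"
)

func main() {
	// what `rclone config` stores in the config file for a crypt remote
	stored, err := obscure.Obscure("my secret pass phrase")
	if err != nil {
		panic(err)
	}
	fmt.Println("obscured:", stored)

	// what newCipherForConfig does before deriving the cipher keys
	plain, err := obscure.Reveal(stored)
	if err != nil {
		panic(err)
	}
	fmt.Println("revealed:", plain) // my secret pass phrase
}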
// NewCipher constructs a Cipher for the given config
|
||||
func NewCipher(m configmap.Mapper) (Cipher, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newCipherForConfig(opt)
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cipher, err := newCipherForConfig(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remote := opt.Remote
|
||||
if strings.HasPrefix(remote, name+":") {
|
||||
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
|
||||
}
|
||||
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
|
||||
}
|
||||
// Look for a file first
|
||||
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
|
||||
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
|
||||
// if that didn't produce a file, look for a directory
|
||||
if err != fs.ErrorIsFile {
|
||||
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
|
||||
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
|
||||
}
|
||||
if err != fs.ErrorIsFile && err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
|
||||
}
|
||||
f := &Fs{
|
||||
Fs: wrappedFs,
|
||||
name: name,
|
||||
root: rpath,
|
||||
opt: *opt,
|
||||
cipher: cipher,
|
||||
}
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: false, // MimeTypes not supported with crypt
|
||||
WriteMimeType: false,
|
||||
BucketBased: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
|
||||
|
||||
return f, err
|
||||
}

// Options defines the configuration for this backend
type Options struct {
	Remote                  string `config:"remote"`
	FilenameEncryption      string `config:"filename_encryption"`
	DirectoryNameEncryption bool   `config:"directory_name_encryption"`
	Password                string `config:"password"`
	Password2               string `config:"password2"`
	ShowMapping             bool   `config:"show_mapping"`
}

// Fs represents a wrapped fs.Fs
type Fs struct {
	fs.Fs
	wrapper  fs.Fs
	name     string
	root     string
	opt      Options
	features *fs.Features // optional features
	cipher   Cipher
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("Encrypted drive '%s:%s'", f.name, f.root)
}

// add appends obj to entries, decrypting the object file name.
// Objects whose names can't be decrypted are skipped.
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
	remote := obj.Remote()
	decryptedRemote, err := f.cipher.DecryptFileName(remote)
	if err != nil {
		fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
		return
	}
	if f.opt.ShowMapping {
		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
	}
	*entries = append(*entries, f.newObject(obj))
}

// addDir appends dir to entries, decrypting the directory name.
// Directories whose names can't be decrypted are skipped.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
	remote := dir.Remote()
	decryptedRemote, err := f.cipher.DecryptDirName(remote)
	if err != nil {
		fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
		return
	}
	if f.opt.ShowMapping {
		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
	}
	*entries = append(*entries, f.newDir(ctx, dir))
}

// encryptEntries decrypts the names of some directory entries.
// This alters entries in place, returning it as newEntries.
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
	newEntries = entries[:0] // in place filter
	for _, entry := range entries {
		switch x := entry.(type) {
		case fs.Object:
			f.add(&newEntries, x)
		case fs.Directory:
			f.addDir(ctx, &newEntries, x)
		default:
			return nil, errors.Errorf("Unknown object type %T", entry)
		}
	}
	return newEntries, nil
}
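
// A note on the in-place filter above (an editorial sketch, not rclone
// API): re-slicing to entries[:0] reuses the backing array, so append
// never allocates while the output is no longer than the input:
//
//	s := []int{1, 2, 3, 4}
//	out := s[:0]
//	for _, v := range s {
//		if v%2 == 0 {
//			out = append(out, v) // overwrites s in place
//		}
//	}
//	// out == []int{2, 4}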

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
	if err != nil {
		return nil, err
	}
	return f.encryptEntries(ctx, entries)
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
		newEntries, err := f.encryptEntries(ctx, entries)
		if err != nil {
			return err
		}
		return callback(newEntries)
	})
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote))
	if err != nil {
		return nil, err
	}
	return f.newObject(o), nil
}

type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)

// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
	// Encrypt the data into wrappedIn
	wrappedIn, err := f.cipher.EncryptData(in)
	if err != nil {
		return nil, err
	}

	// Find a hash the destination supports to compute a hash of
	// the encrypted data
	ht := f.Fs.Hashes().GetOne()
	var hasher *hash.MultiHasher
	if ht != hash.None {
		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
		if err != nil {
			return nil, err
		}
		// unwrap the accounting
		var wrap accounting.WrapFn
		wrappedIn, wrap = accounting.UnWrap(wrappedIn)
		// add the hasher
		wrappedIn = io.TeeReader(wrappedIn, hasher)
		// wrap the accounting back on
		wrappedIn = wrap(wrappedIn)
	}

	// Transfer the data
	o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
	if err != nil {
		return nil, err
	}

	// Check the hashes of the encrypted data if we were comparing them
	if ht != hash.None && hasher != nil {
		srcHash := hasher.Sums()[ht]
		var dstHash string
		dstHash, err = o.Hash(ctx, ht)
		if err != nil {
			return nil, errors.Wrap(err, "failed to read destination hash")
		}
		if srcHash != "" && dstHash != "" && srcHash != dstHash {
			// remove object
			err = o.Remove(ctx)
			if err != nil {
				fs.Errorf(o, "Failed to remove corrupted object: %v", err)
			}
			return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
		}
	}

	return f.newObject(o), nil
}
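
// The reader pipeline built by put, as a sketch (editorial note, not
// rclone API): the hasher is teed in *inside* the accounting wrapper, so
// the hash covers exactly the encrypted bytes without double-counting
// transfer progress:
//
//	plaintext in
//	   -> cipher.EncryptData        (encrypt)
//	   -> accounting.UnWrap         (peel off progress accounting)
//	   -> io.TeeReader(..., hasher) (hash the encrypted bytes)
//	   -> wrap(...)                 (put accounting back on)
//	   -> put(...)                  (upload to the wrapped remote)
//
// If the uploaded object's hash then differs from the hash computed on
// the way through, the object is deleted and the transfer reported as
// corrupted.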

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.put(ctx, in, src, options, f.Fs.Put)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.put(ctx, in, src, options, f.Fs.Features().PutStream)
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
}

// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context) error {
	do := f.Fs.Features().Purge
	if do == nil {
		return fs.ErrorCantPurge
	}
	return do(ctx)
}
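
// Purge above is the first of several methods that follow the same
// optional-feature delegation pattern (an editorial observation): look
// the capability up on the wrapped Fs and fall back to a sentinel error
// if the wrapped remote doesn't implement it. A minimal sketch, with
// "SomeFeature" as a placeholder name:
//
//	do := f.Fs.Features().SomeFeature // nil if unsupported downstream
//	if do == nil {
//		return fs.ErrorCantSomeFeature // caller falls back to a generic path
//	}
//	return do(ctx)
//
// Copy, Move, DirMove, PutUnchecked, CleanUp and About below all
// instantiate this shape.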

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	do := f.Fs.Features().Copy
	if do == nil {
		return nil, fs.ErrorCantCopy
	}
	o, ok := src.(*Object)
	if !ok {
		return nil, fs.ErrorCantCopy
	}
	oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
	if err != nil {
		return nil, err
	}
	return f.newObject(oResult), nil
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	do := f.Fs.Features().Move
	if do == nil {
		return nil, fs.ErrorCantMove
	}
	o, ok := src.(*Object)
	if !ok {
		return nil, fs.ErrorCantMove
	}
	oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
	if err != nil {
		return nil, err
	}
	return f.newObject(oResult), nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	do := f.Fs.Features().DirMove
	if do == nil {
		return fs.ErrorCantDirMove
	}
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
}

// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	do := f.Fs.Features().PutUnchecked
	if do == nil {
		return nil, errors.New("can't PutUnchecked")
	}
	wrappedIn, err := f.cipher.EncryptData(in)
	if err != nil {
		return nil, err
	}
	o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
	if err != nil {
		return nil, err
	}
	return f.newObject(o), nil
}

// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
	do := f.Fs.Features().CleanUp
	if do == nil {
		return errors.New("can't CleanUp")
	}
	return do(ctx)
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	do := f.Fs.Features().About
	if do == nil {
		return nil, errors.New("About not supported")
	}
	return do(ctx)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.Fs
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}

// EncryptFileName returns an encrypted file name
func (f *Fs) EncryptFileName(fileName string) string {
	return f.cipher.EncryptFileName(fileName)
}

// DecryptFileName returns a decrypted file name
func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
	return f.cipher.DecryptFileName(encryptedFileName)
}

// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
	// Read the nonce - opening the file is sufficient to read the nonce in
	// use a limited read so we only read the header
	in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
	if err != nil {
		return "", errors.Wrap(err, "failed to open object to read nonce")
	}
	d, err := f.cipher.(*cipher).newDecrypter(in)
	if err != nil {
		_ = in.Close()
		return "", errors.Wrap(err, "failed to open object to read nonce")
	}
	nonce := d.nonce
	// fs.Debugf(o, "Read nonce % 2x", nonce)

	// Check nonce isn't all zeros
	isZero := true
	for i := range nonce {
		if nonce[i] != 0 {
			isZero = false
		}
	}
	if isZero {
		fs.Errorf(o, "empty nonce read")
	}

	// Close d (and hence in) once we have read the nonce
	err = d.Close()
	if err != nil {
		return "", errors.Wrap(err, "failed to close nonce read")
	}

	// Open the src for input
	in, err = src.Open(ctx)
	if err != nil {
		return "", errors.Wrap(err, "failed to open src")
	}
	defer fs.CheckClose(in, &err)

	// Now encrypt the src with the nonce
	out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
	if err != nil {
		return "", errors.Wrap(err, "failed to make encrypter")
	}

	// pipe into hash
	m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
	if err != nil {
		return "", errors.Wrap(err, "failed to make hasher")
	}
	_, err = io.Copy(m, out)
	if err != nil {
		return "", errors.Wrap(err, "failed to hash data")
	}

	return m.Sums()[hashType], nil
}
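
// Why ComputeHash exists (editorial note): the crypt backend can't
// compare hashes directly, because the stored data is encrypted with a
// per-file random nonce. Re-encrypting the plaintext source with the
// *same* nonce reproduces the remote ciphertext bit for bit, so hashing
// that stream gives a value comparable with the wrapped remote's hash.
// Roughly, a verification (error handling elided, names hypothetical):
//
//	wantHash, _ := f.ComputeHash(ctx, cryptObj, localSrc, hash.MD5)
//	gotHash, _ := cryptObj.UnWrap().Hash(ctx, hash.MD5)
//	match := wantHash == gotHash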

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	do := f.Fs.Features().MergeDirs
	if do == nil {
		return errors.New("MergeDirs not supported")
	}
	out := make([]fs.Directory, len(dirs))
	for i, dir := range dirs {
		out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
	}
	return do(ctx, out)
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
	do := f.Fs.Features().DirCacheFlush
	if do != nil {
		do()
	}
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
	do := f.Fs.Features().PublicLink
	if do == nil {
		return "", errors.New("PublicLink not supported")
	}
	o, err := f.NewObject(ctx, remote)
	if err != nil {
		// assume it is a directory
		return do(ctx, f.cipher.EncryptDirName(remote))
	}
	return do(ctx, o.(*Object).Object.Remote())
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	do := f.Fs.Features().ChangeNotify
	if do == nil {
		return
	}
	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
		// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
		var (
			err       error
			decrypted string
		)
		switch entryType {
		case fs.EntryDirectory:
			decrypted, err = f.cipher.DecryptDirName(path)
		case fs.EntryObject:
			decrypted, err = f.cipher.DecryptFileName(path)
		default:
			fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
			return
		}
		if err != nil {
			fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
			return
		}
		notifyFunc(decrypted, entryType)
	}
	do(ctx, wrappedNotifyFunc, pollIntervalChan)
}

// Object describes a wrapped Object for being read from the Fs
//
// This decrypts the remote name and decrypts the data
type Object struct {
	fs.Object
	f *Fs
}

func (f *Fs) newObject(o fs.Object) *Object {
	return &Object{
		Object: o,
		f:      f,
	}
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.f
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	remote := o.Object.Remote()
	decryptedName, err := o.f.cipher.DecryptFileName(remote)
	if err != nil {
		fs.Debugf(remote, "Undecryptable file name: %v", err)
		return remote
	}
	return decryptedName
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	size, err := o.f.cipher.DecryptedSize(o.Object.Size())
	if err != nil {
		fs.Debugf(o, "Bad size for decrypt: %v", err)
	}
	return size
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// UnWrap returns the wrapped Object
func (o *Object) UnWrap() fs.Object {
	return o.Object
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
	var openOptions []fs.OpenOption
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		default:
			// pass on Options to underlying open if appropriate
			openOptions = append(openOptions, option)
		}
	}
	rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
		if underlyingOffset == 0 && underlyingLimit < 0 {
			// Open with no seek
			return o.Object.Open(ctx, openOptions...)
		}
		// Open stream with a range of underlyingOffset, underlyingLimit
		end := int64(-1)
		if underlyingLimit >= 0 {
			end = underlyingOffset + underlyingLimit - 1
			if end >= o.Object.Size() {
				end = -1
			}
		}
		newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
		return o.Object.Open(ctx, newOpenOptions...)
	}, offset, limit)
	if err != nil {
		return nil, err
	}
	return rc, nil
}
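
// How ranged reads map through the cipher, as a sketch (the concrete
// numbers and names below are assumptions about the crypt format, not
// taken from this file): plaintext offsets can't be used directly on the
// wrapped remote, because the stored stream has a file header plus
// per-block overhead. DecryptDataSeek therefore translates a plaintext
// (offset, limit) into an underlying (underlyingOffset, underlyingLimit)
// on block boundaries, e.g. for a 64 KiB plaintext block size:
//
//	block := offset / (64 * 1024) // plaintext block index
//	underlyingOffset := headerSize + block*(64*1024+blockOverhead)
//
// and then discards the decrypted bytes before the requested offset.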

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
		return o.Object, o.Object.Update(ctx, in, src, options...)
	}
	_, err := o.f.put(ctx, in, src, options, update)
	return err
}

// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
	newDir := fs.NewDirCopy(ctx, dir)
	remote := dir.Remote()
	decryptedRemote, err := f.cipher.DecryptDirName(remote)
	if err != nil {
		fs.Debugf(remote, "Undecryptable dir name: %v", err)
	} else {
		newDir.SetRemote(decryptedRemote)
	}
	return newDir
}

// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
type ObjectInfo struct {
	fs.ObjectInfo
	f *Fs
}

func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
	return &ObjectInfo{
		ObjectInfo: src,
		f:          f,
	}
}

// Fs returns read only access to the Fs that this object is part of
func (o *ObjectInfo) Fs() fs.Info {
	return o.f
}

// Remote returns the remote path
func (o *ObjectInfo) Remote() string {
	return o.f.cipher.EncryptFileName(o.ObjectInfo.Remote())
}

// Size returns the size of the file
func (o *ObjectInfo) Size() int64 {
	size := o.ObjectInfo.Size()
	if size < 0 {
		return size
	}
	return o.f.cipher.EncryptedSize(size)
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
	return "", nil
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	do, ok := o.Object.(fs.IDer)
	if !ok {
		return ""
	}
	return do.ID()
}

// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
	do, ok := o.Object.(fs.SetTierer)
	if !ok {
		return errors.New("crypt: underlying remote does not support SetTier")
	}
	return do.SetTier(tier)
}

// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
	do, ok := o.Object.(fs.GetTierer)
	if !ok {
		return ""
	}
	return do.GetTier()
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.UnWrapper       = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Wrapper         = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.ObjectInfo      = (*ObjectInfo)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.ObjectUnWrapper = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
	_ fs.SetTierer       = (*Object)(nil)
	_ fs.GetTierer       = (*Object)(nil)
)

backend/crypt/crypt_test.go (new file, 93 lines added)
@@ -0,0 +1,93 @@
// Test Crypt filesystem interface
package crypt_test

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/rclone/rclone/backend/crypt"
	_ "github.com/rclone/rclone/backend/drive" // for integration tests
	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/swift" // for integration tests
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		t.Skip("Skipping as -remote not set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		NilObject:                    (*crypt.Object)(nil),
		UnimplementableFsMethods:     []string{"OpenWriterAt"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}

// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
	name := "TestCrypt"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		NilObject:  (*crypt.Object)(nil),
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "crypt"},
			{Name: name, Key: "remote", Value: tempdir},
			{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
			{Name: name, Key: "filename_encryption", Value: "standard"},
		},
		UnimplementableFsMethods:     []string{"OpenWriterAt"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}

// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
	name := "TestCrypt2"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		NilObject:  (*crypt.Object)(nil),
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "crypt"},
			{Name: name, Key: "remote", Value: tempdir},
			{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
			{Name: name, Key: "filename_encryption", Value: "off"},
		},
		UnimplementableFsMethods:     []string{"OpenWriterAt"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}

// TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
	name := "TestCrypt3"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		NilObject:  (*crypt.Object)(nil),
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "crypt"},
			{Name: name, Key: "remote", Value: tempdir},
			{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
			{Name: name, Key: "filename_encryption", Value: "obfuscate"},
		},
		SkipBadWindowsCharacters:     true,
		UnimplementableFsMethods:     []string{"OpenWriterAt"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}

backend/crypt/pkcs7/pkcs7.go (new file, 63 lines added)
@@ -0,0 +1,63 @@
// Package pkcs7 implements PKCS#7 padding
//
// This is a standard way of encoding variable length buffers into
// buffers which are a multiple of an underlying crypto block size.
package pkcs7

import "github.com/pkg/errors"

// Errors Unpad can return
var (
	ErrorPaddingNotFound      = errors.New("Bad PKCS#7 padding - not padded")
	ErrorPaddingNotAMultiple  = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
	ErrorPaddingTooLong       = errors.New("Bad PKCS#7 padding - too long")
	ErrorPaddingTooShort      = errors.New("Bad PKCS#7 padding - too short")
	ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
)

// Pad buf using PKCS#7 to a multiple of n.
//
// Appends the padding to buf - make a copy of it first if you don't
// want it modified.
func Pad(n int, buf []byte) []byte {
	if n <= 1 || n >= 256 {
		panic("bad multiple")
	}
	length := len(buf)
	padding := n - (length % n)
	for i := 0; i < padding; i++ {
		buf = append(buf, byte(padding))
	}
	if (len(buf) % n) != 0 {
		panic("padding failed")
	}
	return buf
}

// Unpad buf using PKCS#7 from a multiple of n returning a slice of
// buf or an error if malformed.
func Unpad(n int, buf []byte) ([]byte, error) {
	if n <= 1 || n >= 256 {
		panic("bad multiple")
	}
	length := len(buf)
	if length == 0 {
		return nil, ErrorPaddingNotFound
	}
	if (length % n) != 0 {
		return nil, ErrorPaddingNotAMultiple
	}
	padding := int(buf[length-1])
	if padding > n {
		return nil, ErrorPaddingTooLong
	}
	if padding == 0 {
		return nil, ErrorPaddingTooShort
	}
	for i := 0; i < padding; i++ {
		if buf[length-1-i] != byte(padding) {
			return nil, ErrorPaddingNotAllTheSame
		}
	}
	return buf[:length-padding], nil
}
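
// A worked example (editorial note; the values are taken from the tests
// below): padding always adds between 1 and n bytes, each holding the
// padding length, so an already-aligned buffer still gains a full block:
//
//	Pad(8, []byte("12345"))               // -> "12345\x03\x03\x03"
//	Pad(8, []byte("abcdefgh"))            // -> "abcdefgh\x08\x08\x08\x08\x08\x08\x08\x08"
//	Unpad(8, []byte("12345\x03\x03\x03")) // -> "12345", nil
//
// That extra block is what makes Unpad unambiguous.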

backend/crypt/pkcs7/pkcs7_test.go (new file, 73 lines added)
@@ -0,0 +1,73 @@
package pkcs7

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestPad(t *testing.T) {
	for _, test := range []struct {
		n        int
		in       string
		expected string
	}{
		{8, "", "\x08\x08\x08\x08\x08\x08\x08\x08"},
		{8, "1", "1\x07\x07\x07\x07\x07\x07\x07"},
		{8, "12", "12\x06\x06\x06\x06\x06\x06"},
		{8, "123", "123\x05\x05\x05\x05\x05"},
		{8, "1234", "1234\x04\x04\x04\x04"},
		{8, "12345", "12345\x03\x03\x03"},
		{8, "123456", "123456\x02\x02"},
		{8, "1234567", "1234567\x01"},
		{8, "abcdefgh", "abcdefgh\x08\x08\x08\x08\x08\x08\x08\x08"},
		{8, "abcdefgh1", "abcdefgh1\x07\x07\x07\x07\x07\x07\x07"},
		{8, "abcdefgh12", "abcdefgh12\x06\x06\x06\x06\x06\x06"},
		{8, "abcdefgh123", "abcdefgh123\x05\x05\x05\x05\x05"},
		{8, "abcdefgh1234", "abcdefgh1234\x04\x04\x04\x04"},
		{8, "abcdefgh12345", "abcdefgh12345\x03\x03\x03"},
		{8, "abcdefgh123456", "abcdefgh123456\x02\x02"},
		{8, "abcdefgh1234567", "abcdefgh1234567\x01"},
		{8, "abcdefgh12345678", "abcdefgh12345678\x08\x08\x08\x08\x08\x08\x08\x08"},
		{16, "", "\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10"},
		{16, "a", "a\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f"},
	} {
		actual := Pad(test.n, []byte(test.in))
		assert.Equal(t, test.expected, string(actual), fmt.Sprintf("Pad %d %q", test.n, test.in))
		recovered, err := Unpad(test.n, actual)
		assert.NoError(t, err)
		assert.Equal(t, []byte(test.in), recovered, fmt.Sprintf("Unpad %d %q", test.n, test.in))
	}
	assert.Panics(t, func() { Pad(1, []byte("")) }, "bad multiple")
	assert.Panics(t, func() { Pad(256, []byte("")) }, "bad multiple")
}

func TestUnpad(t *testing.T) {
	// We've tested the OK decoding in TestPad, now test the error cases
	for _, test := range []struct {
		n   int
		in  string
		err error
	}{
		{8, "", ErrorPaddingNotFound},
		{8, "1", ErrorPaddingNotAMultiple},
		{8, "12", ErrorPaddingNotAMultiple},
		{8, "123", ErrorPaddingNotAMultiple},
		{8, "1234", ErrorPaddingNotAMultiple},
		{8, "12345", ErrorPaddingNotAMultiple},
		{8, "123456", ErrorPaddingNotAMultiple},
		{8, "1234567", ErrorPaddingNotAMultiple},
		{8, "1234567\xFF", ErrorPaddingTooLong},
		{8, "1234567\x09", ErrorPaddingTooLong},
		{8, "1234567\x00", ErrorPaddingTooShort},
		{8, "123456\x01\x02", ErrorPaddingNotAllTheSame},
		{8, "\x07\x08\x08\x08\x08\x08\x08\x08", ErrorPaddingNotAllTheSame},
	} {
		result, actualErr := Unpad(test.n, []byte(test.in))
		assert.Equal(t, test.err, actualErr, fmt.Sprintf("Unpad %d %q", test.n, test.in))
		assert.Equal(t, result, []byte(nil))
	}
	assert.Panics(t, func() { _, _ = Unpad(1, []byte("")) }, "bad multiple")
	assert.Panics(t, func() { _, _ = Unpad(256, []byte("")) }, "bad multiple")
}

backend/drive/drive.go (new file, 2871 lines added; diff suppressed because it is too large)

backend/drive/drive_internal_test.go (new file, 287 lines added)
@@ -0,0 +1,287 @@
package drive

import (
	"bytes"
	"context"
	"encoding/json"
	"io"
	"io/ioutil"
	"mime"
	"path/filepath"
	"strings"
	"testing"

	"github.com/pkg/errors"
	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/api/drive/v3"
)

func TestDriveScopes(t *testing.T) {
	for _, test := range []struct {
		in       string
		want     []string
		wantFlag bool
	}{
		{"", []string{
			"https://www.googleapis.com/auth/drive",
		}, false},
		{" drive.file , drive.readonly", []string{
			"https://www.googleapis.com/auth/drive.file",
			"https://www.googleapis.com/auth/drive.readonly",
		}, false},
		{" drive.file , drive.appfolder", []string{
			"https://www.googleapis.com/auth/drive.file",
			"https://www.googleapis.com/auth/drive.appfolder",
		}, true},
	} {
		got := driveScopes(test.in)
		assert.Equal(t, test.want, got, test.in)
		gotFlag := driveScopesContainsAppFolder(got)
		assert.Equal(t, test.wantFlag, gotFlag, test.in)
	}
}

/*
var additionalMimeTypes = map[string]string{
	"application/vnd.ms-excel.sheet.macroenabled.12":                          ".xlsm",
	"application/vnd.ms-excel.template.macroenabled.12":                       ".xltm",
	"application/vnd.ms-powerpoint.presentation.macroenabled.12":              ".pptm",
	"application/vnd.ms-powerpoint.slideshow.macroenabled.12":                 ".ppsm",
	"application/vnd.ms-powerpoint.template.macroenabled.12":                  ".potm",
	"application/vnd.ms-powerpoint":                                           ".ppt",
	"application/vnd.ms-word.document.macroenabled.12":                        ".docm",
	"application/vnd.ms-word.template.macroenabled.12":                        ".dotm",
	"application/vnd.openxmlformats-officedocument.presentationml.template":   ".potx",
	"application/vnd.openxmlformats-officedocument.spreadsheetml.template":    ".xltx",
	"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx",
	"application/vnd.sun.xml.writer":                                          ".sxw",
	"text/richtext":                                                           ".rtf",
}
*/

// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) {
	fetchFormatsOnce.Do(func() {})
	buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
	var about struct {
		ExportFormats map[string][]string `json:"exportFormats,omitempty"`
		ImportFormats map[string][]string `json:"importFormats,omitempty"`
	}
	require.NoError(t, err)
	require.NoError(t, json.Unmarshal(buf, &about))
	_exportFormats = fixMimeTypeMap(about.ExportFormats)
	_importFormats = fixMimeTypeMap(about.ImportFormats)
}

func TestInternalParseExtensions(t *testing.T) {
	for _, test := range []struct {
		in      string
		want    []string
		wantErr error
	}{
		{"doc", []string{".doc"}, nil},
		{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
		{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
		{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
	} {
		extensions, _, gotErr := parseExtensions(test.in)
		if test.wantErr == nil {
			assert.NoError(t, gotErr)
		} else {
			assert.EqualError(t, gotErr, test.wantErr.Error())
		}
		assert.Equal(t, test.want, extensions)
	}

	// Test it is appending
	extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
	assert.NoError(t, gotErr)
	assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
}

func TestInternalFindExportFormat(t *testing.T) {
	item := &drive.File{
		Name:     "file",
		MimeType: "application/vnd.google-apps.document",
	}
	for _, test := range []struct {
		extensions    []string
		wantExtension string
		wantMimeType  string
	}{
		{[]string{}, "", ""},
		{[]string{".pdf"}, ".pdf", "application/pdf"},
		{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
		{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
		{[]string{".xls", ".csv", ".svg"}, "", ""},
	} {
		f := new(Fs)
		f.exportExtensions = test.extensions
		gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
		assert.Equal(t, test.wantExtension, gotExtension)
		if test.wantExtension != "" {
			assert.Equal(t, item.Name+gotExtension, gotFilename)
		} else {
			assert.Equal(t, "", gotFilename)
		}
		assert.Equal(t, test.wantMimeType, gotMimeType)
		assert.Equal(t, true, gotIsDocument)
	}
}

func TestMimeTypesToExtension(t *testing.T) {
	for mimeType, extension := range _mimeTypeToExtension {
		extensions, err := mime.ExtensionsByType(mimeType)
		assert.NoError(t, err)
		assert.Contains(t, extensions, extension)
	}
}

func TestExtensionToMimeType(t *testing.T) {
	for mimeType, extension := range _mimeTypeToExtension {
		gotMimeType := mime.TypeByExtension(extension)
		mediatype, _, err := mime.ParseMediaType(gotMimeType)
		assert.NoError(t, err)
		assert.Equal(t, mimeType, mediatype)
	}
}

func TestExtensionsForExportFormats(t *testing.T) {
	if _exportFormats == nil {
		t.Error("exportFormats == nil")
	}
	for fromMT, toMTs := range _exportFormats {
		for _, toMT := range toMTs {
			if !isInternalMimeType(toMT) {
				extensions, err := mime.ExtensionsByType(toMT)
				assert.NoError(t, err, "invalid MIME type %q", toMT)
				assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT)
			}
		}
	}
}

func TestExtensionsForImportFormats(t *testing.T) {
	t.Skip()
	if _importFormats == nil {
		t.Error("_importFormats == nil")
	}
	for fromMT := range _importFormats {
		if !isInternalMimeType(fromMT) {
			extensions, err := mime.ExtensionsByType(fromMT)
			assert.NoError(t, err, "invalid MIME type %q", fromMT)
			assert.NotEmpty(t, extensions, "No extension found for %q", fromMT)
		}
	}
}

func (f *Fs) InternalTestDocumentImport(t *testing.T) {
	oldAllow := f.opt.AllowImportNameChange
	f.opt.AllowImportNameChange = true
	defer func() {
		f.opt.AllowImportNameChange = oldAllow
	}()

	testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
	require.NoError(t, err)

	testFilesFs, err := fs.NewFs(testFilesPath)
	require.NoError(t, err)

	_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
	require.NoError(t, err)

	err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc")
	require.NoError(t, err)
}

func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
	testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
	require.NoError(t, err)

	testFilesFs, err := fs.NewFs(testFilesPath)
	require.NoError(t, err)

	_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
	require.NoError(t, err)

	err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods")
	require.NoError(t, err)
}

func (f *Fs) InternalTestDocumentExport(t *testing.T) {
	var buf bytes.Buffer
	var err error

	f.exportExtensions, _, err = parseExtensions("txt")
	require.NoError(t, err)

	obj, err := f.NewObject(context.Background(), "example2.txt")
	require.NoError(t, err)

	rc, err := obj.Open(context.Background())
	require.NoError(t, err)
	defer func() { require.NoError(t, rc.Close()) }()

	_, err = io.Copy(&buf, rc)
	require.NoError(t, err)
	text := buf.String()

	for _, excerpt := range []string{
		"Lorem ipsum dolor sit amet, consectetur",
		"porta at ultrices in, consectetur at augue.",
	} {
		require.Contains(t, text, excerpt)
	}
}

func (f *Fs) InternalTestDocumentLink(t *testing.T) {
	var buf bytes.Buffer
	var err error

	f.exportExtensions, _, err = parseExtensions("link.html")
	require.NoError(t, err)

	obj, err := f.NewObject(context.Background(), "example2.link.html")
	require.NoError(t, err)

	rc, err := obj.Open(context.Background())
	require.NoError(t, err)
	defer func() { require.NoError(t, rc.Close()) }()

	_, err = io.Copy(&buf, rc)
	require.NoError(t, err)
	text := buf.String()

	require.True(t, strings.HasPrefix(text, "<html>"))
	require.True(t, strings.HasSuffix(text, "</html>\n"))
	for _, excerpt := range []string{
		`<meta http-equiv="refresh"`,
		`Loading <a href="`,
	} {
		require.Contains(t, text, excerpt)
	}
}

func (f *Fs) InternalTest(t *testing.T) {
	// These tests all depend on each other so run them as nested tests
	t.Run("DocumentImport", func(t *testing.T) {
		f.InternalTestDocumentImport(t)
		t.Run("DocumentUpdate", func(t *testing.T) {
			f.InternalTestDocumentUpdate(t)
			t.Run("DocumentExport", func(t *testing.T) {
				f.InternalTestDocumentExport(t)
				t.Run("DocumentLink", func(t *testing.T) {
					f.InternalTestDocumentLink(t)
				})
			})
		})
	})
}

var _ fstests.InternalTester = (*Fs)(nil)

backend/drive/drive_test.go (new file, 35 lines added)
@@ -0,0 +1,35 @@
// Test Drive filesystem interface

package drive

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDrive:",
		NilObject:  (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize:  minChunkSize,
			CeilChunkSize: fstests.NextPowerOfTwo,
		},
	})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadCutoff(cs)
}

var (
	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
)

backend/drive/test/about.json (new file, 178 lines added)
@@ -0,0 +1,178 @@
{
  "importFormats": {
    "text/tab-separated-values": [
      "application/vnd.google-apps.spreadsheet"
    ],
    "application/x-vnd.oasis.opendocument.presentation": [
      "application/vnd.google-apps.presentation"
    ],
    "image/jpeg": [
      "application/vnd.google-apps.document"
    ],
    "image/bmp": [
      "application/vnd.google-apps.document"
    ],
    "image/gif": [
      "application/vnd.google-apps.document"
    ],
    "application/vnd.ms-excel.sheet.macroenabled.12": [
      "application/vnd.google-apps.spreadsheet"
    ],
    "application/vnd.openxmlformats-officedocument.wordprocessingml.template": [
      "application/vnd.google-apps.document"
    ],
    "application/vnd.ms-powerpoint.presentation.macroenabled.12": [
      "application/vnd.google-apps.presentation"
    ],
    "application/vnd.ms-word.template.macroenabled.12": [
      "application/vnd.google-apps.document"
    ],
    "application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
      "application/vnd.google-apps.document"
    ],
    "image/pjpeg": [
      "application/vnd.google-apps.document"
    ],
    "application/vnd.google-apps.script+text/plain": [
      "application/vnd.google-apps.script"
    ],
    "application/vnd.ms-excel": [
      "application/vnd.google-apps.spreadsheet"
    ],
    "application/vnd.sun.xml.writer": [
      "application/vnd.google-apps.document"
    ],
    "application/vnd.ms-word.document.macroenabled.12": [
      "application/vnd.google-apps.document"
    ],
    "application/vnd.ms-powerpoint.slideshow.macroenabled.12": [
      "application/vnd.google-apps.presentation"
    ],
    "text/rtf": [
      "application/vnd.google-apps.document"
    ],
    "text/plain": [
      "application/vnd.google-apps.document"
    ],
    "application/vnd.oasis.opendocument.spreadsheet": [
      "application/vnd.google-apps.spreadsheet"
    ],
    "application/x-vnd.oasis.opendocument.spreadsheet": [
      "application/vnd.google-apps.spreadsheet"
    ],
    "image/png": [
      "application/vnd.google-apps.document"
    ],
    "application/x-vnd.oasis.opendocument.text": [
      "application/vnd.google-apps.document"
    ],
    "application/msword": [
      "application/vnd.google-apps.document"
    ],
    "application/pdf": [
      "application/vnd.google-apps.document"
    ],
    "application/json": [
      "application/vnd.google-apps.script"
    ],
    "application/x-msmetafile": [
      "application/vnd.google-apps.drawing"
    ],
    "application/vnd.openxmlformats-officedocument.spreadsheetml.template": [
      "application/vnd.google-apps.spreadsheet"
    ],
    "application/vnd.ms-powerpoint": [
      "application/vnd.google-apps.presentation"
    ],
    "application/vnd.ms-excel.template.macroenabled.12": [
      "application/vnd.google-apps.spreadsheet"
    ],
    "image/x-bmp": [
      "application/vnd.google-apps.document"
    ],
    "application/rtf": [
      "application/vnd.google-apps.document"
    ],
    "application/vnd.openxmlformats-officedocument.presentationml.template": [
      "application/vnd.google-apps.presentation"
    ],
    "image/x-png": [
      "application/vnd.google-apps.document"
    ],
    "text/html": [
      "application/vnd.google-apps.document"
    ],
    "application/vnd.oasis.opendocument.text": [
      "application/vnd.google-apps.document"
    ],
    "application/vnd.openxmlformats-officedocument.presentationml.presentation": [
      "application/vnd.google-apps.presentation"
    ],
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
      "application/vnd.google-apps.spreadsheet"
    ],
    "application/vnd.google-apps.script+json": [
      "application/vnd.google-apps.script"
    ],
    "application/vnd.openxmlformats-officedocument.presentationml.slideshow": [
      "application/vnd.google-apps.presentation"
    ],
    "application/vnd.ms-powerpoint.template.macroenabled.12": [
      "application/vnd.google-apps.presentation"
    ],
    "text/csv": [
      "application/vnd.google-apps.spreadsheet"
    ],
    "application/vnd.oasis.opendocument.presentation": [
      "application/vnd.google-apps.presentation"
    ],
    "image/jpg": [
      "application/vnd.google-apps.document"
    ],
    "text/richtext": [
      "application/vnd.google-apps.document"
    ]
  },
  "exportFormats": {
    "application/vnd.google-apps.document": [
      "application/rtf",
      "application/vnd.oasis.opendocument.text",
      "text/html",
      "application/pdf",
      "application/epub+zip",
      "application/zip",
      "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
      "text/plain"
    ],
    "application/vnd.google-apps.spreadsheet": [
      "application/x-vnd.oasis.opendocument.spreadsheet",
      "text/tab-separated-values",
      "application/pdf",
      "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
      "text/csv",
      "application/zip",
      "application/vnd.oasis.opendocument.spreadsheet"
    ],
    "application/vnd.google-apps.jam": [
      "application/pdf"
    ],
    "application/vnd.google-apps.script": [
      "application/vnd.google-apps.script+json"
    ],
    "application/vnd.google-apps.presentation": [
      "application/vnd.oasis.opendocument.presentation",
      "application/pdf",
      "application/vnd.openxmlformats-officedocument.presentationml.presentation",
      "text/plain"
    ],
    "application/vnd.google-apps.form": [
      "application/zip"
    ],
    "application/vnd.google-apps.drawing": [
      "image/svg+xml",
      "image/png",
      "application/pdf",
      "image/jpeg"
    ]
  }
}

backend/drive/test/files/example1.ods (new binary file, not shown)
backend/drive/test/files/example2.doc (new binary file, not shown)
backend/drive/test/files/example3.odt (new binary file, not shown)
@@ -11,7 +11,6 @@
package drive

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
@@ -20,23 +19,23 @@ import (
	"regexp"
	"strconv"

	"github.com/ncw/rclone/fs"
	"google.golang.org/api/drive/v2"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/readers"
	"google.golang.org/api/drive/v3"
	"google.golang.org/api/googleapi"
)

const (
	// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
	statusResumeIncomplete = 308

	// Number of times to try each chunk
	maxTries = 10
)

// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
	f *FsDrive
	f *Fs
	remote string
	// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
	URI string
@@ -51,39 +50,49 @@ type resumableUpload struct {
}

// Upload the io.Reader in of size bytes with contentType and info
func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
	fileId := info.Id
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(info)
	if err != nil {
		return nil, err
func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
	params := url.Values{
		"alt":        {"json"},
		"uploadType": {"resumable"},
		"fields":     {partialFields},
	}
	params := make(url.Values)
	params.Set("alt", "json")
	params.Set("uploadType", "resumable")
	urls := "https://www.googleapis.com/upload/drive/v2/files"
	params.Set("supportsAllDrives", "true")
	if f.opt.KeepRevisionForever {
		params.Set("keepRevisionForever", "true")
	}
	urls := "https://www.googleapis.com/upload/drive/v3/files"
	method := "POST"
	if fileId != "" {
	if fileID != "" {
		params.Set("setModifiedDate", "true")
		urls += "/{fileId}"
		method = "PUT"
		method = "PATCH"
	}
	urls += "?" + params.Encode()
	req, _ := http.NewRequest(method, urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"fileId": fileId,
	})
	req.Header.Set("Content-Type", "application/json; charset=UTF-8")
	req.Header.Set("X-Upload-Content-Type", contentType)
	req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
	req.Header.Set("User-Agent", fs.UserAgent)
	var res *http.Response
	f.call(&err, func() {
	var err error
	err = f.pacer.Call(func() (bool, error) {
		var body io.Reader
		body, err = googleapi.WithoutDataWrapper.JSONReader(info)
		if err != nil {
			return false, err
		}
		var req *http.Request
		req, err = http.NewRequest(method, urls, body)
		if err != nil {
			return false, err
		}
		googleapi.Expand(req.URL, map[string]string{
			"fileId": fileID,
		})
		req.Header.Set("Content-Type", "application/json; charset=UTF-8")
		req.Header.Set("X-Upload-Content-Type", contentType)
		req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
		res, err = f.client.Do(req)
		if err == nil {
			defer googleapi.CloseBody(res)
			err = googleapi.CheckResponse(res)
		}
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
@@ -101,9 +110,8 @@ func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *dri
}

// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body []byte) *http.Request {
	reqSize := int64(len(body))
	req, _ := http.NewRequest("POST", rx.URI, bytes.NewBuffer(body))
func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
	req, _ := http.NewRequest("POST", rx.URI, body)
	req.ContentLength = reqSize
	if reqSize != 0 {
		req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
@@ -111,7 +119,6 @@ func (rx *resumableUpload) makeRequest(start int64, body []byte) *http.Request {
		req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
	}
	req.Header.Set("Content-Type", rx.MediaType)
	req.Header.Set("User-Agent", fs.UserAgent)
	return req
}

@@ -123,7 +130,7 @@ var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
	req := rx.makeRequest(0, nil)
	req := rx.makeRequest(0, nil, 0)
	res, err := rx.f.client.Do(req)
	if err != nil {
		return 0, err
@@ -137,7 +144,7 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
	if err != nil {
		return 0, err
	}
	return 0, fmt.Errorf("unexpected http return code %v", res.StatusCode)
	return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
}
	Range := res.Header.Get("Range")
	if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
@@ -146,12 +153,13 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
		return start, nil
	}
}
	return 0, fmt.Errorf("unable to parse range %q", Range)
	return 0, errors.Errorf("unable to parse range %q", Range)
}

// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, body []byte) (int, error) {
	req := rx.makeRequest(start, body)
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
	_, _ = chunk.Seek(0, io.SeekStart)
	req := rx.makeRequest(start, chunk, chunkSize)
	res, err := rx.f.client.Do(req)
	if err != nil {
		return 599, err
@@ -173,7 +181,7 @@ func (rx *resumableUpload) transferChunk(start int64, body []byte) (int, error)
	// been 200 OK.
	//
	// So parse the response out of the body. We aren't expecting
	// any other 2xx codes, so we parse it unconditionaly on
	// any other 2xx codes, so we parse it unconditionally on
	// StatusCode
	if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
		return 598, err
@@ -183,40 +191,34 @@ func (rx *resumableUpload) transferChunk(start int64, body []byte) (int, error)
}

// Upload uploads the chunks from the input
// It retries each chunk maxTries times (with a pause of uploadPause between attempts).
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload() (*drive.File, error) {
	start := int64(0)
	buf := make([]byte, chunkSize)
	var StatusCode int
	var err error
	buf := make([]byte, int(rx.f.opt.ChunkSize))
	for start < rx.ContentLength {
		reqSize := rx.ContentLength - start
		if reqSize >= int64(chunkSize) {
			reqSize = int64(chunkSize)
		} else {
			buf = buf[:reqSize]
		if reqSize >= int64(rx.f.opt.ChunkSize) {
			reqSize = int64(rx.f.opt.ChunkSize)
		}
		chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)

		// Read the chunk
		_, err := io.ReadFull(rx.Media, buf)
		// Transfer the chunk
		err = rx.f.pacer.Call(func() (bool, error) {
			fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
			StatusCode, err = rx.transferChunk(start, chunk, reqSize)
			again, err := shouldRetry(err)
			if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
				again = false
				err = nil
			}
			return again, err
		})
		if err != nil {
			return nil, err
		}

		// Transfer the chunk
		for try := 1; try <= maxTries; try++ {
			fs.Debug(rx.remote, "Sending chunk %d length %d, %d/%d", start, reqSize, try, maxTries)
			rx.f.beginCall()
			StatusCode, err = rx.transferChunk(start, buf)
			rx.f.endCall(err)
			if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
				goto success
			}
			fs.Debug(rx.remote, "Retrying chunk %d/%d, code=%d, err=%v", try, maxTries, StatusCode, err)
		}
		fs.Debug(rx.remote, "Failed to send chunk")
		return nil, fs.RetryErrorf("Chunk upload failed - retry: code=%d, err=%v", StatusCode, err)
	success:

		start += reqSize
	}
	// Resume or retry uploads that fail due to connection interruptions or
@@ -240,7 +242,7 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
	// Handle 404 Not Found errors when doing resumable uploads by starting
	// the entire upload over from the beginning.
	if rx.ret == nil {
		return nil, fs.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
		return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
	}
	return rx.ret, nil
}
|
||||
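Editor's note: the switch above from body []byte to body io.ReadSeeker is what makes pacer-driven retries safe — every retry must be able to rewind and re-send the same chunk. A minimal standard-library sketch of that idea follows; it illustrates the pattern only and is not rclone's readers.NewRepeatableLimitReaderBuffer, which achieves the same effect without an extra wrapper.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	media := strings.NewReader("0123456789abcdef") // stands in for rx.Media
	buf := make([]byte, 8)                         // chunk buffer

	// Read one chunk from the stream into the buffer.
	n, err := io.ReadFull(media, buf)
	if err != nil && err != io.ErrUnexpectedEOF {
		panic(err)
	}
	chunk := bytes.NewReader(buf[:n]) // seekable view of the chunk

	for try := 1; try <= 3; try++ { // stands in for the pacer's retries
		_, _ = chunk.Seek(0, io.SeekStart) // rewind before each attempt
		body, _ := io.ReadAll(chunk)
		fmt.Printf("attempt %d sends %q\n", try, body)
	}
}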
backend/dropbox/dbhash/dbhash.go (new file, 127 lines)
@@ -0,0 +1,127 @@
// Package dbhash implements the dropbox hash as described in
//
// https://www.dropbox.com/developers/reference/content-hash
package dbhash

import (
	"crypto/sha256"
	"hash"
)

const (
	// BlockSize of the checksum in bytes.
	BlockSize = sha256.BlockSize
	// Size of the checksum in bytes.
	Size = sha256.BlockSize
	bytesPerBlock = 4 * 1024 * 1024
	hashReturnedError = "hash function returned error"
)

type digest struct {
	n         int // bytes written into blockHash so far
	blockHash hash.Hash
	totalHash hash.Hash
	sumCalled   bool
	writtenMore bool
}

// New returns a new hash.Hash computing the Dropbox checksum.
func New() hash.Hash {
	d := &digest{}
	d.Reset()
	return d
}

// writeBlockHash writes the current block hash into the total hash
func (d *digest) writeBlockHash() {
	blockHash := d.blockHash.Sum(nil)
	_, err := d.totalHash.Write(blockHash)
	if err != nil {
		panic(hashReturnedError)
	}
	// reset counters for blockhash
	d.n = 0
	d.blockHash.Reset()
}

// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (d *digest) Write(p []byte) (n int, err error) {
	n = len(p)
	for len(p) > 0 {
		d.writtenMore = true
		toWrite := bytesPerBlock - d.n
		if toWrite > len(p) {
			toWrite = len(p)
		}
		_, err = d.blockHash.Write(p[:toWrite])
		if err != nil {
			panic(hashReturnedError)
		}
		d.n += toWrite
		p = p[toWrite:]
		// Accumulate the total hash
		if d.n == bytesPerBlock {
			d.writeBlockHash()
		}
	}
	return n, nil
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
//
// TODO(ncw) Sum() can only be called once for this type of hash.
// If you call Sum(), then Write() then Sum() it will result in
// a panic. Calling Write() then Sum(), then Sum() is OK.
func (d *digest) Sum(b []byte) []byte {
	if d.sumCalled && d.writtenMore {
		panic("digest.Sum() called more than once")
	}
	d.sumCalled = true
	d.writtenMore = false
	if d.n != 0 {
		d.writeBlockHash()
	}
	return d.totalHash.Sum(b)
}

// Reset resets the Hash to its initial state.
func (d *digest) Reset() {
	d.n = 0
	d.totalHash = sha256.New()
	d.blockHash = sha256.New()
	d.sumCalled = false
	d.writtenMore = false
}

// Size returns the number of bytes Sum will return.
func (d *digest) Size() int {
	return d.totalHash.Size()
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int {
	return d.totalHash.BlockSize()
}

// Sum returns the Dropbox checksum of the data.
func Sum(data []byte) [Size]byte {
	var d digest
	d.Reset()
	_, _ = d.Write(data)
	var out [Size]byte
	d.Sum(out[:0])
	return out
}

// must implement this interface
var _ hash.Hash = (*digest)(nil)
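Editor's note: a hedged usage sketch of the dbhash package defined above — stream a file through the hash and print the Dropbox content hash. The file name is invented for the example.

package main

import (
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/rclone/rclone/backend/dropbox/dbhash"
)

func main() {
	f, err := os.Open("file.bin") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = f.Close() }()

	d := dbhash.New()
	// Write hashes each 4 MiB block as the data streams past.
	if _, err := io.Copy(d, f); err != nil {
		log.Fatal(err)
	}
	// Sum returns the SHA-256 of the concatenated per-block SHA-256s.
	fmt.Println(hex.EncodeToString(d.Sum(nil)))
}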
backend/dropbox/dbhash/dbhash_test.go (new file, 88 lines)
@@ -0,0 +1,88 @@
package dbhash_test

import (
	"encoding/hex"
	"fmt"
	"testing"

	"github.com/rclone/rclone/backend/dropbox/dbhash"
	"github.com/stretchr/testify/assert"
)

func testChunk(t *testing.T, chunk int) {
	data := make([]byte, chunk)
	for i := 0; i < chunk; i++ {
		data[i] = 'A'
	}
	for _, test := range []struct {
		n    int
		want string
	}{
		{0, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
		{1, "1cd6ef71e6e0ff46ad2609d403dc3fee244417089aa4461245a4e4fe23a55e42"},
		{2, "01e0655fb754d10418a73760f57515f4903b298e6d67dda6bf0987fa79c22c88"},
		{4096, "8620913d33852befe09f16fff8fd75f77a83160d29f76f07e0276e9690903035"},
		{4194303, "647c8627d70f7a7d13ce96b1e7710a771a55d41a62c3da490d92e56044d311fa"},
		{4194304, "d4d63bac5b866c71620185392a8a6218ac1092454a2d16f820363b69852befa3"},
		{4194305, "8f553da8d00d0bf509d8470e242888be33019c20c0544811f5b2b89e98360b92"},
		{8388607, "83b30cf4fb5195b04a937727ae379cf3d06673bf8f77947f6a92858536e8369c"},
		{8388608, "e08b3ba1f538804075c5f939accdeaa9efc7b5c01865c94a41e78ca6550a88e7"},
		{8388609, "02c8a4aefc2bfc9036f89a7098001865885938ca580e5c9e5db672385edd303c"},
	} {
		d := dbhash.New()
		var toWrite int
		for toWrite = test.n; toWrite >= chunk; toWrite -= chunk {
			n, err := d.Write(data)
			assert.Nil(t, err)
			assert.Equal(t, chunk, n)
		}
		n, err := d.Write(data[:toWrite])
		assert.Nil(t, err)
		assert.Equal(t, toWrite, n)
		got := hex.EncodeToString(d.Sum(nil))
		assert.Equal(t, test.want, got, fmt.Sprintf("when testing length %d", n))
	}
}

func TestHashChunk16M(t *testing.T)  { testChunk(t, 16*1024*1024) }
func TestHashChunk8M(t *testing.T)   { testChunk(t, 8*1024*1024) }
func TestHashChunk4M(t *testing.T)   { testChunk(t, 4*1024*1024) }
func TestHashChunk2M(t *testing.T)   { testChunk(t, 2*1024*1024) }
func TestHashChunk1M(t *testing.T)   { testChunk(t, 1*1024*1024) }
func TestHashChunk64k(t *testing.T)  { testChunk(t, 64*1024) }
func TestHashChunk32k(t *testing.T)  { testChunk(t, 32*1024) }
func TestHashChunk2048(t *testing.T) { testChunk(t, 2048) }
func TestHashChunk2047(t *testing.T) { testChunk(t, 2047) }

func TestSumCalledTwice(t *testing.T) {
	d := dbhash.New()
	assert.NotPanics(t, func() { d.Sum(nil) })
	d.Reset()
	assert.NotPanics(t, func() { d.Sum(nil) })
	assert.NotPanics(t, func() { d.Sum(nil) })
	_, _ = d.Write([]byte{1})
	assert.Panics(t, func() { d.Sum(nil) })
}

func TestSize(t *testing.T) {
	d := dbhash.New()
	assert.Equal(t, 32, d.Size())
}

func TestBlockSize(t *testing.T) {
	d := dbhash.New()
	assert.Equal(t, 64, d.BlockSize())
}

func TestSum(t *testing.T) {
	assert.Equal(t,
		[64]byte{
			0x1c, 0xd6, 0xef, 0x71, 0xe6, 0xe0, 0xff, 0x46,
			0xad, 0x26, 0x09, 0xd4, 0x03, 0xdc, 0x3f, 0xee,
			0x24, 0x44, 0x17, 0x08, 0x9a, 0xa4, 0x46, 0x12,
			0x45, 0xa4, 0xe4, 0xfe, 0x23, 0xa5, 0x5e, 0x42,
		},
		dbhash.Sum([]byte{'A'}),
	)
}
backend/dropbox/dropbox.go (new file, 1151 lines)
File diff suppressed because it is too large
backend/dropbox/dropbox_test.go (new file, 26 lines)
@@ -0,0 +1,26 @@
// Test Dropbox filesystem interface
package dropbox

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDropbox:",
		NilObject:  (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MaxChunkSize: maxChunkSize,
		},
	})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
backend/fichier/api.go (new file, 386 lines)
@@ -0,0 +1,386 @@
package fichier

import (
	"context"
	"io"
	"net/http"
	"regexp"
	"strconv"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/rest"
)

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString

func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
	request := DownloadRequest{
		URL:    url,
		Single: 1,
	}
	opts := rest.Opts{
		Method: "POST",
		Path:   "/download/get_token.cgi",
	}

	var token GetTokenResponse
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(&opts, &request, &token)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list files")
	}

	return &token, nil
}

func fileFromSharedFile(file *SharedFile) File {
	return File{
		URL:      file.Link,
		Filename: file.Filename,
		Size:     file.Size,
	}
}

func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
	opts := rest.Opts{
		Method:     "GET",
		RootURL:    "https://1fichier.com/dir/",
		Path:       id,
		Parameters: map[string][]string{"json": {"1"}},
	}

	var sharedFiles SharedFolderResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(&opts, nil, &sharedFiles)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list files")
	}

	entries = make([]fs.DirEntry, len(sharedFiles))

	for i, sharedFile := range sharedFiles {
		entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&sharedFile))
	}

	return entries, nil
}

func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
	// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
	request := ListFilesRequest{
		FolderID: directoryID,
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/file/ls.cgi",
	}

	filesList = &FilesList{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(&opts, &request, filesList)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list files")
	}

	return filesList, nil
}

func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error) {
	// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)

	request := ListFolderRequest{
		FolderID: directoryID,
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/folder/ls.cgi",
	}

	foldersList = &FoldersList{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(&opts, &request, foldersList)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list folders")
	}

	// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)

	return foldersList, err
}

func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return nil, err
	}

	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}

	folderID, err := strconv.Atoi(directoryID)
	if err != nil {
		return nil, err
	}

	files, err := f.listFiles(folderID)
	if err != nil {
		return nil, err
	}

	folders, err := f.listFolders(folderID)
	if err != nil {
		return nil, err
	}

	entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))

	for i, item := range files.Items {
		entries[i] = f.newObjectFromFile(ctx, dir, item)
	}

	for i, folder := range folders.SubFolders {
		createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate)
		if err != nil {
			return nil, err
		}

		folder.Name = restoreReservedChars(folder.Name)
		fullPath := getRemote(dir, folder.Name)
		folderID := strconv.Itoa(folder.ID)

		entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID)

		// fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID)
		f.dirCache.Put(fullPath, folderID)
	}

	return entries, nil
}

func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object {
	return &Object{
		fs:     f,
		remote: getRemote(dir, item.Filename),
		file:   item,
	}
}

func getRemote(dir, fileName string) string {
	if dir == "" {
		return fileName
	}

	return dir + "/" + fileName
}

func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse, err error) {
	name := replaceReservedChars(leaf)
	// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)

	request := MakeFolderRequest{
		FolderID: folderID,
		Name:     name,
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/folder/mkdir.cgi",
	}

	response = &MakeFolderResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(&opts, &request, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create folder")
	}

	// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)

	return response, err
}

func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKResponse, err error) {
	// fs.Debugf(f, "Removing folder with id `%s`", directoryID)

	request := &RemoveFolderRequest{
		FolderID: folderID,
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/folder/rm.cgi",
	}

	response = &GenericOKResponse{}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rest.CallJSON(&opts, request, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't remove folder")
	}
	if response.Status != "OK" {
		return nil, errors.New("Can't remove non-empty dir")
	}

	// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)

	return response, nil
}

func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
	request := &RemoveFileRequest{
		Files: []RmFile{
			{url},
		},
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/file/rm.cgi",
	}

	response = &GenericOKResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(&opts, request, response)
		return shouldRetry(resp, err)
	})

	if err != nil {
		return nil, errors.Wrap(err, "couldn't remove file")
	}

	// fs.Debugf(f, "Removed file with url `%s`", url)

	return response, nil
}

func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
	// fs.Debugf(f, "Requesting Upload node")

	opts := rest.Opts{
		Method:      "GET",
		ContentType: "application/json", // 1Fichier API is bad
		Path:        "/upload/get_upload_server.cgi",
	}

	response = &GetUploadNodeResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(&opts, nil, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "didn't get an upload node")
	}

	// fs.Debugf(f, "Got Upload node")

	return response, err
}

func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
	// fs.Debugf(f, "Uploading File `%s`", fileName)

	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
		return nil, errors.New("Invalid UploadID")
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/upload.cgi",
		Parameters: map[string][]string{
			"id": {uploadID},
		},
		NoResponse:           true,
		Body:                 in,
		ContentLength:        &size,
		MultipartContentName: "file[]",
		MultipartFileName:    fileName,
		MultipartParams: map[string][]string{
			"did": {folderID},
		},
	}

	if node != "" {
		opts.RootURL = "https://" + node
	}

	err = f.pacer.CallNoRetry(func() (bool, error) {
		resp, err := f.rest.CallJSON(&opts, nil, nil)
		return shouldRetry(resp, err)
	})

	if err != nil {
		return nil, errors.Wrap(err, "couldn't upload file")
	}

	// fs.Debugf(f, "Uploaded File `%s`", fileName)

	return response, err
}

func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
	// fs.Debugf(f, "Ending File Upload `%s`", uploadID)

	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
		return nil, errors.New("Invalid UploadID")
	}

	opts := rest.Opts{
		Method:  "GET",
		Path:    "/end.pl",
		RootURL: "https://" + nodeurl,
		Parameters: map[string][]string{
			"xid": {uploadID},
		},
		ExtraHeaders: map[string]string{
			"JSON": "1",
		},
	}

	response = &EndFileUploadResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(&opts, nil, response)
		return shouldRetry(resp, err)
	})

	if err != nil {
		return nil, errors.Wrap(err, "couldn't finish file upload")
	}

	return response, err
}
backend/fichier/fichier.go (new file, 411 lines)
@@ -0,0 +1,411 @@
package fichier

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
)

const (
	rootID        = "0"
	apiBaseURL    = "https://api.1fichier.com/v1"
	minSleep      = 334 * time.Millisecond // 3 API calls per second is recommended
	maxSleep      = 5 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "fichier",
		Description: "1Fichier",
		Config: func(name string, config configmap.Mapper) {
		},
		NewFs: NewFs,
		Options: []fs.Option{
			{
				Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
				Name: "api_key",
			},
			{
				Help:     "If you want to download a shared folder, add this parameter",
				Name:     "shared_folder",
				Required: false,
				Advanced: true,
			},
		},
	})
}

// Options defines the configuration for this backend
type Options struct {
	APIKey       string `config:"api_key"`
	SharedFolder string `config:"shared_folder"`
}

// Fs is the interface a cloud storage system must provide
type Fs struct {
	root       string
	name       string
	features   *fs.Features
	dirCache   *dircache.DirCache
	baseClient *http.Client
	options    *Options
	pacer      *fs.Pacer
	rest       *rest.Client
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	folderID, err := strconv.Atoi(pathID)
	if err != nil {
		return "", false, err
	}
	folders, err := f.listFolders(folderID)
	if err != nil {
		return "", false, err
	}

	for _, folder := range folders.SubFolders {
		if folder.Name == leaf {
			pathIDOut := strconv.Itoa(folder.ID)
			return pathIDOut, true, nil
		}
	}

	return "", false, nil
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	folderID, err := strconv.Atoi(pathID)
	if err != nil {
		return "", err
	}
	resp, err := f.makeFolder(leaf, folderID)
	if err != nil {
		return "", err
	}
	return strconv.Itoa(resp.FolderID), err
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("1Fichier root '%s'", f.root)
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.Whirlpool)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, rootleaf string, config configmap.Mapper) (fs.Fs, error) {
	root := replaceReservedChars(rootleaf)
	opt := new(Options)
	err := configstruct.Set(config, opt)
	if err != nil {
		return nil, err
	}

	// If using a Shared Folder override root
	if opt.SharedFolder != "" {
		root = ""
	}

	//workaround for wonky parser
	root = strings.Trim(root, "/")

	f := &Fs{
		name:       name,
		root:       root,
		options:    opt,
		pacer:      fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		baseClient: &http.Client{},
	}

	f.features = (&fs.Features{
		DuplicateFiles:          true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)

	client := fshttp.NewClient(fs.Config)

	f.rest = rest.NewClient(client).SetRoot(apiBaseURL)

	f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)

	f.dirCache = dircache.New(root, rootID, f)

	ctx := context.Background()

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.NewObject(ctx, remote)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		f.features.Fill(&tempF)
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	if f.options.SharedFolder != "" {
		return f.listSharedFiles(ctx, f.options.SharedFolder)
	}

	dirContent, err := f.listDir(ctx, dir)
	if err != nil {
		return nil, err
	}

	return dirContent, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}

	folderID, err := strconv.Atoi(directoryID)
	if err != nil {
		return nil, err
	}
	files, err := f.listFiles(folderID)
	if err != nil {
		return nil, err
	}

	for _, file := range files.Items {
		if file.Filename == leaf {
			path, ok := f.dirCache.GetInv(directoryID)

			if !ok {
				return nil, errors.New("Cannot find dir in dircache")
			}

			return f.newObjectFromFile(ctx, path, file), nil
		}
	}

	return nil, fs.ErrorObjectNotFound
}

// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return existingObj, existingObj.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
		return f.PutUnchecked(ctx, in, src, options...)
	default:
		return nil, err
	}
}

// putUnchecked uploads the object with the given name and size
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
	if size > int64(100E9) {
		return nil, errors.New("File too big, can't upload")
	} else if size == 0 {
		return nil, fs.ErrorCantUploadEmptyFiles
	}

	nodeResponse, err := f.getUploadNode()
	if err != nil {
		return nil, err
	}

	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}

	_, err = f.uploadFile(in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
	if err != nil {
		return nil, err
	}

	fileUploadResponse, err := f.endUpload(nodeResponse.ID, nodeResponse.URL)
	if err != nil {
		return nil, err
	}

	if len(fileUploadResponse.Links) != 1 {
		return nil, errors.New("unexpected amount of files")
	}

	link := fileUploadResponse.Links[0]
	fileSize, err := strconv.ParseInt(link.Size, 10, 64)

	if err != nil {
		return nil, err
	}

	return &Object{
		fs:     f,
		remote: remote,
		file: File{
			ACL:         0,
			CDN:         0,
			Checksum:    link.Whirlpool,
			ContentType: "",
			Date:        time.Now().Format("2006-01-02 15:04:05"),
			Filename:    link.Filename,
			Pass:        0,
			Size:        int(fileSize),
			URL:         link.Download,
		},
	}, nil
}

// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	err := f.dirCache.FindRoot(ctx, true)
	if err != nil {
		return err
	}
	if dir != "" {
		_, err = f.dirCache.FindDir(ctx, dir, true)
	}
	return err
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	err := f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}

	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}

	folderID, err := strconv.Atoi(directoryID)
	if err != nil {
		return err
	}

	_, err = f.removeFolder(dir, folderID)
	if err != nil {
		return err
	}

	f.dirCache.FlushDir(dir)

	return nil
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ dircache.DirCacher = (*Fs)(nil)
)
backend/fichier/fichier_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test 1Fichier filesystem interface
package fichier

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fs.Config.LogLevel = fs.LogLevelDebug
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFichier:",
	})
}
backend/fichier/object.go (new file, 158 lines)
@@ -0,0 +1,158 @@
package fichier

import (
	"context"
	"io"
	"net/http"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/rest"
)

// Object is a filesystem like object provided by an Fs
type Object struct {
	fs     *Fs
	remote string
	file   File
}

// String returns a description of the Object
func (o *Object) String() string {
	return o.file.Filename
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
	modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date)

	if err != nil {
		return time.Now()
	}

	return modTime
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return int64(o.file.Size)
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.Whirlpool {
		return "", hash.ErrUnsupported
	}

	return o.file.Checksum, nil
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(context.Context, time.Time) error {
	return fs.ErrorCantSetModTime
	//return errors.New("setting modtime is not supported for 1fichier remotes")
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	fs.FixRangeOption(options, int64(o.file.Size))
	downloadToken, err := o.fs.getDownloadToken(o.file.URL)

	if err != nil {
		return nil, err
	}

	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: downloadToken.URL,
		Options: options,
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.rest.Call(&opts)
		return shouldRetry(resp, err)
	})

	if err != nil {
		return nil, err
	}
	return resp.Body, err
}

// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if src.Size() < 0 {
		return errors.New("refusing to update with unknown size")
	}

	// upload with new size but old name
	info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
	if err != nil {
		return err
	}

	// Delete duplicate after successful upload
	err = o.Remove(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to remove old version")
	}

	// Replace guts of old object with new one
	*o = *info.(*Object)

	return nil
}

// Remove removes this object
func (o *Object) Remove(ctx context.Context) error {
	// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)

	_, err := o.fs.deleteFile(o.file.URL)

	if err != nil {
		return err
	}

	return nil
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.file.ContentType
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.file.URL
}

// Check the interfaces are satisfied
var (
	_ fs.Object    = (*Object)(nil)
	_ fs.MimeTyper = (*Object)(nil)
	_ fs.IDer      = (*Object)(nil)
)
backend/fichier/replace.go (new file, 71 lines)
@@ -0,0 +1,71 @@
/*
Translate file names for 1fichier

1Fichier reserved characters

The following characters are 1Fichier reserved characters, and can't
be used in 1Fichier folder and file names.

*/

package fichier

import (
	"regexp"
	"strings"
)

// charMap holds replacements for characters
//
// 1Fichier has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
	charMap = map[rune]rune{
		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
		'"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
		'\'': '＇', // FULLWIDTH APOSTROPHE
		'$':  '＄', // FULLWIDTH DOLLAR SIGN
		'`':  '｀', // FULLWIDTH GRAVE ACCENT
		' ':  '␠', // SYMBOL FOR SPACE
	}
	invCharMap           map[rune]rune
	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)

func init() {
	// Create inverse charMap
	invCharMap = make(map[rune]rune, len(charMap))
	for k, v := range charMap {
		invCharMap[v] = k
	}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
	// file names can't start with space either
	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
	// Replace reserved characters
	return strings.Map(func(c rune) rune {
		if replacement, ok := charMap[c]; ok && c != ' ' {
			return replacement
		}
		return c
	}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
	return strings.Map(func(c rune) rune {
		if replacement, ok := invCharMap[c]; ok {
			return replacement
		}
		return c
	}, in)
}
backend/fichier/replace_test.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package fichier

import "testing"

func TestReplace(t *testing.T) {
	for _, test := range []struct {
		in  string
		out string
	}{
		{"", ""},
		{"abc 123", "abc 123"},
		{"\"'<>/\\$`", `＂＇＜＞/＼＄｀`},
		{" leading space", "␠leading space"},
	} {
		got := replaceReservedChars(test.in)
		if got != test.out {
			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
		}
		got2 := restoreReservedChars(got)
		if got2 != test.in {
			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
		}
	}
}
backend/fichier/structs.go (new file, 120 lines)
@@ -0,0 +1,120 @@
package fichier

// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
	FolderID int `json:"folder_id"`
}

// ListFilesRequest is the request structure of the corresponding request
type ListFilesRequest struct {
	FolderID int `json:"folder_id"`
}

// DownloadRequest is the request structure of the corresponding request
type DownloadRequest struct {
	URL    string `json:"url"`
	Single int    `json:"single"`
}

// RemoveFolderRequest is the request structure of the corresponding request
type RemoveFolderRequest struct {
	FolderID int `json:"folder_id"`
}

// RemoveFileRequest is the request structure of the corresponding request
type RemoveFileRequest struct {
	Files []RmFile `json:"files"`
}

// RmFile is the request structure of the corresponding request
type RmFile struct {
	URL string `json:"url"`
}

// GenericOKResponse is the response structure of the corresponding request
type GenericOKResponse struct {
	Status  string `json:"status"`
	Message string `json:"message"`
}

// MakeFolderRequest is the request structure of the corresponding request
type MakeFolderRequest struct {
	Name     string `json:"name"`
	FolderID int    `json:"folder_id"`
}

// MakeFolderResponse is the response structure of the corresponding request
type MakeFolderResponse struct {
	Name     string `json:"name"`
	FolderID int    `json:"folder_id"`
}

// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
	ID  string `json:"id"`
	URL string `json:"url"`
}

// GetTokenResponse is the response structure of the corresponding request
type GetTokenResponse struct {
	URL     string `json:"url"`
	Status  string `json:"Status"`
	Message string `json:"Message"`
}

// SharedFolderResponse is the response structure of the corresponding request
type SharedFolderResponse []SharedFile

// SharedFile is the structure how 1Fichier returns a shared File
type SharedFile struct {
	Filename string `json:"filename"`
	Link     string `json:"link"`
	Size     int    `json:"size"`
}

// EndFileUploadResponse is the response structure of the corresponding request
type EndFileUploadResponse struct {
	Incoming int `json:"incoming"`
	Links    []struct {
		Download  string `json:"download"`
		Filename  string `json:"filename"`
		Remove    string `json:"remove"`
		Size      string `json:"size"`
		Whirlpool string `json:"whirlpool"`
	} `json:"links"`
}

// File is the structure how 1Fichier returns a File
type File struct {
	ACL         int    `json:"acl"`
	CDN         int    `json:"cdn"`
	Checksum    string `json:"checksum"`
	ContentType string `json:"content-type"`
	Date        string `json:"date"`
	Filename    string `json:"filename"`
	Pass        int    `json:"pass"`
	Size        int    `json:"size"`
	URL         string `json:"url"`
}

// FilesList is the structure how 1Fichier returns a list of files
type FilesList struct {
	Items  []File `json:"items"`
	Status string `json:"Status"`
}

// Folder is the structure how 1Fichier returns a Folder
type Folder struct {
	CreateDate string `json:"create_date"`
	ID         int    `json:"id"`
	Name       string `json:"name"`
	Pass       int    `json:"pass"`
}

// FoldersList is the structure how 1Fichier returns a list of Folders
type FoldersList struct {
	FolderID   int      `json:"folder_id"`
	Name       string   `json:"name"`
	Status     string   `json:"Status"`
	SubFolders []Folder `json:"sub_folders"`
}
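Editor's note: to make the wire format concrete, here is a hedged, test-style sketch of decoding a file listing into the FilesList type above. The JSON values are invented for illustration; only the field names come from the struct tags.

package fichier

import (
	"encoding/json"
	"testing"
)

// TestFilesListDecode is a hypothetical test, not part of the diff.
func TestFilesListDecode(t *testing.T) {
	// Made-up payload shaped after the struct tags above.
	payload := []byte(`{"Status":"OK","items":[{"filename":"report.pdf","size":104857,"url":"https://1fichier.com/?abcdefgh"}]}`)
	var list FilesList
	if err := json.Unmarshal(payload, &list); err != nil {
		t.Fatal(err)
	}
	if list.Status != "OK" || len(list.Items) != 1 || list.Items[0].Filename != "report.pdf" {
		t.Errorf("unexpected decode: %+v", list)
	}
}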
backend/ftp/ftp.go (new file, 846 lines)
@@ -0,0 +1,846 @@
// Package ftp interfaces with FTP servers
package ftp

import (
	"context"
	"crypto/tls"
	"io"
	"net/textproto"
	"os"
	"path"
	"sync"
	"time"

	"github.com/jlaffaye/ftp"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/readers"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "ftp",
		Description: "FTP Connection",
		NewFs:       NewFs,
		Options: []fs.Option{
			{
				Name:     "host",
				Help:     "FTP host to connect to",
				Required: true,
				Examples: []fs.OptionExample{{
					Value: "ftp.example.com",
					Help:  "Connect to ftp.example.com",
				}},
			}, {
				Name: "user",
				Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
			}, {
				Name: "port",
				Help: "FTP port, leave blank to use default (21)",
			}, {
				Name:       "pass",
				Help:       "FTP password",
				IsPassword: true,
				Required:   true,
			}, {
				Name:    "tls",
				Help:    "Use FTP over TLS (Implicit)",
				Default: false,
			}, {
				Name:     "concurrency",
				Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
				Default:  0,
				Advanced: true,
			}, {
				Name:     "no_check_certificate",
				Help:     "Do not verify the TLS certificate of the server",
				Default:  false,
				Advanced: true,
			},
		},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Host              string `config:"host"`
	User              string `config:"user"`
	Pass              string `config:"pass"`
	Port              string `config:"port"`
	TLS               bool   `config:"tls"`
	Concurrency       int    `config:"concurrency"`
	SkipVerifyTLSCert bool   `config:"no_check_certificate"`
}

// Fs represents a remote FTP server
type Fs struct {
	name     string       // name of this remote
	root     string       // the path we are working on if any
	opt      Options      // parsed options
	features *fs.Features // optional features
	url      string
	user     string
	pass     string
	dialAddr string
	poolMu   sync.Mutex
	pool     []*ftp.ServerConn
	tokens   *pacer.TokenDispenser
}

// Object describes an FTP file
type Object struct {
	fs     *Fs
	remote string
	info   *FileInfo
}

// FileInfo is the metadata known about an FTP file
type FileInfo struct {
	Name    string
	Size    uint64
	ModTime time.Time
	IsDir   bool
}

// ------------------------------------------------------------

// Name of this fs
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String returns a description of the FS
func (f *Fs) String() string {
	return f.url
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
	fs.Debugf(f, "Connecting to FTP server")
	ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
	if f.opt.TLS {
		tlsConfig := &tls.Config{
			ServerName:         f.opt.Host,
			InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
		}
		ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
	}
	c, err := ftp.Dial(f.dialAddr, ftpConfig...)
	if err != nil {
		fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
		return nil, errors.Wrap(err, "ftpConnection Dial")
	}
	err = c.Login(f.user, f.pass)
	if err != nil {
		_ = c.Quit()
		fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
		return nil, errors.Wrap(err, "ftpConnection Login")
	}
	return c, nil
}

// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
	if f.opt.Concurrency > 0 {
		f.tokens.Get()
	}
	f.poolMu.Lock()
	if len(f.pool) > 0 {
		c = f.pool[0]
		f.pool = f.pool[1:]
	}
	f.poolMu.Unlock()
	if c != nil {
		return c, nil
	}
	return f.ftpConnection()
}

// Return an FTP connection to the pool
//
// It nils out the pointed-to connection so it can't be reused
//
// If err is not nil then it checks the connection is alive using a
// NOOP request
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
	if f.opt.Concurrency > 0 {
		defer f.tokens.Put()
	}
	c := *pc
	*pc = nil
	if err != nil {
		// If not a regular FTP error code then check the connection
		_, isRegularError := errors.Cause(err).(*textproto.Error)
		if !isRegularError {
			nopErr := c.NoOp()
			if nopErr != nil {
				fs.Debugf(f, "Connection failed, closing: %v", nopErr)
				_ = c.Quit()
				return
			}
		}
	}
	f.poolMu.Lock()
	f.pool = append(f.pool, c)
	f.poolMu.Unlock()
}
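Editor's note: a hedged sketch of the borrow/return discipline that getFtpConnection and putFtpConnection establish. The Mkdir-like body is illustrative (the method name is made up); the point is that the connection is always handed back together with the error, so a dead connection is detected with a NOOP and dropped rather than returned to the pool.

// mkdirSketch is a hypothetical call site, not part of the diff.
func (f *Fs) mkdirSketch(dir string) error {
	c, err := f.getFtpConnection() // borrow a pooled connection (or dial)
	if err != nil {
		return errors.Wrap(err, "mkdirSketch")
	}
	err = c.MakeDir(path.Join(f.root, dir)) // use the borrowed connection
	f.putFtpConnection(&c, err)             // always return it, passing the error
	return translateErrorDir(err)
}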
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
ctx := context.Background()
|
||||
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pass, err := obscure.Reveal(opt.Pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewFS decrypt password")
|
||||
}
|
||||
user := opt.User
|
||||
if user == "" {
|
||||
user = os.Getenv("USER")
|
||||
}
|
||||
port := opt.Port
|
||||
if port == "" {
|
||||
port = "21"
|
||||
}
|
||||
|
||||
dialAddr := opt.Host + ":" + port
|
||||
protocol := "ftp://"
|
||||
if opt.TLS {
|
||||
protocol = "ftps://"
|
||||
}
|
||||
u := protocol + path.Join(dialAddr+"/", root)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
url: u,
|
||||
user: user,
|
||||
pass: pass,
|
||||
dialAddr: dialAddr,
|
||||
tokens: pacer.NewTokenDispenser(opt.Concurrency),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
// Make a connection and pool it to return errors early
|
||||
c, err := f.getFtpConnection()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewFs")
|
||||
}
|
||||
f.putFtpConnection(&c, nil)
|
||||
if root != "" {
|
||||
// Check to see if the root actually an existing file
|
||||
remote := path.Base(root)
|
||||
f.root = path.Dir(root)
|
||||
if f.root == "." {
|
||||
f.root = ""
|
||||
}
|
||||
_, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
||||
// File doesn't exist so return old f
|
||||
f.root = root
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, err
|
||||
}
|
||||

// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
	switch errX := err.(type) {
	case *textproto.Error:
		switch errX.Code {
		case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
			err = fs.ErrorObjectNotFound
		}
	}
	return err
}

// translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error {
	switch errX := err.(type) {
	case *textproto.Error:
		switch errX.Code {
		case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
			err = fs.ErrorDirNotFound
		}
	}
	return err
}
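
// For example, a 550 reply surfaces from the ftp library as a
// *textproto.Error, which the translators above map onto rclone's
// sentinel errors (a sketch, with an illustrative message):
//
//	err := &textproto.Error{Code: ftp.StatusFileUnavailable, Msg: "file not found"}
//	_ = translateErrorFile(err) // == fs.ErrorObjectNotFound
//	_ = translateErrorDir(err)  // == fs.ErrorDirNotFound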

// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
	fullPath := path.Join(f.root, remote)
	dir := path.Dir(fullPath)
	base := path.Base(fullPath)

	c, err := f.getFtpConnection()
	if err != nil {
		return nil, errors.Wrap(err, "findItem")
	}
	files, err := c.List(dir)
	f.putFtpConnection(&c, err)
	if err != nil {
		return nil, translateErrorFile(err)
	}
	for _, file := range files {
		if file.Name == base {
			return file, nil
		}
	}
	return nil, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
	entry, err := f.findItem(remote)
	if err != nil {
		return nil, err
	}
	if entry != nil && entry.Type != ftp.EntryTypeFolder {
		o := &Object{
			fs:     f,
			remote: remote,
		}
		info := &FileInfo{
			Name:    remote,
			Size:    entry.Size,
			ModTime: entry.Time,
		}
		o.info = info

		return o, nil
	}
	return nil, fs.ErrorObjectNotFound
}

// dirExists checks whether the directory pointed to by remote exists
func (f *Fs) dirExists(remote string) (exists bool, err error) {
	entry, err := f.findItem(remote)
	if err != nil {
		return false, errors.Wrap(err, "dirExists")
	}
	if entry != nil && entry.Type == ftp.EntryTypeFolder {
		return true, nil
	}
	return false, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
	c, err := f.getFtpConnection()
	if err != nil {
		return nil, errors.Wrap(err, "list")
	}

	var listErr error
	var files []*ftp.Entry

	resultchan := make(chan []*ftp.Entry, 1)
	errchan := make(chan error, 1)
	go func() {
		result, err := c.List(path.Join(f.root, dir))
		f.putFtpConnection(&c, err)
		if err != nil {
			errchan <- err
			return
		}
		resultchan <- result
	}()

	// Wait for List for up to Timeout seconds
	timer := time.NewTimer(fs.Config.Timeout)
	select {
	case listErr = <-errchan:
		timer.Stop()
		return nil, translateErrorDir(listErr)
	case files = <-resultchan:
		timer.Stop()
	case <-timer.C:
		// if the timer fired assume no error but connection dead
		fs.Errorf(f, "Timeout when waiting for List")
		return nil, errors.New("Timeout when waiting for List")
	}

	// Annoyingly FTP returns success for a directory which
	// doesn't exist, so check it really doesn't exist if no
	// entries found.
	if len(files) == 0 {
		exists, err := f.dirExists(dir)
		if err != nil {
			return nil, errors.Wrap(err, "list")
		}
		if !exists {
			return nil, fs.ErrorDirNotFound
		}
	}
	for i := range files {
		object := files[i]
		newremote := path.Join(dir, object.Name)
		switch object.Type {
		case ftp.EntryTypeFolder:
			if object.Name == "." || object.Name == ".." {
				continue
			}
			d := fs.NewDir(newremote, object.Time)
			entries = append(entries, d)
		default:
			o := &Object{
				fs:     f,
				remote: newremote,
			}
			info := &FileInfo{
				Name:    newremote,
				Size:    object.Size,
				ModTime: object.Time,
			}
			o.info = info
			entries = append(entries, o)
		}
	}
	return entries, nil
}
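
// The select above is a general pattern for bounding a blocking call:
// run it in a goroutine feeding buffered channels, then race the result
// against a timer. A condensed sketch (fn and timeout are placeholders):
//
//	done := make(chan error, 1)
//	go func() { done <- fn() }()
//	select {
//	case err := <-done:
//		// fn finished in time
//	case <-time.After(timeout):
//		// assume the connection is dead; the buffered channel
//		// lets the goroutine finish without leaking
//	}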

// Hashes are not supported
func (f *Fs) Hashes() hash.Set {
	return 0
}

// Precision shows Modified Time not supported
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Put the object into the remote path with the given modTime and size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// fs.Debugf(f, "Trying to put file %s", src.Remote())
	err := f.mkParentDir(src.Remote())
	if err != nil {
		return nil, errors.Wrap(err, "Put mkParentDir failed")
	}
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	err = o.Update(ctx, in, src, options...)
	return o, err
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
	// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
	dir := path.Dir(remote)
	base := path.Base(remote)

	c, err := f.getFtpConnection()
	if err != nil {
		return nil, errors.Wrap(err, "getInfo")
	}
	files, err := c.List(dir)
	f.putFtpConnection(&c, err)
	if err != nil {
		return nil, translateErrorFile(err)
	}

	for i := range files {
		if files[i].Name == base {
			info := &FileInfo{
				Name:    remote,
				Size:    files[i].Size,
				ModTime: files[i].Time,
				IsDir:   files[i].Type == ftp.EntryTypeFolder,
			}
			return info, nil
		}
	}
	return nil, fs.ErrorObjectNotFound
}

// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error {
	if abspath == "." || abspath == "/" {
		return nil
	}
	fi, err := f.getInfo(abspath)
	if err == nil {
		if fi.IsDir {
			return nil
		}
		return fs.ErrorIsFile
	} else if err != fs.ErrorObjectNotFound {
		return errors.Wrapf(err, "mkdir %q failed", abspath)
	}
	parent := path.Dir(abspath)
	err = f.mkdir(parent)
	if err != nil {
		return err
	}
	c, connErr := f.getFtpConnection()
	if connErr != nil {
		return errors.Wrap(connErr, "mkdir")
	}
	err = c.MakeDir(abspath)
	f.putFtpConnection(&c, err)
	switch errX := err.(type) {
	case *textproto.Error:
		switch errX.Code {
		case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
			err = nil
		case 521: // dir already exists: error number according to RFC 959: issue #2363
			err = nil
		}
	}
	return err
}
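
// A worked example of the recursion above (assuming none of the
// directories exist yet): mkdir("a/b/c") recurses up towards the root,
// creating each level on the way back down, and "already exists"
// replies (550, or 521 per RFC 959) are masked so concurrent creators
// don't fail:
//
//	err := f.mkdir("a/b/c")
//	// MKD a     -> created (or 550/521, ignored)
//	// MKD a/b   -> created
//	// MKD a/b/c -> created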

// mkParentDir makes the parent of remote if necessary and any
// directories above that
func (f *Fs) mkParentDir(remote string) error {
	parent := path.Dir(remote)
	return f.mkdir(path.Join(f.root, parent))
}

// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	// defer fs.Trace(dir, "")("err=%v", &err)
	root := path.Join(f.root, dir)
	return f.mkdir(root)
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	c, err := f.getFtpConnection()
	if err != nil {
		return errors.Wrap(translateErrorFile(err), "Rmdir")
	}
	err = c.RemoveDir(path.Join(f.root, dir))
	f.putFtpConnection(&c, err)
	return translateErrorDir(err)
}

// Move renames a remote file object
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	err := f.mkParentDir(remote)
	if err != nil {
		return nil, errors.Wrap(err, "Move mkParentDir failed")
	}
	c, err := f.getFtpConnection()
	if err != nil {
		return nil, errors.Wrap(err, "Move")
	}
	err = c.Rename(
		path.Join(srcObj.fs.root, srcObj.remote),
		path.Join(f.root, remote),
	)
	f.putFtpConnection(&c, err)
	if err != nil {
		return nil, errors.Wrap(err, "Move Rename failed")
	}
	dstObj, err := f.NewObject(ctx, remote)
	if err != nil {
		return nil, errors.Wrap(err, "Move NewObject failed")
	}
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Check if destination exists
	fi, err := f.getInfo(dstPath)
	if err == nil {
		if fi.IsDir {
			return fs.ErrorDirExists
		}
		return fs.ErrorIsFile
	} else if err != fs.ErrorObjectNotFound {
		return errors.Wrap(err, "DirMove getInfo failed")
	}

	// Make sure the parent directory exists
	err = f.mkdir(path.Dir(dstPath))
	if err != nil {
		return errors.Wrap(err, "DirMove mkParentDir dst failed")
	}

	// Do the move
	c, err := f.getFtpConnection()
	if err != nil {
		return errors.Wrap(err, "DirMove")
	}
	err = c.Rename(
		srcPath,
		dstPath,
	)
	f.putFtpConnection(&c, err)
	if err != nil {
		return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
	}
	return nil
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// String version of o
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return int64(o.info.Size)
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.info.ModTime
}

// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return nil
}

// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// ftpReadCloser implements io.ReadCloser for FTP objects.
type ftpReadCloser struct {
	rc  io.ReadCloser
	c   *ftp.ServerConn
	f   *Fs
	err error // errors found during read
}

// Read bytes into p
func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
	n, err = f.rc.Read(p)
	if err != nil && err != io.EOF {
		f.err = err // store any errors for Close to examine
	}
	return
}

// Close the FTP reader and return the connection to the pool
func (f *ftpReadCloser) Close() error {
	var err error
	errchan := make(chan error, 1)
	go func() {
		errchan <- f.rc.Close()
	}()
	// Wait for Close for up to 60 seconds
	timer := time.NewTimer(60 * time.Second)
	select {
	case err = <-errchan:
		timer.Stop()
	case <-timer.C:
		// if the timer fired assume no error but connection dead
		fs.Errorf(f.f, "Timeout when waiting for connection Close")
		return nil
	}
	// if errors while reading or closing, dump the connection
	if err != nil || f.err != nil {
		_ = f.c.Quit()
	} else {
		f.f.putFtpConnection(&f.c, nil)
	}
	// mask the error if it was caused by a premature close
	switch errX := err.(type) {
	case *textproto.Error:
		switch errX.Code {
		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
			err = nil
		}
	}
	return err
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
	// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
	path := path.Join(o.fs.root, o.remote)
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	c, err := o.fs.getFtpConnection()
	if err != nil {
		return nil, errors.Wrap(err, "open")
	}
	fd, err := c.RetrFrom(path, uint64(offset))
	if err != nil {
		o.fs.putFtpConnection(&c, err)
		return nil, errors.Wrap(err, "open")
	}
	rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
	return rc, nil
}
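
// A sketch of how a range request decodes into the seek offset and the
// byte limit used above, for a 100 byte object (per fs.RangeOption.Decode):
//
//	r := fs.RangeOption{Start: 10, End: 19}
//	offset, limit := r.Decode(100) // offset == 10, limit == 10
//	// RetrFrom then restarts at byte 10 and the reader is capped at 10 bytes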

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
	path := path.Join(o.fs.root, o.remote)
	// remove the file if upload failed
	remove := func() {
		// Give the FTP server a chance to get its internal state in order after the error.
		// The error may have been local in which case we closed the connection. The server
		// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
		// able to think of a better method to find out if the server has finished - ncw
		time.Sleep(1 * time.Second)
		removeErr := o.Remove(ctx)
		if removeErr != nil {
			fs.Debugf(o, "Failed to remove: %v", removeErr)
		} else {
			fs.Debugf(o, "Removed after failed upload: %v", err)
		}
	}
	c, err := o.fs.getFtpConnection()
	if err != nil {
		return errors.Wrap(err, "Update")
	}
	err = c.Stor(path, in)
	if err != nil {
		_ = c.Quit() // toss this connection to avoid sync errors
		remove()
		return errors.Wrap(err, "update stor")
	}
	o.fs.putFtpConnection(&c, nil)
	o.info, err = o.fs.getInfo(path)
	if err != nil {
		return errors.Wrap(err, "update getinfo")
	}
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	// defer fs.Trace(o, "")("err=%v", &err)
	path := path.Join(o.fs.root, o.remote)
	// Check if it's a directory or a file
	info, err := o.fs.getInfo(path)
	if err != nil {
		return err
	}
	if info.IsDir {
		err = o.fs.Rmdir(ctx, o.remote)
	} else {
		// assign to the outer err (rather than redeclaring it with :=)
		// so a Delete failure is actually returned to the caller
		var c *ftp.ServerConn
		c, err = o.fs.getFtpConnection()
		if err != nil {
			return errors.Wrap(err, "Remove")
		}
		err = c.Delete(path)
		o.fs.putFtpConnection(&c, err)
	}
	return err
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.Mover       = &Fs{}
	_ fs.DirMover    = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.Object      = &Object{}
)
17	backend/ftp/ftp_test.go	Normal file
@@ -0,0 +1,17 @@
// Test FTP filesystem interface
package ftp_test

import (
	"testing"

	"github.com/rclone/rclone/backend/ftp"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFTP:",
		NilObject:  (*ftp.Object)(nil),
	})
}
1047	backend/googlecloudstorage/googlecloudstorage.go	Normal file
File diff suppressed because it is too large
18	backend/googlecloudstorage/googlecloudstorage_test.go	Normal file
@@ -0,0 +1,18 @@
// Test GoogleCloudStorage filesystem interface

package googlecloudstorage_test

import (
	"testing"

	"github.com/rclone/rclone/backend/googlecloudstorage"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestGoogleCloudStorage:",
		NilObject:  (*googlecloudstorage.Object)(nil),
	})
}
148	backend/googlephotos/albums.go	Normal file
@@ -0,0 +1,148 @@
// This file contains the albums abstraction

package googlephotos

import (
	"path"
	"strings"
	"sync"

	"github.com/rclone/rclone/backend/googlephotos/api"
)

// All the albums
type albums struct {
	mu      sync.Mutex
	dupes   map[string][]*api.Album // duplicated names
	byID    map[string]*api.Album   // indexed by ID
	byTitle map[string]*api.Album   // indexed by Title
	path    map[string][]string     // partial album names to directory
}

// Create a new album
func newAlbums() *albums {
	return &albums{
		dupes:   map[string][]*api.Album{},
		byID:    map[string]*api.Album{},
		byTitle: map[string]*api.Album{},
		path:    map[string][]string{},
	}
}

// add an album
func (as *albums) add(album *api.Album) {
	// Munge the name of the album into a sensible path name
	album.Title = path.Clean(album.Title)
	if album.Title == "." || album.Title == "/" {
		album.Title = addID("", album.ID)
	}

	as.mu.Lock()
	as._add(album)
	as.mu.Unlock()
}

// _add an album - call with lock held
func (as *albums) _add(album *api.Album) {
	// update dupes by title
	dupes := as.dupes[album.Title]
	dupes = append(dupes, album)
	as.dupes[album.Title] = dupes

	// Dedupe the album name if necessary
	if len(dupes) >= 2 {
		// If this is the first dupe, then need to adjust the first one
		if len(dupes) == 2 {
			firstAlbum := dupes[0]
			as._del(firstAlbum)
			as._add(firstAlbum)
			// undo add of firstAlbum to dupes
			as.dupes[album.Title] = dupes
		}
		album.Title = addID(album.Title, album.ID)
	}

	// Store the new album
	as.byID[album.ID] = album
	as.byTitle[album.Title] = album

	// Store the partial paths
	dir, leaf := album.Title, ""
	for dir != "" {
		i := strings.LastIndex(dir, "/")
		if i >= 0 {
			dir, leaf = dir[:i], dir[i+1:]
		} else {
			dir, leaf = "", dir
		}
		dirs := as.path[dir]
		found := false
		for _, dir := range dirs {
			if dir == leaf {
				found = true
			}
		}
		if !found {
			as.path[dir] = append(as.path[dir], leaf)
		}
	}
}

// del an album
func (as *albums) del(album *api.Album) {
	as.mu.Lock()
	as._del(album)
	as.mu.Unlock()
}

// _del an album - call with lock held
func (as *albums) _del(album *api.Album) {
	// We leave in dupes so it doesn't cause albums to get renamed

	// Remove from byID and byTitle
	delete(as.byID, album.ID)
	delete(as.byTitle, album.Title)

	// Remove from paths
	dir, leaf := album.Title, ""
	for dir != "" {
		// Can't delete if this dir exists anywhere in the path structure
		if _, found := as.path[dir]; found {
			break
		}
		i := strings.LastIndex(dir, "/")
		if i >= 0 {
			dir, leaf = dir[:i], dir[i+1:]
		} else {
			dir, leaf = "", dir
		}
		dirs := as.path[dir]
		for i, dir := range dirs {
			if dir == leaf {
				dirs = append(dirs[:i], dirs[i+1:]...)
				break
			}
		}
		if len(dirs) == 0 {
			delete(as.path, dir)
		} else {
			as.path[dir] = dirs
		}
	}
}

// get an album by title
func (as *albums) get(title string) (album *api.Album, ok bool) {
	as.mu.Lock()
	defer as.mu.Unlock()
	album, ok = as.byTitle[title]
	return album, ok
}

// getDirs gets directories below an album path
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
	as.mu.Lock()
	defer as.mu.Unlock()
	dirs, ok = as.path[albumPath]
	return dirs, ok
}
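
// A sketch of how the cache answers lookups after add()ing albums
// titled "one" and "one/sub" (matching the unit tests below):
//
//	album, ok := as.get("one")    // ok == true, album.ID == "1"
//	dirs, ok2 := as.getDirs("")   // ok2 == true, dirs == []string{"one"}
//	dirs, ok2 = as.getDirs("one") // ok2 == true, dirs == []string{"sub"}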
311	backend/googlephotos/albums_test.go	Normal file
@@ -0,0 +1,311 @@
package googlephotos

import (
	"testing"

	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/stretchr/testify/assert"
)

func TestNewAlbums(t *testing.T) {
	albums := newAlbums()
	assert.NotNil(t, albums.dupes)
	assert.NotNil(t, albums.byID)
	assert.NotNil(t, albums.byTitle)
	assert.NotNil(t, albums.path)
}

func TestAlbumsAdd(t *testing.T) {
	albums := newAlbums()

	assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{}, albums.byID)
	assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
	assert.Equal(t, map[string][]string{}, albums.path)

	a1 := &api.Album{
		Title: "one",
		ID:    "1",
	}
	albums.add(a1)

	assert.Equal(t, map[string][]*api.Album{
		"one": []*api.Album{a1},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1": a1,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one": a1,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"": []string{"one"},
	}, albums.path)

	a2 := &api.Album{
		Title: "two",
		ID:    "2",
	}
	albums.add(a2)

	assert.Equal(t, map[string][]*api.Album{
		"one": []*api.Album{a1},
		"two": []*api.Album{a2},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1": a1,
		"2": a2,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one": a1,
		"two": a2,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"": []string{"one", "two"},
	}, albums.path)

	// Add a duplicate
	a2a := &api.Album{
		Title: "two",
		ID:    "2a",
	}
	albums.add(a2a)

	assert.Equal(t, map[string][]*api.Album{
		"one": []*api.Album{a1},
		"two": []*api.Album{a2, a2a},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1":  a1,
		"2":  a2,
		"2a": a2a,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one":      a1,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"": []string{"one", "two {2}", "two {2a}"},
	}, albums.path)

	// Add a sub directory
	a1sub := &api.Album{
		Title: "one/sub",
		ID:    "1sub",
	}
	albums.add(a1sub)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1":    a1,
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one":      a1,
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)

	// Add a weird path
	a0 := &api.Album{
		Title: "/../././..////.",
		ID:    "0",
	}
	albums.add(a0)

	assert.Equal(t, map[string][]*api.Album{
		"{0}":     []*api.Album{a0},
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"0":    a0,
		"1":    a1,
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"{0}":      a0,
		"one":      a1,
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}", "{0}"},
		"one": []string{"sub"},
	}, albums.path)
}

func TestAlbumsDel(t *testing.T) {
	albums := newAlbums()

	a1 := &api.Album{
		Title: "one",
		ID:    "1",
	}
	albums.add(a1)

	a2 := &api.Album{
		Title: "two",
		ID:    "2",
	}
	albums.add(a2)

	// Add a duplicate
	a2a := &api.Album{
		Title: "two",
		ID:    "2a",
	}
	albums.add(a2a)

	// Add a sub directory
	a1sub := &api.Album{
		Title: "one/sub",
		ID:    "1sub",
	}
	albums.add(a1sub)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1":    a1,
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one":      a1,
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)

	albums.del(a1)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)

	albums.del(a2)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one/sub":  a1sub,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)

	albums.del(a2a)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one/sub": a1sub,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one"},
		"one": []string{"sub"},
	}, albums.path)

	albums.del(a1sub)

	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{}, albums.byID)
	assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
	assert.Equal(t, map[string][]string{}, albums.path)
}

func TestAlbumsGet(t *testing.T) {
	albums := newAlbums()

	a1 := &api.Album{
		Title: "one",
		ID:    "1",
	}
	albums.add(a1)

	album, ok := albums.get("one")
	assert.Equal(t, true, ok)
	assert.Equal(t, a1, album)

	album, ok = albums.get("notfound")
	assert.Equal(t, false, ok)
	assert.Nil(t, album)
}

func TestAlbumsGetDirs(t *testing.T) {
	albums := newAlbums()

	a1 := &api.Album{
		Title: "one",
		ID:    "1",
	}
	albums.add(a1)

	dirs, ok := albums.getDirs("")
	assert.Equal(t, true, ok)
	assert.Equal(t, []string{"one"}, dirs)

	dirs, ok = albums.getDirs("notfound")
	assert.Equal(t, false, ok)
	assert.Nil(t, dirs)
}
190	backend/googlephotos/api/types.go	Normal file
@@ -0,0 +1,190 @@
package api

import (
	"fmt"
	"time"
)

// ErrorDetails is the internals of the Error type
type ErrorDetails struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Status  string `json:"status"`
}

// Error is returned on errors
type Error struct {
	Details ErrorDetails `json:"error"`
}

// Error satisfies the error interface
func (e *Error) Error() string {
	return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}

// Album of photos
type Album struct {
	ID                    string `json:"id,omitempty"`
	Title                 string `json:"title"`
	ProductURL            string `json:"productUrl,omitempty"`
	MediaItemsCount       string `json:"mediaItemsCount,omitempty"`
	CoverPhotoBaseURL     string `json:"coverPhotoBaseUrl,omitempty"`
	CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
	IsWriteable           bool   `json:"isWriteable,omitempty"`
}

// ListAlbums is returned from albums.list and sharedAlbums.list
type ListAlbums struct {
	Albums        []Album `json:"albums"`
	SharedAlbums  []Album `json:"sharedAlbums"`
	NextPageToken string  `json:"nextPageToken"`
}

// CreateAlbum creates an Album
type CreateAlbum struct {
	Album *Album `json:"album"`
}

// MediaItem is a photo or video
type MediaItem struct {
	ID            string `json:"id"`
	ProductURL    string `json:"productUrl"`
	BaseURL       string `json:"baseUrl"`
	MimeType      string `json:"mimeType"`
	MediaMetadata struct {
		CreationTime time.Time `json:"creationTime"`
		Width        string    `json:"width"`
		Height       string    `json:"height"`
		Photo        struct {
		} `json:"photo"`
	} `json:"mediaMetadata"`
	Filename string `json:"filename"`
}

// MediaItems is returned from mediaitems.list, mediaitems.search
type MediaItems struct {
	MediaItems    []MediaItem `json:"mediaItems"`
	NextPageToken string      `json:"nextPageToken"`
}

// Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
// CITYSCAPES Media items containing cityscapes.
// LANDMARKS Media items containing landmarks.
// SELFIES Media items that are selfies.
// PEOPLE Media items containing people.
// PETS Media items containing pets.
// WEDDINGS Media items from weddings.
// BIRTHDAYS Media items from birthdays.
// DOCUMENTS Media items containing documents.
// TRAVEL Media items taken during travel.
// ANIMALS Media items containing animals.
// FOOD Media items containing food.
// SPORT Media items from sporting events.
// NIGHT Media items taken at night.
// PERFORMANCES Media items from performances.
// WHITEBOARDS Media items containing whiteboards.
// SCREENSHOTS Media items that are screenshots.
// UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc.
// ARTS Media items containing art.
// CRAFTS Media items containing crafts.
// FASHION Media items related to fashion.
// HOUSES Media items containing houses.
// GARDENS Media items containing gardens.
// FLOWERS Media items containing flowers.
// HOLIDAYS Media items taken of holidays.

// MediaTypes
// ALL_MEDIA Treated as if no filters are applied. All media types are included.
// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.

// Features
// NONE Treated as if no filters are applied. All features are included.
// FAVORITES Media items that the user has marked as favorites in the Google Photos app.

// Date is used as part of SearchFilter
type Date struct {
	Year  int `json:"year,omitempty"`
	Month int `json:"month,omitempty"`
	Day   int `json:"day,omitempty"`
}

// DateFilter is used to add date ranges to media item queries
type DateFilter struct {
	Dates  []Date `json:"dates,omitempty"`
	Ranges []struct {
		StartDate Date `json:"startDate,omitempty"`
		EndDate   Date `json:"endDate,omitempty"`
	} `json:"ranges,omitempty"`
}

// ContentFilter is used to add content categories to media item queries
type ContentFilter struct {
	IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
	ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
}

// MediaTypeFilter is used to add media types to media item queries
type MediaTypeFilter struct {
	MediaTypes []string `json:"mediaTypes,omitempty"`
}

// FeatureFilter is used to add features to media item queries
type FeatureFilter struct {
	IncludedFeatures []string `json:"includedFeatures,omitempty"`
}

// Filters combines all the filter types for media item queries
type Filters struct {
	DateFilter               *DateFilter      `json:"dateFilter,omitempty"`
	ContentFilter            *ContentFilter   `json:"contentFilter,omitempty"`
	MediaTypeFilter          *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
	FeatureFilter            *FeatureFilter   `json:"featureFilter,omitempty"`
	IncludeArchivedMedia     *bool            `json:"includeArchivedMedia,omitempty"`
	ExcludeNonAppCreatedData *bool            `json:"excludeNonAppCreatedData,omitempty"`
}

// SearchFilter is used with mediaItems.search
type SearchFilter struct {
	AlbumID   string   `json:"albumId,omitempty"`
	PageSize  int      `json:"pageSize"`
	PageToken string   `json:"pageToken,omitempty"`
	Filters   *Filters `json:"filters,omitempty"`
}
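
// A sketch of building a search request for photos taken in 2019
// (field values are illustrative; the Google Photos API does not allow
// an albumId together with filters, so only Filters is set here):
//
//	filter := SearchFilter{
//		PageSize: 100,
//		Filters: &Filters{
//			MediaTypeFilter: &MediaTypeFilter{MediaTypes: []string{"PHOTO"}},
//			DateFilter:      &DateFilter{Dates: []Date{{Year: 2019}}},
//		},
//	}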

// SimpleMediaItem is part of NewMediaItem
type SimpleMediaItem struct {
	UploadToken string `json:"uploadToken"`
}

// NewMediaItem is a single media item for upload
type NewMediaItem struct {
	Description     string          `json:"description"`
	SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
}

// BatchCreateRequest creates media items from upload tokens
type BatchCreateRequest struct {
	AlbumID       string         `json:"albumId,omitempty"`
	NewMediaItems []NewMediaItem `json:"newMediaItems"`
}

// BatchCreateResponse is returned from BatchCreateRequest
type BatchCreateResponse struct {
	NewMediaItemResults []struct {
		UploadToken string `json:"uploadToken"`
		Status      struct {
			Message string `json:"message"`
			Code    int    `json:"code"`
		} `json:"status"`
		MediaItem MediaItem `json:"mediaItem"`
	} `json:"newMediaItemResults"`
}

// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
	MediaItemIds []string `json:"mediaItemIds"`
}
970	backend/googlephotos/googlephotos.go	Normal file
@@ -0,0 +1,970 @@
// Package googlephotos provides an interface to Google Photos
package googlephotos

// FIXME Resumable uploads not implemented - rclone can't resume uploads in general

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	golog "log"
	"net/http"
	"net/url"
	"path"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/dirtree"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

var (
	errCantUpload  = errors.New("can't upload files here")
	errCantMkdir   = errors.New("can't make directories here")
	errCantRmdir   = errors.New("can't remove this directory")
	errAlbumDelete = errors.New("google photos API does not implement deleting albums")
	errRemove      = errors.New("google photos API only implements removing files from albums")
	errOwnAlbums   = errors.New("google photos API only allows uploading to albums rclone created")
)

const (
	rcloneClientID              = "202264815644-rt1o1c9evjaotbpbab10m83i8cnjk077.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "kLJLretPefBgrDHosdml_nlF64HZ9mUcO85X5rdjYBPP8ChA-jr3Ow"
	rootURL                     = "https://photoslibrary.googleapis.com/v1"
	listChunks                  = 100 // chunk size to read directory listings
	albumChunks                 = 50  // chunk size to read album listings
	minSleep                    = 10 * time.Millisecond
	scopeReadOnly               = "https://www.googleapis.com/auth/photoslibrary.readonly"
	scopeReadWrite              = "https://www.googleapis.com/auth/photoslibrary"
)

var (
	// Description of how to auth for this app
	oauthConfig = &oauth2.Config{
		Scopes: []string{
			scopeReadWrite,
		},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "google photos",
		Prefix:      "gphotos",
		Description: "Google Photos",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
			// Parse config into Options struct
			opt := new(Options)
			err := configstruct.Set(m, opt)
			if err != nil {
				fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
				return
			}

			// Fill in the scopes
			if opt.ReadOnly {
				oauthConfig.Scopes[0] = scopeReadOnly
			} else {
				oauthConfig.Scopes[0] = scopeReadWrite
			}

			// Do the oauth
			err = oauthutil.Config("google photos", name, m, oauthConfig)
			if err != nil {
				golog.Fatalf("Failed to configure token: %v", err)
			}

			// Warn the user
			fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.

`)

		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Google Application Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Google Application Client Secret\nLeave blank normally.",
		}, {
			Name:    "read_only",
			Default: false,
			Help: `Set to make the Google Photos backend read only.

If you choose read only then rclone will only request read only access
to your photos, otherwise rclone will request full access.`,
		}, {
			Name:    "read_size",
			Default: false,
			Help: `Set to read the size of media items.

Normally rclone does not read the size of media items since this takes
another transaction. This isn't necessary for syncing. However
rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.`,
			Advanced: true,
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	ReadOnly bool `config:"read_only"`
	ReadSize bool `config:"read_size"`
}

// Fs represents a remote storage server
type Fs struct {
	name       string           // name of this remote
	root       string           // the path we are working on if any
	opt        Options          // parsed options
	features   *fs.Features     // optional features
	srv        *rest.Client     // the connection to the server
	pacer      *fs.Pacer        // To pace the API calls
	startTime  time.Time        // time Fs was started - used for datestamps
	albumsMu   sync.Mutex       // protect albums (but not contents)
	albums     map[bool]*albums // albums, shared or not
	uploadedMu sync.Mutex       // to protect the below
	uploaded   dirtree.DirTree  // record of uploaded items
	createMu   sync.Mutex       // held when creating albums to prevent dupes
}

// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	url      string    // download path
	id       string    // ID of this object
	bytes    int64     // Bytes in the object
	modTime  time.Time // Modified time of the object
	mimeType string
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("Google Photos path %q", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// dirTime returns the time to set a directory to
func (f *Fs) dirTime() time.Time {
	return f.startTime
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
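
// A sketch of the call pattern used with shouldRetry throughout this
// backend: the pacer re-runs the closure while shouldRetry reports a
// retryable failure (opts and result as in the calls further below):
//
//	err = f.pacer.Call(func() (bool, error) {
//		resp, err = f.srv.CallJSON(&opts, nil, &result)
//		return shouldRetry(resp, err)
//	})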

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
	body, err := rest.ReadBody(resp)
	if err != nil {
		body = nil
	}
	var e = api.Error{
		Details: api.ErrorDetails{
			Code:    resp.StatusCode,
			Message: string(body),
			Status:  resp.Status,
		},
	}
	if body != nil {
		_ = json.Unmarshal(body, &e)
	}
	return &e
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure google photos")
	}

	root = strings.Trim(path.Clean(root), "/")
	if root == "." || root == "/" {
		root = ""
	}
	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
		pacer:     fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
		startTime: time.Now(),
		albums:    map[bool]*albums{},
		uploaded:  dirtree.New(),
	}
	f.features = (&fs.Features{
		ReadMimeType: true,
	}).Fill(f)
	f.srv.SetErrorHandler(errorHandler)

	_, _, pattern := patterns.match(f.root, "", true)
	if pattern != nil && pattern.isFile {
		oldRoot := f.root
		var leaf string
		f.root, leaf = path.Split(f.root)
		f.root = strings.TrimRight(f.root, "/")
		_, err := f.NewObject(context.TODO(), leaf)
		if err == nil {
			return f, fs.ErrorIsFile
		}
		f.root = oldRoot
	}
	return f, nil
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.MediaItem) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		o.setMetaData(info)
	} else {
		err := o.readMetaData(ctx) // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	defer log.Trace(f, "remote=%q", remote)("")
	return f.newObjectWithInfo(ctx, remote, nil)
}

// addID adds the ID to name
func addID(name string, ID string) string {
	idStr := "{" + ID + "}"
	if name == "" {
		return idStr
	}
	return name + " " + idStr
}

// addFileID adds the ID to the fileName passed in
func addFileID(fileName string, ID string) string {
	ext := path.Ext(fileName)
	base := fileName[:len(fileName)-len(ext)]
	return addID(base, ID) + ext
}

var idRe = regexp.MustCompile(`\{([A-Za-z0-9_-]{55,})\}`)

// findID finds an ID in string if one is there or ""
func findID(name string) string {
	match := idRe.FindStringSubmatch(name)
	if match == nil {
		return ""
	}
	return match[1]
}
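
// Round-trip sketch: addFileID splices the ID in before the extension
// and findID recovers it again (IDs must be at least 55 characters to
// match idRe; "id" below stands in for a real media item ID):
//
//	name := addFileID("IMG_1234.jpg", id) // "IMG_1234 {<id>}.jpg"
//	got := findID(name)                   // got == id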

// list the albums into an internal cache
// FIXME cache invalidation
func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
	f.albumsMu.Lock()
	defer f.albumsMu.Unlock()
	all, ok := f.albums[shared]
	if ok && all != nil {
		return all, nil
	}
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/albums",
		Parameters: url.Values{},
	}
	if shared {
		opts.Path = "/sharedAlbums"
	}
	all = newAlbums()
	opts.Parameters.Set("pageSize", strconv.Itoa(albumChunks))
	lastID := ""
	for {
		var result api.ListAlbums
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(&opts, nil, &result)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "couldn't list albums")
		}
		newAlbums := result.Albums
		if shared {
			newAlbums = result.SharedAlbums
		}
		if len(newAlbums) > 0 && newAlbums[0].ID == lastID {
			// skip first if ID duplicated from last page
			newAlbums = newAlbums[1:]
		}
		if len(newAlbums) > 0 {
			lastID = newAlbums[len(newAlbums)-1].ID
		}
		for i := range newAlbums {
			all.add(&newAlbums[i])
		}
		if result.NextPageToken == "" {
			break
		}
		opts.Parameters.Set("pageToken", result.NextPageToken)
	}
	f.albums[shared] = all
	return all, nil
}
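
// The lastID bookkeeping above guards against the server repeating the
// final item of one page as the first item of the next, which would
// otherwise be added to the cache twice. Schematically:
//
//	// page 1: [a b c] -> lastID = "c"
//	// page 2: [c d e] -> "c" skipped, only d and e added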
|
||||
// listFn is called from list to handle an object.
|
||||
type listFn func(remote string, object *api.MediaItem, isDirectory bool) error
|
||||
|
||||
// list the objects into the function supplied
|
||||
//
|
||||
// dir is the starting directory, "" for root
|
||||
//
|
||||
// Set recurse to read sub directories
|
||||
func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/mediaItems:search",
|
||||
}
|
||||
filter.PageSize = listChunks
|
||||
filter.PageToken = ""
|
||||
lastID := ""
|
||||
for {
|
||||
var result api.MediaItems
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, &filter, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't list files")
|
||||
}
|
||||
items := result.MediaItems
|
||||
if len(items) > 0 && items[0].ID == lastID {
|
||||
// skip first if ID duplicated from last page
|
||||
items = items[1:]
|
||||
}
|
||||
if len(items) > 0 {
|
||||
lastID = items[len(items)-1].ID
|
||||
}
|
||||
for i := range items {
|
||||
item := &result.MediaItems[i]
|
||||
remote := item.Filename
|
||||
remote = strings.Replace(remote, "/", "/", -1)
|
||||
err = fn(remote, item, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if result.NextPageToken == "" {
|
||||
break
|
||||
}
|
||||
filter.PageToken = result.NextPageToken
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert a list item into a DirEntry
|
||||
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaItem, isDirectory bool) (fs.DirEntry, error) {
|
||||
if isDirectory {
|
||||
d := fs.NewDir(remote, f.dirTime())
|
||||
return d, nil
|
||||
}
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
o.setMetaData(item)
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
|
||||
// List the objects
|
||||
err = f.list(filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if entry != nil {
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Dedupe the file names
|
||||
dupes := map[string]int{}
|
||||
for _, entry := range entries {
|
||||
o, ok := entry.(*Object)
|
||||
if ok {
|
||||
dupes[o.remote]++
|
||||
}
|
||||
}
|
||||
for _, entry := range entries {
|
||||
o, ok := entry.(*Object)
|
||||
if ok {
|
||||
duplicated := dupes[o.remote] > 1
|
||||
if duplicated || o.remote == "" {
|
||||
o.remote = addFileID(o.remote, o.id)
|
||||
}
|
||||
}
|
||||
}
|
||||
return entries, err
|
||||
}
|
||||
|
||||
// listUploads lists a single directory from the uploads
|
||||
func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
f.uploadedMu.Lock()
|
||||
entries, ok := f.uploaded[dir]
|
||||
f.uploadedMu.Unlock()
|
||||
if !ok && dir != "" {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return entries, nil
|
||||
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, prefix, pattern := patterns.match(f.root, dir, false)
	if pattern == nil || pattern.isFile {
		return nil, fs.ErrorDirNotFound
	}
	if pattern.toEntries != nil {
		return pattern.toEntries(ctx, f, prefix, match)
	}
	return nil, fs.ErrorDirNotFound
}

// Put the object into the remote
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	defer log.Trace(f, "src=%+v", src)("")
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}

// createAlbum creates the album
func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/albums",
		Parameters: url.Values{},
	}
	var request = api.CreateAlbum{
		Album: &api.Album{
			Title: albumTitle,
		},
	}
	var result api.Album
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, request, &result)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create album")
	}
	f.albums[false].add(&result)
	return &result, nil
}

// getOrCreateAlbum gets an existing album or creates a new one
//
// It does the creation with the lock held to avoid duplicates
func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
	f.createMu.Lock()
	defer f.createMu.Unlock()
	albums, err := f.listAlbums(false)
	if err != nil {
		return nil, err
	}
	album, ok := albums.get(albumTitle)
	if ok {
		return album, nil
	}
	return f.createAlbum(ctx, albumTitle)
}

// Mkdir creates the album if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, prefix, pattern := patterns.match(f.root, dir, false)
	if pattern == nil {
		return fs.ErrorDirNotFound
	}
	if !pattern.canMkdir {
		return errCantMkdir
	}
	if pattern.isUpload {
		f.uploadedMu.Lock()
		d := fs.NewDir(strings.Trim(prefix, "/"), f.dirTime())
		f.uploaded.AddEntry(d)
		f.uploadedMu.Unlock()
		return nil
	}
	albumTitle := match[1]
	_, err = f.getOrCreateAlbum(ctx, albumTitle)
	return err
}
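// Note that album titles may contain "/", so Mkdir on "album/trips/2019"
// creates a single album titled "trips/2019", which the listing code then
// presents as a nested directory.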

// Rmdir removes the directory
//
// Upload directories are pruned from the internal cache. Albums can't
// be deleted through the Google Photos API, so removing an album
// returns an error.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, _, pattern := patterns.match(f.root, dir, false)
	if pattern == nil {
		return fs.ErrorDirNotFound
	}
	if !pattern.canMkdir {
		return errCantRmdir
	}
	if pattern.isUpload {
		f.uploadedMu.Lock()
		err = f.uploaded.Prune(map[string]bool{
			dir: true,
		})
		f.uploadedMu.Unlock()
		return err
	}
	albumTitle := match[1]
	allAlbums, err := f.listAlbums(false)
	if err != nil {
		return err
	}
	album, ok := allAlbums.get(albumTitle)
	if !ok {
		return fs.ErrorDirNotFound
	}
	_ = album
	return errAlbumDelete
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	defer log.Trace(o, "")("")
	if !o.fs.opt.ReadSize || o.bytes >= 0 {
		return o.bytes
	}
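	// Reading the size costs an extra HTTP HEAD request per object,
	// which is why it is gated behind the ReadSize option.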
	ctx := context.TODO()
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "Size: Failed to read metadata: %v", err)
		return -1
	}
	var resp *http.Response
	opts := rest.Opts{
		Method:  "HEAD",
		RootURL: o.downloadURL(),
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		fs.Debugf(o, "Reading size failed: %v", err)
	} else {
		lengthStr := resp.Header.Get("Content-Length")
		length, err := strconv.ParseInt(lengthStr, 10, 64)
		if err != nil {
			fs.Debugf(o, "Reading size failed to parse Content-Length %q: %v", lengthStr, err)
		} else {
			o.bytes = length
		}
	}
	return o.bytes
}

// setMetaData sets the fs data from an api.MediaItem
func (o *Object) setMetaData(info *api.MediaItem) {
	o.url = info.BaseURL
	o.id = info.ID
	o.bytes = -1 // FIXME
	o.mimeType = info.MimeType
	o.modTime = info.MediaMetadata.CreationTime
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if !o.modTime.IsZero() && o.url != "" {
		return nil
	}
	dir, fileName := path.Split(o.remote)
	dir = strings.Trim(dir, "/")
	_, _, pattern := patterns.match(o.fs.root, o.remote, true)
	if pattern == nil {
		return fs.ErrorObjectNotFound
	}
	if !pattern.isFile {
		return fs.ErrorNotAFile
	}
	// If we have an ID, fetch the item directly
	if id := findID(fileName); id != "" {
		opts := rest.Opts{
			Method: "GET",
			Path:   "/mediaItems/" + id,
		}
		var item api.MediaItem
		var resp *http.Response
		err = o.fs.pacer.Call(func() (bool, error) {
			resp, err = o.fs.srv.CallJSON(&opts, nil, &item)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return errors.Wrap(err, "couldn't get media item")
		}
		o.setMetaData(&item)
		return nil
	}
	// Otherwise list the directory the file is in
	entries, err := o.fs.List(ctx, dir)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	// and find the file in the directory
	for _, entry := range entries {
		if entry.Remote() == o.remote {
			if newO, ok := entry.(*Object); ok {
				*o = *newO
				return nil
			}
		}
	}
	return fs.ErrorObjectNotFound
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	defer log.Trace(o, "")("")
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
		return time.Now()
	}
	return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	return fs.ErrorCantSetModTime
}

// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// downloadURL returns the URL for a full bytes download for the object
func (o *Object) downloadURL() string {
	url := o.url + "=d"
	if strings.HasPrefix(o.mimeType, "video/") {
		url += "v"
	}
	return url
}
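// For example, a photo with BaseURL ".../abc" downloads from ".../abc=d",
// while a video uses ".../abc=dv" - the "=d"/"=dv" suffixes ask Google
// Photos for the original bytes rather than a resized preview.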

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	defer log.Trace(o, "")("")
	err = o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "Open: Failed to read metadata: %v", err)
		return nil, err
	}
	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: o.downloadURL(),
		Options: options,
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	return resp.Body, err
}

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	defer log.Trace(o, "src=%+v", src)("err=%v", &err)
	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
	if pattern == nil || !pattern.isFile || !pattern.canUpload {
		return errCantUpload
	}
	var (
		albumID  string
		fileName string
	)
	if pattern.isUpload {
		fileName = match[1]
	} else {
		var albumTitle string
		albumTitle, fileName = match[1], match[2]

		album, err := o.fs.getOrCreateAlbum(ctx, albumTitle)
		if err != nil {
			return err
		}

		if !album.IsWriteable {
			return errOwnAlbums
		}

		albumID = album.ID
	}

	// Upload the media item in exchange for an UploadToken
	opts := rest.Opts{
		Method: "POST",
		Path:   "/uploads",
		ExtraHeaders: map[string]string{
			"X-Goog-Upload-File-Name": fileName,
			"X-Goog-Upload-Protocol":  "raw",
		},
		Body: in,
	}
	var token []byte
	var resp *http.Response
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		if err != nil {
			if resp != nil { // resp can be nil on transport errors
				_ = resp.Body.Close()
			}
			return shouldRetry(resp, err)
		}
		token, err = rest.ReadBody(resp)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't upload file")
	}
	uploadToken := strings.TrimSpace(string(token))
	if uploadToken == "" {
		return errors.New("empty upload token")
	}
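	// The returned token is only a handle to the uploaded bytes - nothing
	// appears in the library until the batchCreate call below registers it.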

	// Create the media item from an UploadToken, optionally adding to an album
	opts = rest.Opts{
		Method: "POST",
		Path:   "/mediaItems:batchCreate",
	}
	var request = api.BatchCreateRequest{
		AlbumID: albumID,
		NewMediaItems: []api.NewMediaItem{
			{
				SimpleMediaItem: api.SimpleMediaItem{
					UploadToken: uploadToken,
				},
			},
		},
	}
	var result api.BatchCreateResponse
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, request, &result)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "failed to create media item")
	}
	if len(result.NewMediaItemResults) != 1 {
		return errors.New("bad response to BatchCreate: wrong number of items")
	}
	mediaItemResult := result.NewMediaItemResults[0]
	if mediaItemResult.Status.Code != 0 {
		return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
	}
	o.setMetaData(&mediaItemResult.MediaItem)

	// Add upload to internal storage
	if pattern.isUpload {
		o.fs.uploaded.AddEntry(o)
	}
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
	if pattern == nil || !pattern.isFile || !pattern.canUpload || pattern.isUpload {
		return errRemove
	}
	albumTitle, fileName := match[1], match[2]
	album, ok := o.fs.albums[false].get(albumTitle)
	if !ok {
		return errors.Errorf("couldn't find %q in album %q for delete", fileName, albumTitle)
	}
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/albums/" + album.ID + ":batchRemoveMediaItems",
		NoResponse: true,
	}
	var request = api.BatchRemoveItems{
		MediaItemIds: []string{o.id},
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't delete item from album")
	}
	return nil
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}

// ID of an Object if known, "" otherwise
func (o *Object) ID() string {
	return o.id
}

// Check the interfaces are satisfied
var (
	_ fs.Fs        = &Fs{}
	_ fs.Object    = &Object{}
	_ fs.MimeTyper = &Object{}
	_ fs.IDer      = &Object{}
)

306	backend/googlephotos/googlephotos_test.go	Normal file
@@ -0,0 +1,306 @@
package googlephotos

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"path"
	"testing"
	"time"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fstest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	// We have two different files here as Google Photos will uniq
	// them otherwise which confuses the tests as the filename is
	// unexpected.
	fileNameAlbum  = "rclone-test-image1.jpg"
	fileNameUpload = "rclone-test-image2.jpg"
)

// Wrapper to override the remote for an object
type overrideRemoteObject struct {
	fs.Object
	remote string
}

// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
	return o.remote
}

func TestIntegration(t *testing.T) {
	ctx := context.Background()
	fstest.Initialise()

	// Create Fs
	if *fstest.RemoteName == "" {
		*fstest.RemoteName = "TestGooglePhotos:"
	}
	f, err := fs.NewFs(*fstest.RemoteName)
	if err == fs.ErrorNotFoundInConfigFile {
		t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
	}
	require.NoError(t, err)

	// Create local Fs pointing at testfiles
	localFs, err := fs.NewFs("testfiles")
	require.NoError(t, err)

	t.Run("CreateAlbum", func(t *testing.T) {
		albumName := "album/rclone-test-" + fstest.RandomString(24)
		err = f.Mkdir(ctx, albumName)
		require.NoError(t, err)
		remote := albumName + "/" + fileNameAlbum

		t.Run("PutFile", func(t *testing.T) {
			srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
			require.NoError(t, err)
			in, err := srcObj.Open(ctx)
			require.NoError(t, err)
			dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
			require.NoError(t, err)
			assert.Equal(t, remote, dstObj.Remote())
			_ = in.Close()
			remoteWithID := addFileID(remote, dstObj.(*Object).id)

			t.Run("ObjectFs", func(t *testing.T) {
				assert.Equal(t, f, dstObj.Fs())
			})

			t.Run("ObjectString", func(t *testing.T) {
				assert.Equal(t, remote, dstObj.String())
				assert.Equal(t, "<nil>", (*Object)(nil).String())
			})

			t.Run("ObjectHash", func(t *testing.T) {
				h, err := dstObj.Hash(ctx, hash.MD5)
				assert.Equal(t, "", h)
				assert.Equal(t, hash.ErrUnsupported, err)
			})

			t.Run("ObjectSize", func(t *testing.T) {
				assert.Equal(t, int64(-1), dstObj.Size())
				f.(*Fs).opt.ReadSize = true
				defer func() {
					f.(*Fs).opt.ReadSize = false
				}()
				size := dstObj.Size()
				assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
			})

			t.Run("ObjectSetModTime", func(t *testing.T) {
				err := dstObj.SetModTime(ctx, time.Now())
				assert.Equal(t, fs.ErrorCantSetModTime, err)
			})

			t.Run("ObjectStorable", func(t *testing.T) {
				assert.True(t, dstObj.Storable())
			})

			t.Run("ObjectOpen", func(t *testing.T) {
				in, err := dstObj.Open(ctx)
				require.NoError(t, err)
				buf, err := ioutil.ReadAll(in)
				require.NoError(t, err)
				require.NoError(t, in.Close())
				assert.True(t, len(buf) > 1000)
				contentType := http.DetectContentType(buf[:512])
				assert.Equal(t, "image/jpeg", contentType)
			})

			t.Run("CheckFileInAlbum", func(t *testing.T) {
				entries, err := f.List(ctx, albumName)
				require.NoError(t, err)
				assert.Equal(t, 1, len(entries))
				assert.Equal(t, remote, entries[0].Remote())
				assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
			})

			// Check it is there in the date/month/year hierarchy
			// 2013-07-13 is the creation date of the folder
			checkPresent := func(t *testing.T, objPath string) {
				entries, err := f.List(ctx, objPath)
				require.NoError(t, err)
				found := false
				for _, entry := range entries {
					leaf := path.Base(entry.Remote())
					if leaf == fileNameAlbum || leaf == remoteWithID {
						found = true
					}
				}
				assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
			}

			t.Run("CheckInByYear", func(t *testing.T) {
				checkPresent(t, "media/by-year/2013")
			})

			t.Run("CheckInByMonth", func(t *testing.T) {
				checkPresent(t, "media/by-month/2013/2013-07")
			})

			t.Run("CheckInByDay", func(t *testing.T) {
				checkPresent(t, "media/by-day/2013/2013-07-26")
			})

			t.Run("NewObject", func(t *testing.T) {
				o, err := f.NewObject(ctx, remote)
				require.NoError(t, err)
				require.Equal(t, remote, o.Remote())
			})

			t.Run("NewObjectWithID", func(t *testing.T) {
				o, err := f.NewObject(ctx, remoteWithID)
				require.NoError(t, err)
				require.Equal(t, remoteWithID, o.Remote())
			})

			t.Run("NewFsIsFile", func(t *testing.T) {
				fNew, err := fs.NewFs(*fstest.RemoteName + remote)
				assert.Equal(t, fs.ErrorIsFile, err)
				leaf := path.Base(remote)
				o, err := fNew.NewObject(ctx, leaf)
				require.NoError(t, err)
				require.Equal(t, leaf, o.Remote())
			})

			t.Run("RemoveFileFromAlbum", func(t *testing.T) {
				err = dstObj.Remove(ctx)
				require.NoError(t, err)

				time.Sleep(time.Second)

				// Check album empty
				entries, err := f.List(ctx, albumName)
				require.NoError(t, err)
				assert.Equal(t, 0, len(entries))
			})
		})

		// remove the album
		err = f.Rmdir(ctx, albumName)
		require.Error(t, err) // FIXME doesn't work yet
	})

	t.Run("UploadMkdir", func(t *testing.T) {
		assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
		assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))

		t.Run("List", func(t *testing.T) {
			entries, err := f.List(ctx, "upload")
			require.NoError(t, err)
			assert.Equal(t, 1, len(entries))
			assert.Equal(t, "upload/dir", entries[0].Remote())

			entries, err = f.List(ctx, "upload/dir")
			require.NoError(t, err)
			assert.Equal(t, 1, len(entries))
			assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
		})

		t.Run("Rmdir", func(t *testing.T) {
			assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
			assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
		})

		t.Run("ListEmpty", func(t *testing.T) {
			entries, err := f.List(ctx, "upload")
			require.NoError(t, err)
			assert.Equal(t, 0, len(entries))

			_, err = f.List(ctx, "upload/dir")
			assert.Equal(t, fs.ErrorDirNotFound, err)
		})
	})

	t.Run("Upload", func(t *testing.T) {
		uploadDir := "upload/dir/subdir"
		remote := path.Join(uploadDir, fileNameUpload)

		srcObj, err := localFs.NewObject(ctx, fileNameUpload)
		require.NoError(t, err)
		in, err := srcObj.Open(ctx)
		require.NoError(t, err)
		dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
		require.NoError(t, err)
		assert.Equal(t, remote, dstObj.Remote())
		_ = in.Close()
		remoteWithID := addFileID(remote, dstObj.(*Object).id)

		t.Run("List", func(t *testing.T) {
			entries, err := f.List(ctx, uploadDir)
			require.NoError(t, err)
			require.Equal(t, 1, len(entries))
			assert.Equal(t, remote, entries[0].Remote())
			assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
		})

		t.Run("NewObject", func(t *testing.T) {
			o, err := f.NewObject(ctx, remote)
			require.NoError(t, err)
			require.Equal(t, remote, o.Remote())
		})

		t.Run("NewObjectWithID", func(t *testing.T) {
			o, err := f.NewObject(ctx, remoteWithID)
			require.NoError(t, err)
			require.Equal(t, remoteWithID, o.Remote())
		})
	})

	t.Run("Name", func(t *testing.T) {
		assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
	})

	t.Run("Root", func(t *testing.T) {
		assert.Equal(t, "", f.Root())
	})

	t.Run("String", func(t *testing.T) {
		assert.Equal(t, `Google Photos path ""`, f.String())
	})

	t.Run("Features", func(t *testing.T) {
		features := f.Features()
		assert.False(t, features.CaseInsensitive)
		assert.True(t, features.ReadMimeType)
	})

	t.Run("Precision", func(t *testing.T) {
		assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
	})

	t.Run("Hashes", func(t *testing.T) {
		assert.Equal(t, hash.Set(hash.None), f.Hashes())
	})
}

func TestAddID(t *testing.T) {
	assert.Equal(t, "potato {123}", addID("potato", "123"))
	assert.Equal(t, "{123}", addID("", "123"))
}

func TestFileAddID(t *testing.T) {
	assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123"))
	assert.Equal(t, "potato {123}", addFileID("potato", "123"))
	assert.Equal(t, "{123}", addFileID("", "123"))
}

func TestFindID(t *testing.T) {
	assert.Equal(t, "", findID("potato"))
	ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
	assert.Equal(t, ID, findID("potato {"+ID+"}.txt"))
	ID = ID[1:]
	assert.Equal(t, "", findID("potato {"+ID+"}.txt"))
}

335	backend/googlephotos/pattern.go	Normal file
@@ -0,0 +1,335 @@
// Store the parsing of file patterns

package googlephotos

import (
	"context"
	"fmt"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/rclone/rclone/fs"
)

// lister describes the subset of the interfaces on Fs needed for the
// file pattern parsing
type lister interface {
	listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
	listAlbums(shared bool) (all *albums, err error)
	listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
	dirTime() time.Time
}

// dirPattern describes a single directory pattern
type dirPattern struct {
	re        string         // match for the path
	match     *regexp.Regexp // compiled match
	canUpload bool           // true if can upload here
	canMkdir  bool           // true if can make a directory here
	isFile    bool           // true if this is a file
	isUpload  bool           // true if this is the upload directory
	// function to turn a match into DirEntries
	toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}

// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern

// patterns describes the layout of the google photos backend file system.
//
// NB no trailing / on paths
var patterns = dirPatterns{
	{
		re: `^$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"media", f.dirTime()),
				fs.NewDir(prefix+"album", f.dirTime()),
				fs.NewDir(prefix+"shared-album", f.dirTime()),
				fs.NewDir(prefix+"upload", f.dirTime()),
			}, nil
		},
	},
	{
		re: `^upload(?:/(.*))?$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listUploads(ctx, match[0])
		},
		canUpload: true,
		canMkdir:  true,
		isUpload:  true,
	},
	{
		re:        `^upload/(.*)$`,
		isFile:    true,
		canUpload: true,
		isUpload:  true,
	},
	{
		re: `^media$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"all", f.dirTime()),
				fs.NewDir(prefix+"by-year", f.dirTime()),
				fs.NewDir(prefix+"by-month", f.dirTime()),
				fs.NewDir(prefix+"by-day", f.dirTime()),
			}, nil
		},
	},
	{
		re: `^media/all$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listDir(ctx, prefix, api.SearchFilter{})
		},
	},
	{
		re:     `^media/all/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-year$`,
		toEntries: years,
	},
	{
		re: `^media/by-year/(\d{4})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-year/(\d{4})/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-month$`,
		toEntries: years,
	},
	{
		re:        `^media/by-month/(\d{4})$`,
		toEntries: months,
	},
	{
		re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-day$`,
		toEntries: years,
	},
	{
		re:        `^media/by-day/(\d{4})$`,
		toEntries: days,
	},
	{
		re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	{
		re: `^album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, "")
		},
	},
	{
		re:       `^album/(.+)$`,
		canMkdir: true,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, match[1])
		},
	},
	{
		re:        `^album/(.+?)/([^/]+)$`,
		canUpload: true,
		isFile:    true,
	},
	{
		re: `^shared-album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, "")
		},
	},
	{
		re: `^shared-album/(.+)$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, match[1])
		},
	},
	{
		re:     `^shared-album/(.+?)/([^/]+)$`,
		isFile: true,
	},
}.mustCompile()
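// For example, with root "" the path "media/by-day/2001/2001-01-02" matches
// the by-day pattern above with submatches ["2001", "01", "02"], which
// yearMonthDayFilter below turns into a date filter for that single day.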

// mustCompile compiles the regexps in the dirPatterns
func (ds dirPatterns) mustCompile() dirPatterns {
	for i := range ds {
		pattern := &ds[i]
		pattern.match = regexp.MustCompile(pattern.re)
	}
	return ds
}

// match finds the path passed in within the matching structure and
// returns the parameters and a pointer to the match, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
	itemPath = strings.Trim(itemPath, "/")
	absPath := path.Join(root, itemPath)
	prefix = strings.Trim(absPath[len(root):], "/")
	if prefix != "" {
		prefix += "/"
	}
	for i := range ds {
		pattern = &ds[i]
		if pattern.isFile != isFile {
			continue
		}
		match = pattern.match.FindStringSubmatch(absPath)
		if match != nil {
			return
		}
	}
	return nil, "", nil
}
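// For example, match("media", "all", false) matches "media/all" and returns
// the prefix "all/" which is prepended to the remotes of the entries listed.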

// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	currentYear := f.dirTime().Year()
	for year := 2000; year <= currentYear; year++ {
		entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
	}
	return entries, nil
}

// Return the months in a given year
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	for month := 1; month <= 12; month++ {
		entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime()))
	}
	return entries, nil
}

// Return the days in a given year
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	current, err := time.Parse("2006", year)
	if err != nil {
		return nil, errors.Errorf("bad year %q", match[1])
	}
	currentYear := current.Year()
	for current.Year() == currentYear {
		entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
		current = current.AddDate(0, 0, 1)
	}
	return entries, nil
}
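// For example, days for the match ["", "2020"] yields 366 entries,
// "2020-01-01" through "2020-12-31", since 2020 is a leap year (see
// TestPatternDays).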

// This creates a search filter on year/month/day as provided
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
	year, err := strconv.Atoi(match[1])
	if err != nil || year < 1000 || year > 3000 {
		return sf, errors.Errorf("bad year %q", match[1])
	}
	sf = api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: year,
					},
				},
			},
		},
	}
	if len(match) >= 3 {
		month, err := strconv.Atoi(match[2])
		if err != nil || month < 1 || month > 12 {
			return sf, errors.Errorf("bad month %q", match[2])
		}
		sf.Filters.DateFilter.Dates[0].Month = month
	}
	if len(match) >= 4 {
		day, err := strconv.Atoi(match[3])
		if err != nil || day < 1 || day > 31 {
			return sf, errors.Errorf("bad day %q", match[3])
		}
		sf.Filters.DateFilter.Dates[0].Day = day
	}
	return sf, nil
}
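// For example, the match groups ["", "2000", "01", "02"] produce a filter
// for 2 January 2000 - TestPatternYearMonthDayFilter shows the full shape
// of the generated api.SearchFilter.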

// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
	albums, err := f.listAlbums(shared)
	if err != nil {
		return nil, err
	}
	// Put in the directories
	dirs, foundAlbumPath := albums.getDirs(albumPath)
	if foundAlbumPath {
		for _, dir := range dirs {
			d := fs.NewDir(prefix+dir, f.dirTime())
			dirPath := path.Join(albumPath, dir)
			// if this dir is an album then add its ID and media item count
			album, ok := albums.get(dirPath)
			if ok {
				count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
				if err != nil {
					fs.Debugf(f, "Error reading media count: %v", err)
				}
				d.SetID(album.ID).SetItems(count)
			}
			entries = append(entries, d)
		}
	}
	// if this is an album then return a filter to list it
	album, foundAlbum := albums.get(albumPath)
	if foundAlbum {
		filter := api.SearchFilter{AlbumID: album.ID}
		newEntries, err := f.listDir(ctx, prefix, filter)
		if err != nil {
			return nil, err
		}
		entries = append(entries, newEntries...)
	}
	if !foundAlbumPath && !foundAlbum && albumPath != "" {
		return nil, fs.ErrorDirNotFound
	}
	return entries, nil
}
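// For example, with albums titled "sub" and "sub/one", albumsToEntries for
// albumPath "sub" returns the directory "one/" plus the media items of the
// "sub" album itself (see TestPatternAlbumsToEntries).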

495	backend/googlephotos/pattern_test.go	Normal file
@@ -0,0 +1,495 @@
package googlephotos

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/dirtree"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/mockobject"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// time for directories
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")

// mock Fs for testing patterns
type testLister struct {
	t        *testing.T
	albums   *albums
	names    []string
	uploaded dirtree.DirTree
}

// newTestLister makes a mock for testing
func newTestLister(t *testing.T) *testLister {
	return &testLister{
		t:        t,
		albums:   newAlbums(),
		uploaded: dirtree.New(),
	}
}

// mock listDir for testing
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
	for _, name := range f.names {
		entries = append(entries, mockobject.New(prefix+name))
	}
	return entries, nil
}

// mock listAlbums for testing
func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
	return f.albums, nil
}

// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	entries, _ = f.uploaded[dir]
	return entries, nil
}

// mock dirTime for testing
func (f *testLister) dirTime() time.Time {
	return startTime
}

func TestPatternMatch(t *testing.T) {
	for testNumber, test := range []struct {
		// input
		root     string
		itemPath string
		isFile   bool
		// expected output
		wantMatch   []string
		wantPrefix  string
		wantPattern *dirPattern
	}{
		{
			root:        "",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{""},
			wantPrefix:  "",
			wantPattern: &patterns[0],
		},
		{
			root:        "",
			itemPath:    "",
			isFile:      true,
			wantMatch:   nil,
			wantPrefix:  "",
			wantPattern: nil,
		},
		{
			root:        "upload",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"upload", ""},
			wantPrefix:  "",
			wantPattern: &patterns[1],
		},
		{
			root:        "upload/dir",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"upload/dir", "dir"},
			wantPrefix:  "",
			wantPattern: &patterns[1],
		},
		{
			root:        "upload/file.jpg",
			itemPath:    "",
			isFile:      true,
			wantMatch:   []string{"upload/file.jpg", "file.jpg"},
			wantPrefix:  "",
			wantPattern: &patterns[2],
		},
		{
			root:        "media",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"media"},
			wantPrefix:  "",
			wantPattern: &patterns[3],
		},
		{
			root:        "",
			itemPath:    "media",
			isFile:      false,
			wantMatch:   []string{"media"},
			wantPrefix:  "media/",
			wantPattern: &patterns[3],
		},
		{
			root:        "media/all",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"media/all"},
			wantPrefix:  "",
			wantPattern: &patterns[4],
		},
		{
			root:        "media",
			itemPath:    "all",
			isFile:      false,
			wantMatch:   []string{"media/all"},
			wantPrefix:  "all/",
			wantPattern: &patterns[4],
		},
		{
			root:        "media/all",
			itemPath:    "file.jpg",
			isFile:      true,
			wantMatch:   []string{"media/all/file.jpg", "file.jpg"},
			wantPrefix:  "file.jpg/",
			wantPattern: &patterns[5],
		},
	} {
		t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
			gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
			assert.Equal(t, test.wantMatch, gotMatch)
			assert.Equal(t, test.wantPrefix, gotPrefix)
			assert.Equal(t, test.wantPattern, gotPattern)
		})
	}
}

func TestPatternMatchToEntries(t *testing.T) {
	ctx := context.Background()
	f := newTestLister(t)
	f.names = []string{"file.jpg"}
	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub/one",
	})
	f.albums.add(&api.Album{
		ID:    "2",
		Title: "sub",
	})
	f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
	f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))

	for testNumber, test := range []struct {
		// input
		root     string
		itemPath string
		// expected output
		wantMatch  []string
		wantPrefix string
		remotes    []string
	}{
		{
			root:       "",
			itemPath:   "",
			wantMatch:  []string{""},
			wantPrefix: "",
			remotes:    []string{"media/", "album/", "shared-album/", "upload/"},
		},
		{
			root:       "upload",
			itemPath:   "",
			wantMatch:  []string{"upload", ""},
			wantPrefix: "",
			remotes:    []string{"upload/file1.jpg", "upload/dir/"},
		},
		{
			root:       "upload",
			itemPath:   "dir",
			wantMatch:  []string{"upload/dir", "dir"},
			wantPrefix: "dir/",
			remotes:    []string{"upload/dir/file2.jpg"},
		},
		{
			root:       "media",
			itemPath:   "",
			wantMatch:  []string{"media"},
			wantPrefix: "",
			remotes:    []string{"all/", "by-year/", "by-month/", "by-day/"},
		},
		{
			root:       "media/all",
			itemPath:   "",
			wantMatch:  []string{"media/all"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media",
			itemPath:   "all",
			wantMatch:  []string{"media/all"},
			wantPrefix: "all/",
			remotes:    []string{"all/file.jpg"},
		},
		{
			root:       "media/by-year",
			itemPath:   "",
			wantMatch:  []string{"media/by-year"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-year/2000",
			itemPath:   "",
			wantMatch:  []string{"media/by-year/2000", "2000"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media/by-month",
			itemPath:   "",
			wantMatch:  []string{"media/by-month"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-month/2001",
			itemPath:   "",
			wantMatch:  []string{"media/by-month/2001", "2001"},
			wantPrefix: "",
			remotes:    []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
		},
		{
			root:       "media/by-month/2001/2001-01",
			itemPath:   "",
			wantMatch:  []string{"media/by-month/2001/2001-01", "2001", "01"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media/by-day",
			itemPath:   "",
			wantMatch:  []string{"media/by-day"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-day/2001",
			itemPath:   "",
			wantMatch:  []string{"media/by-day/2001", "2001"},
			wantPrefix: "",
			remotes:    []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
		},
		{
			root:       "media/by-day/2001/2001-01-02",
			itemPath:   "",
			wantMatch:  []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "album",
			itemPath:   "",
			wantMatch:  []string{"album"},
			wantPrefix: "",
			remotes:    []string{"sub/"},
		},
		{
			root:       "album/sub",
			itemPath:   "",
			wantMatch:  []string{"album/sub", "sub"},
			wantPrefix: "",
			remotes:    []string{"one/", "file.jpg"},
		},
		{
			root:       "album/sub/one",
			itemPath:   "",
			wantMatch:  []string{"album/sub/one", "sub/one"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "shared-album",
			itemPath:   "",
			wantMatch:  []string{"shared-album"},
			wantPrefix: "",
			remotes:    []string{"sub/"},
		},
		{
			root:       "shared-album/sub",
			itemPath:   "",
			wantMatch:  []string{"shared-album/sub", "sub"},
			wantPrefix: "",
			remotes:    []string{"one/", "file.jpg"},
		},
		{
			root:       "shared-album/sub/one",
			itemPath:   "",
			wantMatch:  []string{"shared-album/sub/one", "sub/one"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
	} {
		t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
			match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
			assert.Equal(t, test.wantMatch, match)
			assert.Equal(t, test.wantPrefix, prefix)
			assert.NotNil(t, pattern)
			assert.NotNil(t, pattern.toEntries)

			entries, err := pattern.toEntries(ctx, f, prefix, match)
			assert.NoError(t, err)
			var remotes = []string{}
			for _, entry := range entries {
				remote := entry.Remote()
				if _, isDir := entry.(fs.Directory); isDir {
					remote += "/"
				}
				remotes = append(remotes, remote)
				if len(remotes) >= 4 {
					break // only test first 4 entries
				}
			}
			assert.Equal(t, test.remotes, remotes)
		})
	}
}

func TestPatternYears(t *testing.T) {
	f := newTestLister(t)
	entries, err := years(context.Background(), f, "potato/", nil)
	require.NoError(t, err)

	year := 2000
	for _, entry := range entries {
		assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote())
		year++
	}
}

func TestPatternMonths(t *testing.T) {
	f := newTestLister(t)
	entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)

	assert.Equal(t, 12, len(entries))
	for i, entry := range entries {
		assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote())
	}
}

func TestPatternDays(t *testing.T) {
	f := newTestLister(t)
	entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)

	assert.Equal(t, 366, len(entries))
	assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
	assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
}

func TestPatternYearMonthDayFilter(t *testing.T) {
	ctx := context.Background()
	f := newTestLister(t)

	// Years
	sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: 2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
	require.Error(t, err)

	// Months
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
	require.Error(t, err)

	// Days
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Day:   2,
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
	require.Error(t, err)
}

func TestPatternAlbumsToEntries(t *testing.T) {
	f := newTestLister(t)
	ctx := context.Background()

	_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.Equal(t, fs.ErrorDirNotFound, err)

	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub/one",
	})

	entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	assert.Equal(t, 1, len(entries))
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok := entries[0].(fs.Directory)
	assert.Equal(t, true, ok)

	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub",
	})
	f.names = []string{"file.jpg"}

	entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	assert.Equal(t, 2, len(entries))
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok = entries[0].(fs.Directory)
	assert.Equal(t, true, ok)
	assert.Equal(t, "potato/file.jpg", entries[1].Remote())
	_, ok = entries[1].(fs.Object)
	assert.Equal(t, true, ok)
}

BIN	backend/googlephotos/testfiles/rclone-test-image1.jpg	Normal file
Binary file not shown. (Size: 16 KiB)

BIN	backend/googlephotos/testfiles/rclone-test-image2.jpg	Normal file
Binary file not shown. (Size: 16 KiB)

551	backend/http/http.go	Normal file
@@ -0,0 +1,551 @@
|
||||
// Package http provides a filesystem interface using golang.org/net/http
|
||||
//
|
||||
// It treats HTML pages served from the endpoint as directory
|
||||
// listings, and includes any links found as files.
|
||||
package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/net/html"
|
||||
)
|
||||
|
||||
var (
|
||||
errorReadOnly = errors.New("http remotes are read only")
|
||||
timeUnset = time.Unix(0, 0)
|
||||
)
|
||||
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "http",
|
||||
Description: "http Connection",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "url",
|
||||
Help: "URL of http host to connect to",
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://example.com",
|
||||
Help: "Connect to example.com",
|
||||
}, {
|
||||
Value: "https://user:pass@example.com",
|
||||
Help: "Connect to example.com using a username and password",
|
||||
}},
|
||||
}, {
|
||||
Name: "no_slash",
|
||||
Help: `Set this if the site doesn't end directories with /
|
||||
|
||||
Use this if your target website does not use / on the end of
|
||||
directories.
|
||||
|
||||
A / on the end of a path is how rclone normally tells the difference
|
||||
between files and directories. If this flag is set, then rclone will
|
||||
treat all files with Content-Type: text/html as directories and read
|
||||
URLs from them rather than downloading them.
|
||||
|
||||
Note that this may cause rclone to confuse genuine HTML files with
|
||||
directories.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Endpoint string `config:"url"`
|
||||
NoSlash bool `config:"no_slash"`
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote HTTP files
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this backend
|
||||
endpoint *url.URL
|
||||
endpointURL string // endpoint as a string
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
size int64
|
||||
modTime time.Time
|
||||
contentType string
|
||||
}
|
||||
|
||||
// statusError returns an error if the res contained an error
|
||||
func statusError(res *http.Response, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 {
|
||||
_ = res.Body.Close()
|
||||
return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(opt.Endpoint, "/") {
|
||||
opt.Endpoint += "/"
|
||||
}
|
||||
|
||||
// Parse the endpoint and stick the root onto it
|
||||
base, err := url.Parse(opt.Endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u, err := rest.URLJoin(base, rest.URLPathEscape(root))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := fshttp.NewClient(fs.Config)
|
||||
|
||||
var isFile = false
|
||||
if !strings.HasSuffix(u.String(), "/") {
|
||||
// Make a client which doesn't follow redirects so the server
|
||||
// doesn't redirect http://host/dir to http://host/dir/
|
||||
noRedir := *client
|
||||
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
// check to see if points to a file
|
||||
res, err := noRedir.Head(u.String())
|
||||
err = statusError(res, err)
|
||||
if err == nil {
|
||||
isFile = true
|
||||
}
|
||||
}
|
||||
|
||||
newRoot := u.String()
|
||||
if isFile {
|
||||
// Point to the parent if this is a file
|
||||
newRoot, _ = path.Split(u.String())
|
||||
} else {
|
||||
if !strings.HasSuffix(newRoot, "/") {
|
||||
newRoot += "/"
|
||||
}
|
||||
}
|
||||
|
||||
u, err = url.Parse(newRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
httpClient: client,
|
||||
endpoint: u,
|
||||
endpointURL: u.String(),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
if isFile {
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
if !strings.HasSuffix(f.endpointURL, "/") {
|
||||
return nil, errors.New("internal error: url doesn't end with /")
|
||||
}
|
||||
return f, nil
|
||||
}

// Name returns the configured name of the file system
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root for the filesystem
func (f *Fs) Root() string {
	return f.root
}

// String returns the URL for the filesystem
func (f *Fs) String() string {
	return f.endpointURL
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Precision is the remote http file system's modtime precision, which we have
// no way of knowing. We estimate at 1s.
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	err := o.stat()
	if err != nil {
		return nil, err
	}
	return o, nil
}

// url joins the remote onto the base URL
func (f *Fs) url(remote string) string {
	return f.endpointURL + rest.URLPathEscape(remote)
}

// parseInt64 parses s into an int64; on failure it returns def
func parseInt64(s string, def int64) int64 {
	n, e := strconv.ParseInt(s, 10, 64)
	if e != nil {
		return def
	}
	return n
}
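
// Illustrative usage (not part of the original source): parseInt64 is used
// below to read the Content-Length header, which may be absent, e.g.
//
//	parseInt64("123", -1) // 123
//	parseInt64("", -1)    // -1 (unknown size)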

// Errors returned by parseName
var (
	errURLJoinFailed     = errors.New("URLJoin failed")
	errFoundQuestionMark = errors.New("found ? in URL")
	errHostMismatch      = errors.New("host mismatch")
	errSchemeMismatch    = errors.New("scheme mismatch")
	errNotUnderRoot      = errors.New("not under root")
	errNameIsEmpty       = errors.New("name is empty")
	errNameContainsSlash = errors.New("name contains /")
)

// parseName turns a name as found in the page into a remote path or returns an error
func parseName(base *url.URL, name string) (string, error) {
	// make URL absolute
	u, err := rest.URLJoin(base, name)
	if err != nil {
		return "", errURLJoinFailed
	}
	// check it doesn't have URL parameters
	uStr := u.String()
	if strings.Contains(uStr, "?") {
		return "", errFoundQuestionMark
	}
	// check that this is going back to the same host and scheme
	if base.Host != u.Host {
		return "", errHostMismatch
	}
	if base.Scheme != u.Scheme {
		return "", errSchemeMismatch
	}
	// check it has the base path as a prefix
	if !strings.HasPrefix(u.Path, base.Path) {
		return "", errNotUnderRoot
	}
	// calculate the name relative to the base
	name = u.Path[len(base.Path):]
	// mustn't be empty
	if name == "" {
		return "", errNameIsEmpty
	}
	// mustn't contain a / - we are looking for a single level directory,
	// though a trailing / marking a directory is allowed
	slash := strings.Index(name, "/")
	if slash >= 0 && slash != len(name)-1 {
		return "", errNameContainsSlash
	}
	return name, nil
}
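
// Illustrative sketch (assumed values, not part of the original source):
//
//	base, _ := url.Parse("http://example.com/dir/")
//	parseName(base, "potato")     // "potato", nil
//	parseName(base, "subdir/")    // "subdir/", nil (trailing slash marks a directory)
//	parseName(base, "sub/potato") // "", errNameContainsSlash
//	parseName(base, "http://other.com/dir/x") // "", errHostMismatch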

// parse turns HTML for a directory into names
//
// base should be the base URL to resolve any relative names from
func parse(base *url.URL, in io.Reader) (names []string, err error) {
	doc, err := html.Parse(in)
	if err != nil {
		return nil, err
	}
	var (
		walk func(*html.Node)
		seen = make(map[string]struct{})
	)
	walk = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "a" {
			for _, a := range n.Attr {
				if a.Key == "href" {
					name, err := parseName(base, a.Val)
					if err == nil {
						if _, found := seen[name]; !found {
							names = append(names, name)
							seen[name] = struct{}{}
						}
					}
					break
				}
			}
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c)
		}
	}
	walk(doc)
	return names, nil
}
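
// Illustrative sketch (assumed HTML, not part of the original source):
//
//	base, _ := url.Parse("http://example.com/dir/")
//	in := strings.NewReader(`<a href="file.txt">f</a><a href="sub/">s</a><a href="file.txt">dup</a>`)
//	names, _ := parse(base, in) // []string{"file.txt", "sub/"}, duplicates dropped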

// Read the directory passed in
func (f *Fs) readDir(dir string) (names []string, err error) {
	URL := f.url(dir)
	u, err := url.Parse(URL)
	if err != nil {
		return nil, errors.Wrap(err, "failed to readDir")
	}
	if !strings.HasSuffix(URL, "/") {
		return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
	}
	res, err := f.httpClient.Get(URL)
	if err == nil {
		defer fs.CheckClose(res.Body, &err)
		if res.StatusCode == http.StatusNotFound {
			return nil, fs.ErrorDirNotFound
		}
	}
	err = statusError(res, err)
	if err != nil {
		return nil, errors.Wrap(err, "failed to readDir")
	}

	contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
	switch contentType {
	case "text/html":
		names, err = parse(u, res.Body)
		if err != nil {
			return nil, errors.Wrap(err, "readDir")
		}
	default:
		return nil, errors.Errorf("Can't parse content type %q", contentType)
	}
	return names, nil
}
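
// Illustrative note (not part of the original source): the SplitN above strips
// any media type parameters before the switch, e.g.
//
//	"text/html; charset=utf-8" -> "text/html"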

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	if !strings.HasSuffix(dir, "/") && dir != "" {
		dir += "/"
	}
	names, err := f.readDir(dir)
	if err != nil {
		return nil, errors.Wrapf(err, "error listing %q", dir)
	}
	for _, name := range names {
		isDir := name[len(name)-1] == '/'
		name = strings.TrimRight(name, "/")
		remote := path.Join(dir, name)
		if isDir {
			dir := fs.NewDir(remote, timeUnset)
			entries = append(entries, dir)
		} else {
			file := &Object{
				fs:     f,
				remote: remote,
			}
			switch err = file.stat(); err {
			case nil:
				entries = append(entries, file)
			case fs.ErrorNotAFile:
				// ...found a directory not a file
				dir := fs.NewDir(remote, timeUnset)
				entries = append(entries, dir)
			default:
				fs.Debugf(remote, "skipping because of error: %v", err)
			}
		}
	}
	return entries, nil
}
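
// Illustrative note (not part of the original source): names ending in "/"
// become directories directly; every other name costs one HEAD request via
// stat(), which can make listings of large directories slow.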

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}

// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
	return o.fs
}

// String returns the URL to the remote HTTP file
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns "" since HTTP doesn't support remote calculation of hashes
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// url returns the native url of the object
func (o *Object) url() string {
	return o.fs.url(o.remote)
}

// stat updates the info field in the Object
func (o *Object) stat() error {
	url := o.url()
	res, err := o.fs.httpClient.Head(url)
	if err == nil && res.StatusCode == http.StatusNotFound {
		return fs.ErrorObjectNotFound
	}
	err = statusError(res, err)
	if err != nil {
		return errors.Wrap(err, "failed to stat")
	}
	t, err := http.ParseTime(res.Header.Get("Last-Modified"))
	if err != nil {
		t = timeUnset
	}
	o.size = parseInt64(res.Header.Get("Content-Length"), -1)
	o.modTime = t
	o.contentType = res.Header.Get("Content-Type")
	// If NoSlash is set then check the Content-Type to see if it is a directory
	if o.fs.opt.NoSlash {
		mediaType, _, err := mime.ParseMediaType(o.contentType)
		if err != nil {
			return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
		}
		if mediaType == "text/html" {
			return fs.ErrorNotAFile
		}
	}
	return nil
}
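
// Illustrative note (not part of the original source): with no_slash set, a
// HEAD on e.g. http://host/two.html answering "Content-Type: text/html" makes
// stat return fs.ErrorNotAFile, and List then records the entry as a
// directory instead (see the fs.ErrorNotAFile case above).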

// SetModTime sets the modification and access time to the specified time
//
// This is a read-only remote, so it always fails with errorReadOnly
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return errorReadOnly
}

// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
	return true
}

// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	url := o.url()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, errors.Wrap(err, "Open failed")
	}

	// Add optional headers
	for k, v := range fs.OpenOptionHeaders(options) {
		req.Header.Add(k, v)
	}

	// Do the request
	res, err := o.fs.httpClient.Do(req)
	err = statusError(res, err)
	if err != nil {
		return nil, errors.Wrap(err, "Open failed")
	}
	return res.Body, nil
}
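
// Illustrative sketch (not part of the original source): seeking works by
// translating open options into request headers, e.g.
//
//	fd, err := o.Open(ctx, &fs.RangeOption{Start: 1, End: 5})
//
// sends "Range: bytes=1-5", so the server returns just that slice of the file
// (as exercised by TestOpen below).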

// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return errorReadOnly
}

// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
	return errorReadOnly
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return errorReadOnly
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errorReadOnly
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.contentType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
)
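
// Illustrative note (not part of the original source): the blank assignments
// above are the usual Go idiom for compile-time interface checks; the build
// breaks if *Fs or *Object stops satisfying one of the listed interfaces.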
345
backend/http/http_internal_test.go
Normal file
@@ -0,0 +1,345 @@
package http

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"path/filepath"
	"sort"
	"testing"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/rest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	remoteName = "TestHTTP"
	testPath   = "test"
	filesPath  = filepath.Join(testPath, "files")
)

// prepareServer starts the test server and returns a function to tidy it up afterwards
func prepareServer(t *testing.T) (configmap.Simple, func()) {
	// file server for test/files
	fileServer := http.FileServer(http.Dir(filesPath))

	// Make the test server
	ts := httptest.NewServer(fileServer)

	// Configure the remote
	config.LoadConfig()
	// fs.Config.LogLevel = fs.LogLevelDebug
	// fs.Config.DumpHeaders = true
	// fs.Config.DumpBodies = true
	// config.FileSet(remoteName, "type", "http")
	// config.FileSet(remoteName, "url", ts.URL)

	m := configmap.Simple{
		"type": "http",
		"url":  ts.URL,
	}

	// return a function to tidy up
	return m, ts.Close
}

// prepare makes the test server and the Fs and returns a function to tidy them up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
	m, tidy := prepareServer(t)

	// Instantiate it
	f, err := NewFs(remoteName, "", m)
	require.NoError(t, err)

	return f, tidy
}
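
// Illustrative note (not part of the original source): these tests run against
// a local httptest server over the fixture tree in backend/http/test/files,
// so they need no network access; a plain "go test" in this package runs them.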

func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
	entries, err := f.List(context.Background(), "")
	require.NoError(t, err)

	sort.Sort(entries)

	require.Equal(t, 4, len(entries))

	e := entries[0]
	assert.Equal(t, "four", e.Remote())
	assert.Equal(t, int64(-1), e.Size())
	_, ok := e.(fs.Directory)
	assert.True(t, ok)

	e = entries[1]
	assert.Equal(t, "one%.txt", e.Remote())
	assert.Equal(t, int64(6), e.Size())
	_, ok = e.(*Object)
	assert.True(t, ok)

	e = entries[2]
	assert.Equal(t, "three", e.Remote())
	assert.Equal(t, int64(-1), e.Size())
	_, ok = e.(fs.Directory)
	assert.True(t, ok)

	e = entries[3]
	assert.Equal(t, "two.html", e.Remote())
	if noSlash {
		assert.Equal(t, int64(-1), e.Size())
		_, ok = e.(fs.Directory)
		assert.True(t, ok)
	} else {
		assert.Equal(t, int64(41), e.Size())
		_, ok = e.(*Object)
		assert.True(t, ok)
	}
}

func TestListRoot(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()
	testListRoot(t, f, false)
}

func TestListRootNoSlash(t *testing.T) {
	f, tidy := prepare(t)
	f.(*Fs).opt.NoSlash = true
	defer tidy()

	testListRoot(t, f, true)
}

func TestListSubDir(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()

	entries, err := f.List(context.Background(), "three")
	require.NoError(t, err)

	sort.Sort(entries)

	assert.Equal(t, 1, len(entries))

	e := entries[0]
	assert.Equal(t, "three/underthree.txt", e.Remote())
	assert.Equal(t, int64(9), e.Size())
	_, ok := e.(*Object)
	assert.True(t, ok)
}

func TestNewObject(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()

	o, err := f.NewObject(context.Background(), "four/under four.txt")
	require.NoError(t, err)

	assert.Equal(t, "four/under four.txt", o.Remote())
	assert.Equal(t, int64(9), o.Size())
	_, ok := o.(*Object)
	assert.True(t, ok)

	// Test the time is correct on the object

	tObj := o.ModTime(context.Background())

	fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
	require.NoError(t, err)
	tFile := fi.ModTime()

	dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
	assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))

	// check object not found
	o, err = f.NewObject(context.Background(), "not found.txt")
	assert.Nil(t, o)
	assert.Equal(t, fs.ErrorObjectNotFound, err)
}

func TestOpen(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()

	o, err := f.NewObject(context.Background(), "four/under four.txt")
	require.NoError(t, err)

	// Test normal read
	fd, err := o.Open(context.Background())
	require.NoError(t, err)
	data, err := ioutil.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, "beetroot\n", string(data))

	// Test with range request
	fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
	require.NoError(t, err)
	data, err = ioutil.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, "eetro", string(data))
}

func TestMimeType(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()

	o, err := f.NewObject(context.Background(), "four/under four.txt")
	require.NoError(t, err)

	do, ok := o.(fs.MimeTyper)
	require.True(t, ok)
	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}

func TestIsAFileRoot(t *testing.T) {
	m, tidy := prepareServer(t)
	defer tidy()

	f, err := NewFs(remoteName, "one%.txt", m)
	assert.Equal(t, err, fs.ErrorIsFile)

	testListRoot(t, f, false)
}

func TestIsAFileSubDir(t *testing.T) {
	m, tidy := prepareServer(t)
	defer tidy()

	f, err := NewFs(remoteName, "three/underthree.txt", m)
	assert.Equal(t, err, fs.ErrorIsFile)

	entries, err := f.List(context.Background(), "")
	require.NoError(t, err)

	sort.Sort(entries)

	assert.Equal(t, 1, len(entries))

	e := entries[0]
	assert.Equal(t, "underthree.txt", e.Remote())
	assert.Equal(t, int64(9), e.Size())
	_, ok := e.(*Object)
	assert.True(t, ok)
}

func TestParseName(t *testing.T) {
	for i, test := range []struct {
		base    string
		val     string
		wantErr error
		want    string
	}{
		{"http://example.com/", "potato", nil, "potato"},
		{"http://example.com/dir/", "potato", nil, "potato"},
		{"http://example.com/dir/", "potato?download=true", errFoundQuestionMark, ""},
		{"http://example.com/dir/", "../dir/potato", nil, "potato"},
		{"http://example.com/dir/", "..", errNotUnderRoot, ""},
		{"http://example.com/dir/", "http://example.com/", errNotUnderRoot, ""},
		{"http://example.com/dir/", "http://example.com/dir/", errNameIsEmpty, ""},
		{"http://example.com/dir/", "http://example.com/dir/potato", nil, "potato"},
		{"http://example.com/dir/", "https://example.com/dir/potato", errSchemeMismatch, ""},
		{"http://example.com/dir/", "http://notexample.com/dir/potato", errHostMismatch, ""},
		{"http://example.com/dir/", "/dir/", errNameIsEmpty, ""},
		{"http://example.com/dir/", "/dir/potato", nil, "potato"},
		{"http://example.com/dir/", "subdir/potato", errNameContainsSlash, ""},
		{"http://example.com/dir/", "With percent %25.txt", nil, "With percent %.txt"},
		{"http://example.com/dir/", "With colon :", errURLJoinFailed, ""},
		{"http://example.com/dir/", rest.URLPathEscape("With colon :"), nil, "With colon :"},
		{"http://example.com/Dungeons%20%26%20Dragons/", "/Dungeons%20&%20Dragons/D%26D%20Basic%20%28Holmes%2C%20B%2C%20X%2C%20BECMI%29/", nil, "D&D Basic (Holmes, B, X, BECMI)/"},
	} {
		u, err := url.Parse(test.base)
		require.NoError(t, err)
		got, gotErr := parseName(u, test.val)
		what := fmt.Sprintf("test %d base=%q, val=%q", i, test.base, test.val)
		assert.Equal(t, test.wantErr, gotErr, what)
		assert.Equal(t, test.want, got, what)
	}
}
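
// Illustrative note (not part of the original source): the bare "With colon :"
// case fails because a colon in the first segment of a relative reference is
// read as a URL scheme separator, so URLJoin reports an error; escaping the
// name with rest.URLPathEscape first, as in the following case, avoids this.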

// Load HTML from the file given and parse it, checking it against the entries passed in
func parseHTML(t *testing.T, name string, base string, want []string) {
	in, err := os.Open(filepath.Join(testPath, "index_files", name))
	require.NoError(t, err)
	defer func() {
		require.NoError(t, in.Close())
	}()
	if base == "" {
		base = "http://example.com/"
	}
	u, err := url.Parse(base)
	require.NoError(t, err)
	entries, err := parse(u, in)
	require.NoError(t, err)
	assert.Equal(t, want, entries)
}

func TestParseEmpty(t *testing.T) {
	parseHTML(t, "empty.html", "", []string(nil))
}

func TestParseApache(t *testing.T) {
	parseHTML(t, "apache.html", "http://example.com/nick/pub/", []string{
		"SWIG-embed.tar.gz",
		"avi2dvd.pl",
		"cambert.exe",
		"cambert.gz",
		"fedora_demo.gz",
		"gchq-challenge/",
		"mandelterm/",
		"pgp-key.txt",
		"pymath/",
		"rclone",
		"readdir.exe",
		"rush_hour_solver_cut_down.py",
		"snake-puzzle/",
		"stressdisk/",
		"timer-test",
		"words-to-regexp.pl",
		"Now 100% better.mp3",
		"Now better.mp3",
	})
}

func TestParseMemstore(t *testing.T) {
	parseHTML(t, "memstore.html", "", []string{
		"test/",
		"v1.35/",
		"v1.36-01-g503cd84/",
		"rclone-beta-latest-freebsd-386.zip",
		"rclone-beta-latest-freebsd-amd64.zip",
		"rclone-beta-latest-windows-amd64.zip",
	})
}

func TestParseNginx(t *testing.T) {
	parseHTML(t, "nginx.html", "", []string{
		"deltas/",
		"objects/",
		"refs/",
		"state/",
		"config",
		"summary",
	})
}

func TestParseCaddy(t *testing.T) {
	parseHTML(t, "caddy.html", "", []string{
		"mimetype.zip",
		"rclone-delete-empty-dirs.py",
		"rclone-show-empty-dirs.py",
		"stat-windows-386.zip",
		"v1.36-155-gcf29ee8b-team-driveβ/",
		"v1.36-156-gca76b3fb-team-driveβ/",
		"v1.36-156-ge1f0e0f5-team-driveβ/",
		"v1.36-22-g06ea13a-ssh-agentβ/",
	})
}
1
backend/http/test/files/four/under four.txt
Normal file
@@ -0,0 +1 @@
beetroot
1
backend/http/test/files/one%.txt
Normal file
@@ -0,0 +1 @@
hello