mirror of
https://github.com/rclone/rclone.git
synced 2026-01-07 02:54:04 +00:00
Compare commits
556 Commits
crypt-pass
...
v1.49.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a6387e1f81 | ||
|
|
a992a910ef | ||
|
|
ce3340621f | ||
|
|
73e010aff9 | ||
|
|
a3faf98aa0 | ||
|
|
ed85092edb | ||
|
|
193c30d570 | ||
|
|
beb8d5c134 | ||
|
|
93810a739d | ||
|
|
5d4d5d2b07 | ||
|
|
f02fc5d5b5 | ||
|
|
eab999f631 | ||
|
|
bd61eb89bc | ||
|
|
077b45322d | ||
|
|
67fae720d7 | ||
|
|
39ae7c7ac0 | ||
|
|
f67798d73e | ||
|
|
a1ca65bd80 | ||
|
|
566aa0fca7 | ||
|
|
8159658e67 | ||
|
|
6f16588123 | ||
|
|
e339c9ff8f | ||
|
|
3247e69cf5 | ||
|
|
341d880027 | ||
|
|
941dde6940 | ||
|
|
40cc8180f0 | ||
|
|
159f2e29a8 | ||
|
|
efd826ad4b | ||
|
|
5d6593de4f | ||
|
|
82c6c77e07 | ||
|
|
badc8b3293 | ||
|
|
27a9d0f570 | ||
|
|
6ca00c21a4 | ||
|
|
b619430bcf | ||
|
|
8a0775ce3c | ||
|
|
d8e9b1a67c | ||
|
|
e0e0e0c7bd | ||
|
|
eaaf2ded94 | ||
|
|
eaeef4811f | ||
|
|
d266a171c2 | ||
|
|
df8bdf0dcb | ||
|
|
743dabf159 | ||
|
|
9f549f848d | ||
|
|
af3c47d282 | ||
|
|
ba0e1ea6ae | ||
|
|
82b3bfec3c | ||
|
|
898782ac35 | ||
|
|
4e43fa746a | ||
|
|
acc9dadcdc | ||
|
|
712f7e38f7 | ||
|
|
24161d12ab | ||
|
|
fa539b9d9b | ||
|
|
3ea82032e7 | ||
|
|
71e172a139 | ||
|
|
6929f5d6e6 | ||
|
|
c2050172aa | ||
|
|
a72ef7ca0e | ||
|
|
b84cc0cae7 | ||
|
|
93228dfcc9 | ||
|
|
eb087a3b04 | ||
|
|
ec8e0a6c58 | ||
|
|
f0e0d6cc3c | ||
|
|
752d43d6fa | ||
|
|
7c146e2618 | ||
|
|
f9ceade9b4 | ||
|
|
ae9c0e56c8 | ||
|
|
402aaca7fe | ||
|
|
106cf1852d | ||
|
|
50b8f15b5d | ||
|
|
1e7bc359be | ||
|
|
23a0332185 | ||
|
|
6812844b3d | ||
|
|
3a04d0d1a9 | ||
|
|
6f4b86e569 | ||
|
|
9aa889bfa2 | ||
|
|
8247c8a6af | ||
|
|
535f5f3c99 | ||
|
|
7f7946564d | ||
|
|
bbb8d43716 | ||
|
|
5e0a30509c | ||
|
|
cd7ca2a320 | ||
|
|
a808e98fe1 | ||
|
|
3ebcb555f4 | ||
|
|
a1263e70cf | ||
|
|
f47e5220a2 | ||
|
|
4db742dc77 | ||
|
|
3ecbd603ab | ||
|
|
0693deea1c | ||
|
|
99eaa76dc8 | ||
|
|
ba3b0a175e | ||
|
|
01c0c0b009 | ||
|
|
7d85ccb11e | ||
|
|
0c1eaf1bcb | ||
|
|
873e87fc38 | ||
|
|
33677ff367 | ||
|
|
5195075677 | ||
|
|
f396550934 | ||
|
|
6f87267b34 | ||
|
|
9d1fb2f4e7 | ||
|
|
99b3154abd | ||
|
|
6c38bddf3e | ||
|
|
a00a0471a8 | ||
|
|
9e81fc343e | ||
|
|
fdef567da6 | ||
|
|
d377842395 | ||
|
|
c014b2e66b | ||
|
|
62b769a0a7 | ||
|
|
84b5da089e | ||
|
|
d0c65b4c5e | ||
|
|
e502be475a | ||
|
|
27a075e9fc | ||
|
|
5065c422b4 | ||
|
|
72d5b11d1b | ||
|
|
526a3347ac | ||
|
|
23910ba53b | ||
|
|
ee7101e6af | ||
|
|
36c1b37dd9 | ||
|
|
72782bdda6 | ||
|
|
b94eef16c1 | ||
|
|
d75fbe4852 | ||
|
|
e6ab237fcd | ||
|
|
a7eec91d69 | ||
|
|
b3e94b018c | ||
|
|
ca0e9ea55d | ||
|
|
53e3c2e263 | ||
|
|
02eb747d71 | ||
|
|
d51a970932 | ||
|
|
a9438cf364 | ||
|
|
5ef3c988eb | ||
|
|
78150e82a2 | ||
|
|
6f0cc51eeb | ||
|
|
84e2806c4b | ||
|
|
0386d22cc9 | ||
|
|
0be14120e4 | ||
|
|
95af1f9ccf | ||
|
|
629b7eacd8 | ||
|
|
d3149acc32 | ||
|
|
6a3e301303 | ||
|
|
5be968c0ca | ||
|
|
f1a687c540 | ||
|
|
94ee43fe54 | ||
|
|
c2635e39cc | ||
|
|
8c511ec9cd | ||
|
|
ac0dce78d0 | ||
|
|
f347514f62 | ||
|
|
57d5de6fba | ||
|
|
4ba6532915 | ||
|
|
ff235e4e56 | ||
|
|
68e641f6cf | ||
|
|
53a1a0e3ef | ||
|
|
8243ff8bc8 | ||
|
|
be0464f5f1 | ||
|
|
2d561b51db | ||
|
|
9241a93c2d | ||
|
|
fb32f77bac | ||
|
|
520fb03bfd | ||
|
|
a3449bda30 | ||
|
|
ccc416e62b | ||
|
|
a35aa1360e | ||
|
|
3df9dbf887 | ||
|
|
9af0a704af | ||
|
|
691e5ae5f0 | ||
|
|
5a44bafa4e | ||
|
|
8fdce31700 | ||
|
|
493dfb68fd | ||
|
|
71587344c6 | ||
|
|
8e8b78d7e5 | ||
|
|
266600dba7 | ||
|
|
e4f6ccbff2 | ||
|
|
1f1ab179a6 | ||
|
|
c642531a1e | ||
|
|
19ae053168 | ||
|
|
def790986c | ||
|
|
0a1169e659 | ||
|
|
5433021e8b | ||
|
|
c9f77719e4 | ||
|
|
3cd63a00be | ||
|
|
d7016866e0 | ||
|
|
d72e4105fb | ||
|
|
b4266da4eb | ||
|
|
3f5767b94e | ||
|
|
1510e12659 | ||
|
|
ede03258bc | ||
|
|
7fcbb47b1c | ||
|
|
9cafeeb4b6 | ||
|
|
a1cfe61ffd | ||
|
|
5eebbaaac4 | ||
|
|
bc70bff125 | ||
|
|
cf15b88efa | ||
|
|
dcaee0016a | ||
|
|
387b496d1e | ||
|
|
734f504d5f | ||
|
|
7153909390 | ||
|
|
ea35e807db | ||
|
|
5df5a3b78e | ||
|
|
37c1144b46 | ||
|
|
8d116ba0c9 | ||
|
|
6a3c3d9b89 | ||
|
|
a6dca4c13f | ||
|
|
cc0800a72e | ||
|
|
1be1fc073e | ||
|
|
70c6b01f54 | ||
|
|
7b2b396d37 | ||
|
|
af2596f98b | ||
|
|
61fb326a80 | ||
|
|
de14378734 | ||
|
|
eea1b6de32 | ||
|
|
6bae3595a8 | ||
|
|
dde4dd0198 | ||
|
|
2d0e9885bd | ||
|
|
9ed81ac451 | ||
|
|
3245c0ae0d | ||
|
|
6ff7b2eaab | ||
|
|
38ebdf54be | ||
|
|
6cd7c3b774 | ||
|
|
07e2c3a50f | ||
|
|
cd762f04b8 | ||
|
|
6907242cae | ||
|
|
d61ba7ef78 | ||
|
|
b221d79273 | ||
|
|
940d88b695 | ||
|
|
ca324b5084 | ||
|
|
9f4589a997 | ||
|
|
fc44eb4093 | ||
|
|
a1840f6fc7 | ||
|
|
0cb7130dd2 | ||
|
|
2655bea86f | ||
|
|
08bf8faa2f | ||
|
|
4e64ee38e2 | ||
|
|
276f8cccf6 | ||
|
|
0ae844d1f8 | ||
|
|
4ee6de5c3e | ||
|
|
71a19a1972 | ||
|
|
ba72e62b41 | ||
|
|
5935cb0a29 | ||
|
|
f78cd1e043 | ||
|
|
a2c317b46e | ||
|
|
6a2a075c14 | ||
|
|
628530362a | ||
|
|
4549305fec | ||
|
|
245fed513a | ||
|
|
52332a4b24 | ||
|
|
3087c5d559 | ||
|
|
75606dcc27 | ||
|
|
f3719fe269 | ||
|
|
d2be792d5e | ||
|
|
2793d4b4cc | ||
|
|
30ac9d920a | ||
|
|
6e8e620e71 | ||
|
|
5597d6d871 | ||
|
|
622e0d19ce | ||
|
|
ce400a8fdc | ||
|
|
49c05cb89b | ||
|
|
d533de0f5c | ||
|
|
1a4fe4bc6c | ||
|
|
93207ead9c | ||
|
|
22368b997c | ||
|
|
a5bed67016 | ||
|
|
44f6491731 | ||
|
|
12c2a750f5 | ||
|
|
92bbae5cca | ||
|
|
939b19c3b7 | ||
|
|
64fb4effa7 | ||
|
|
4d195d5a52 | ||
|
|
976a020a2f | ||
|
|
550ab441c5 | ||
|
|
e24cadc7a1 | ||
|
|
903ede52cd | ||
|
|
f681d32996 | ||
|
|
2c72e7f0a2 | ||
|
|
db8cd1a993 | ||
|
|
2890b69c48 | ||
|
|
66b3795eb8 | ||
|
|
45f41c2c4a | ||
|
|
34f03ce590 | ||
|
|
e2fde62cd9 | ||
|
|
4b27c6719b | ||
|
|
fb6966b5fe | ||
|
|
454dfd3c9e | ||
|
|
e1cf551ded | ||
|
|
bd10344d65 | ||
|
|
1aa65d60e1 | ||
|
|
aa81957586 | ||
|
|
b7800e96d7 | ||
|
|
fb1bbecb41 | ||
|
|
e4c2468244 | ||
|
|
ac4c8d8dfc | ||
|
|
e2b6172f7d | ||
|
|
32f2895472 | ||
|
|
1124c423ee | ||
|
|
cd5a2d80ca | ||
|
|
1fe0773da6 | ||
|
|
5a941cdcdc | ||
|
|
62681e45fb | ||
|
|
1a2fb52266 | ||
|
|
ec4e7316f2 | ||
|
|
11264c4fb8 | ||
|
|
25f7f2b60a | ||
|
|
e7c20e0bce | ||
|
|
8ee6034b23 | ||
|
|
206e1caa99 | ||
|
|
f0e439de0d | ||
|
|
e5464a2a35 | ||
|
|
78d38dda56 | ||
|
|
60bb01b22c | ||
|
|
95a74e02c7 | ||
|
|
d014aef011 | ||
|
|
be8c23f0b4 | ||
|
|
da3b685cd8 | ||
|
|
9aac2d6965 | ||
|
|
81fad0f0e3 | ||
|
|
cff85f0b95 | ||
|
|
9c0dac4ccd | ||
|
|
5ccc2dcb8f | ||
|
|
8c5503631a | ||
|
|
2f3d794ec6 | ||
|
|
abeb12c6df | ||
|
|
9c6f3ae82c | ||
|
|
870b15313e | ||
|
|
62a7e44e86 | ||
|
|
296e4936a0 | ||
|
|
a0b9d4a239 | ||
|
|
99bc013c0a | ||
|
|
d9cad9d10b | ||
|
|
0e23c4542f | ||
|
|
f544234e26 | ||
|
|
dbf9800cbc | ||
|
|
1f19b63264 | ||
|
|
5c0e5b85f7 | ||
|
|
edda6d91cd | ||
|
|
1fefa6adfd | ||
|
|
af030f74f5 | ||
|
|
ada8c22a97 | ||
|
|
610466c18c | ||
|
|
9950bb9b7c | ||
|
|
7d70e92664 | ||
|
|
687cbf3ded | ||
|
|
c3af0a1eca | ||
|
|
822483aac5 | ||
|
|
2eb31a4f1d | ||
|
|
0655738da6 | ||
|
|
7c4fe3eb75 | ||
|
|
72721f4c8d | ||
|
|
0c60c00187 | ||
|
|
0d511b7878 | ||
|
|
bd2a7ffcf4 | ||
|
|
7a5ee968e7 | ||
|
|
c809334b3d | ||
|
|
b88e50cc36 | ||
|
|
bbe28df800 | ||
|
|
f865280afa | ||
|
|
8beab1aaf2 | ||
|
|
b9e16b36e5 | ||
|
|
b68c3ce74d | ||
|
|
d04b0b856a | ||
|
|
d0ff07bdb0 | ||
|
|
577fda059d | ||
|
|
49d2ab512d | ||
|
|
9df322e889 | ||
|
|
8f89b03d7b | ||
|
|
48c09608ea | ||
|
|
7963320a29 | ||
|
|
81f8a5e0d9 | ||
|
|
2763598bd1 | ||
|
|
49d7b0d278 | ||
|
|
3d475dc0ee | ||
|
|
2657d70567 | ||
|
|
45df37f55f | ||
|
|
81007c10cb | ||
|
|
aba15f11d8 | ||
|
|
a57756a05c | ||
|
|
eeab7a0a43 | ||
|
|
ac8d1db8d3 | ||
|
|
cd0d43fffb | ||
|
|
cdf12b1fc8 | ||
|
|
7981e450a4 | ||
|
|
e7fc3dcd31 | ||
|
|
2386c5adc1 | ||
|
|
2f21aa86b4 | ||
|
|
16d8014cbb | ||
|
|
613a9bb86b | ||
|
|
8190a81201 | ||
|
|
f5795db6d2 | ||
|
|
e2a2eb349f | ||
|
|
a0d4fdb2fa | ||
|
|
a28239f005 | ||
|
|
b05da61c82 | ||
|
|
41f01da625 | ||
|
|
901811bb26 | ||
|
|
0d4a3520ad | ||
|
|
5855714474 | ||
|
|
120de505a9 | ||
|
|
6e86526c9d | ||
|
|
0862dc9b2b | ||
|
|
1c301f9f7a | ||
|
|
9f6b09dfaf | ||
|
|
3d424c6e08 | ||
|
|
6fb1c8f51c | ||
|
|
626f0d1886 | ||
|
|
9ee9fe3885 | ||
|
|
b0380aad95 | ||
|
|
2065e73d0b | ||
|
|
d3e3bbedf3 | ||
|
|
8d29d69ade | ||
|
|
6e70d88f54 | ||
|
|
595fea757d | ||
|
|
bb80586473 | ||
|
|
0d475958c7 | ||
|
|
2728948fb0 | ||
|
|
3756f211b5 | ||
|
|
2faf2aed80 | ||
|
|
1bd8183af1 | ||
|
|
5aa706831f | ||
|
|
ac7e1dbf62 | ||
|
|
14ef4437e5 | ||
|
|
a0d2ab5b4f | ||
|
|
3bfde5f52a | ||
|
|
2b05bd9a08 | ||
|
|
1318be3b0a | ||
|
|
f4a754a36b | ||
|
|
fef73763aa | ||
|
|
7267d19ad8 | ||
|
|
47099466c0 | ||
|
|
4376019062 | ||
|
|
e5f4210b09 | ||
|
|
d5f2df2f3d | ||
|
|
efd720b533 | ||
|
|
047f00a411 | ||
|
|
bb5ac8efbe | ||
|
|
e62bbf761b | ||
|
|
54a2e99d97 | ||
|
|
28230d93b4 | ||
|
|
3c4407442d | ||
|
|
caf318d499 | ||
|
|
2fbb504b66 | ||
|
|
2b58d1a46f | ||
|
|
1582a21408 | ||
|
|
229898dcee | ||
|
|
95194adfd5 | ||
|
|
4827496234 | ||
|
|
415eeca6cf | ||
|
|
58d9a3e1b5 | ||
|
|
cccadfa7ae | ||
|
|
1b52f8d2a5 | ||
|
|
2078ad68a5 | ||
|
|
368ed9e67d | ||
|
|
7c30993bb7 | ||
|
|
55b9a4ed30 | ||
|
|
118a8b949e | ||
|
|
1d14e30383 | ||
|
|
27714e29c3 | ||
|
|
9f8e1a1dc5 | ||
|
|
1692c6bd0a | ||
|
|
d233efbf63 | ||
|
|
e9a45a5a34 | ||
|
|
f6eb5c6983 | ||
|
|
2bf19787d5 | ||
|
|
0ea3a57ecb | ||
|
|
b353c730d8 | ||
|
|
173dfbd051 | ||
|
|
e3bceb9083 | ||
|
|
52c6b373cc | ||
|
|
0bc0f62277 | ||
|
|
12c8ee4b4b | ||
|
|
5240f9d1e5 | ||
|
|
997654d77d | ||
|
|
f1809451f6 | ||
|
|
84c650818e | ||
|
|
c5775cf73d | ||
|
|
dca482e058 | ||
|
|
6943169cef | ||
|
|
4fddec113c | ||
|
|
2114fd8f26 | ||
|
|
63bb6de491 | ||
|
|
0a56a168ff | ||
|
|
88e22087a8 | ||
|
|
9404ed703a | ||
|
|
c7ecccd5ca | ||
|
|
972e27a861 | ||
|
|
8f4ea77c07 | ||
|
|
61616ba864 | ||
|
|
9ed721a3f6 | ||
|
|
0b9d7fec0c | ||
|
|
240c15883f | ||
|
|
38864adc9c | ||
|
|
5991315990 | ||
|
|
73f0a67d98 | ||
|
|
ffe067d6e7 | ||
|
|
b5f563fb0f | ||
|
|
9310c7f3e2 | ||
|
|
1c1a8ef24b | ||
|
|
2cfbc2852d | ||
|
|
b167d30420 | ||
|
|
ec59760d9c | ||
|
|
076d3da825 | ||
|
|
c3eecbe933 | ||
|
|
d8e5b19ed4 | ||
|
|
43bc381e90 | ||
|
|
fb5ee22112 | ||
|
|
35327dad6f | ||
|
|
ef5e1909a0 | ||
|
|
bca5d8009e | ||
|
|
334f19c974 | ||
|
|
42a5bf1d9f | ||
|
|
71d1890316 | ||
|
|
d29c545627 | ||
|
|
eb85ecc9c4 | ||
|
|
0dc08e1e61 | ||
|
|
76532408ef | ||
|
|
60a4a8a86d | ||
|
|
a0d4c04687 | ||
|
|
f3874707ee | ||
|
|
f8c2689e77 | ||
|
|
8ec55ae20b | ||
|
|
fc1bf5f931 | ||
|
|
578d00666c | ||
|
|
f5c853b5c8 | ||
|
|
23c0cd2482 | ||
|
|
8217f361cc | ||
|
|
a0016e00d1 | ||
|
|
99c37028ee | ||
|
|
cfba337ef0 | ||
|
|
fd370fcad2 | ||
|
|
c680bb3254 | ||
|
|
7d5d6c041f | ||
|
|
bdc638530e | ||
|
|
315cee23a0 | ||
|
|
2135879dda | ||
|
|
da90069462 | ||
|
|
08c4854e00 | ||
|
|
a838add230 | ||
|
|
d68b091170 | ||
|
|
d809bed438 | ||
|
|
3aa1818870 | ||
|
|
96f6708461 | ||
|
|
6641a25f8c | ||
|
|
cd46ce916b | ||
|
|
318d1bb6f9 | ||
|
|
b8b53901e8 | ||
|
|
6e153781a7 | ||
|
|
f27c2d9760 | ||
|
|
eb91356e28 | ||
|
|
bed2971bf0 | ||
|
|
f0696dfe30 | ||
|
|
a43ed567ee | ||
|
|
fffdbb31f5 | ||
|
|
cacefb9a82 | ||
|
|
d966cef14c | ||
|
|
a551978a3f | ||
|
|
97752ca8fb | ||
|
|
8d5d332daf | ||
|
|
6b3a9bf26a | ||
|
|
c1d9a1e174 | ||
|
|
98120bb864 | ||
|
|
f8ced557e3 | ||
|
|
7b20139c6a |
@@ -2,7 +2,7 @@ version: "{build}"
|
|||||||
|
|
||||||
os: Windows Server 2012 R2
|
os: Windows Server 2012 R2
|
||||||
|
|
||||||
clone_folder: c:\gopath\src\github.com\ncw\rclone
|
clone_folder: c:\gopath\src\github.com\rclone\rclone
|
||||||
|
|
||||||
cache:
|
cache:
|
||||||
- '%LocalAppData%\go-build'
|
- '%LocalAppData%\go-build'
|
||||||
@@ -16,7 +16,7 @@ environment:
|
|||||||
PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
|
PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
|
||||||
PATH: '%PATHCC64%'
|
PATH: '%PATHCC64%'
|
||||||
RCLONE_CONFIG_PASS:
|
RCLONE_CONFIG_PASS:
|
||||||
secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
|
secure: sq9CPBbwaeKJv+yd24U44neORYPQVy6jsjnQptC+5yk=
|
||||||
|
|
||||||
install:
|
install:
|
||||||
- choco install winfsp -y
|
- choco install winfsp -y
|
||||||
@@ -46,4 +46,4 @@ artifacts:
|
|||||||
- path: build/*-v*.zip
|
- path: build/*-v*.zip
|
||||||
|
|
||||||
deploy_script:
|
deploy_script:
|
||||||
- IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
|
- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ jobs:
|
|||||||
build:
|
build:
|
||||||
machine: true
|
machine: true
|
||||||
|
|
||||||
working_directory: ~/.go_workspace/src/github.com/ncw/rclone
|
working_directory: ~/.go_workspace/src/github.com/rclone/rclone
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
|
|||||||
7
.gitattributes
vendored
Normal file
7
.gitattributes
vendored
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
# Ignore generated files in GitHub language statistics and diffs
|
||||||
|
/MANUAL.* linguist-generated=true
|
||||||
|
/rclone.1 linguist-generated=true
|
||||||
|
|
||||||
|
# Don't fiddle with the line endings of test data
|
||||||
|
**/testdata/** -text
|
||||||
|
**/test/** -text
|
||||||
2
.github/ISSUE_TEMPLATE.md
vendored
2
.github/ISSUE_TEMPLATE.md
vendored
@@ -10,7 +10,7 @@ instead of filing an issue for a quick response.
|
|||||||
|
|
||||||
If you are reporting a bug or asking for a new feature then please use one of the templates here:
|
If you are reporting a bug or asking for a new feature then please use one of the templates here:
|
||||||
|
|
||||||
https://github.com/ncw/rclone/issues/new
|
https://github.com/rclone/rclone/issues/new
|
||||||
|
|
||||||
otherwise fill in the form below.
|
otherwise fill in the form below.
|
||||||
|
|
||||||
|
|||||||
4
.github/PULL_REQUEST_TEMPLATE.md
vendored
4
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -22,8 +22,8 @@ Link issues and relevant forum posts here.
|
|||||||
|
|
||||||
#### Checklist
|
#### Checklist
|
||||||
|
|
||||||
- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
|
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
|
||||||
- [ ] I have added tests for all changes in this PR if appropriate.
|
- [ ] I have added tests for all changes in this PR if appropriate.
|
||||||
- [ ] I have added documentation for the changes if appropriate.
|
- [ ] I have added documentation for the changes if appropriate.
|
||||||
- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
|
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
|
||||||
- [ ] I'm done, this Pull Request is ready for review :-)
|
- [ ] I'm done, this Pull Request is ready for review :-)
|
||||||
|
|||||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -5,3 +5,6 @@ build
|
|||||||
docs/public
|
docs/public
|
||||||
rclone.iml
|
rclone.iml
|
||||||
.idea
|
.idea
|
||||||
|
.history
|
||||||
|
*.test
|
||||||
|
*.log
|
||||||
26
.golangci.yml
Normal file
26
.golangci.yml
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
# golangci-lint configuration options
|
||||||
|
|
||||||
|
linters:
|
||||||
|
enable:
|
||||||
|
- deadcode
|
||||||
|
- errcheck
|
||||||
|
- goimports
|
||||||
|
- golint
|
||||||
|
- ineffassign
|
||||||
|
- structcheck
|
||||||
|
- varcheck
|
||||||
|
- govet
|
||||||
|
- unconvert
|
||||||
|
#- prealloc
|
||||||
|
#- maligned
|
||||||
|
disable-all: true
|
||||||
|
|
||||||
|
issues:
|
||||||
|
# Enable some lints excluded by default
|
||||||
|
exclude-use-default: false
|
||||||
|
|
||||||
|
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
||||||
|
max-per-linter: 0
|
||||||
|
|
||||||
|
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||||
|
max-same-issues: 0
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
{
|
|
||||||
"Enable": [
|
|
||||||
"deadcode",
|
|
||||||
"errcheck",
|
|
||||||
"goimports",
|
|
||||||
"golint",
|
|
||||||
"ineffassign",
|
|
||||||
"structcheck",
|
|
||||||
"varcheck",
|
|
||||||
"vet"
|
|
||||||
],
|
|
||||||
"EnableGC": true,
|
|
||||||
"Vendor": true
|
|
||||||
}
|
|
||||||
136
.travis.yml
136
.travis.yml
@@ -1,58 +1,128 @@
|
|||||||
|
---
|
||||||
language: go
|
language: go
|
||||||
sudo: required
|
sudo: required
|
||||||
dist: trusty
|
dist: xenial
|
||||||
os:
|
os:
|
||||||
- linux
|
- linux
|
||||||
go:
|
go_import_path: github.com/rclone/rclone
|
||||||
- 1.8.x
|
|
||||||
- 1.9.x
|
|
||||||
- 1.10.x
|
|
||||||
- 1.11.x
|
|
||||||
- tip
|
|
||||||
go_import_path: github.com/ncw/rclone
|
|
||||||
before_install:
|
before_install:
|
||||||
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
|
- git fetch --unshallow --tags
|
||||||
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
|
- |
|
||||||
|
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
|
||||||
|
sudo modprobe fuse
|
||||||
|
sudo chmod 666 /dev/fuse
|
||||||
|
sudo chown root:$USER /etc/fuse.conf
|
||||||
|
fi
|
||||||
|
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
|
||||||
|
brew update
|
||||||
|
brew tap caskroom/cask
|
||||||
|
brew cask install osxfuse
|
||||||
|
fi
|
||||||
|
if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
|
||||||
|
choco install -y winfsp zip make
|
||||||
|
cd ../.. # fix crlf in git checkout
|
||||||
|
mv $TRAVIS_REPO_SLUG _old
|
||||||
|
git config --global core.autocrlf false
|
||||||
|
git clone _old $TRAVIS_REPO_SLUG
|
||||||
|
cd $TRAVIS_REPO_SLUG
|
||||||
|
fi
|
||||||
install:
|
install:
|
||||||
- git fetch --unshallow --tags
|
- make vars
|
||||||
- make vars
|
|
||||||
- make build_dep
|
|
||||||
script:
|
|
||||||
- make check
|
|
||||||
- make quicktest
|
|
||||||
- make compile_all
|
|
||||||
env:
|
env:
|
||||||
global:
|
global:
|
||||||
- GOTAGS=cmount
|
- GOTAGS=cmount
|
||||||
|
- GOMAXPROCS=8 # workaround for cmd/mount tests locking up - see #3154
|
||||||
|
- GO111MODULE=off
|
||||||
|
- GITHUB_USER=ncw
|
||||||
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
|
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
|
||||||
- secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
|
|
||||||
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
|
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
|
||||||
addons:
|
addons:
|
||||||
apt:
|
apt:
|
||||||
packages:
|
packages:
|
||||||
- fuse
|
- fuse
|
||||||
- libfuse-dev
|
- libfuse-dev
|
||||||
- rpm
|
- rpm
|
||||||
- pkg-config
|
- pkg-config
|
||||||
cache:
|
cache:
|
||||||
directories:
|
directories:
|
||||||
- $HOME/.cache/go-build
|
- $HOME/.cache/go-build
|
||||||
matrix:
|
matrix:
|
||||||
allow_failures:
|
allow_failures:
|
||||||
- go: tip
|
- go: tip
|
||||||
include:
|
include:
|
||||||
- os: osx
|
- go: 1.9.x
|
||||||
go: 1.11.x
|
script:
|
||||||
env: GOTAGS=""
|
- make quicktest
|
||||||
cache:
|
- go: 1.10.x
|
||||||
directories:
|
script:
|
||||||
- $HOME/Library/Caches/go-build
|
- make quicktest
|
||||||
|
- go: 1.11.x
|
||||||
|
script:
|
||||||
|
- make quicktest
|
||||||
|
- go: 1.12.x
|
||||||
|
name: Linux
|
||||||
|
env:
|
||||||
|
- GOTAGS=cmount
|
||||||
|
- BUILD_FLAGS='-include "^linux/"'
|
||||||
|
- DEPLOY=true
|
||||||
|
script:
|
||||||
|
- make build_dep
|
||||||
|
- make check
|
||||||
|
- make quicktest
|
||||||
|
- go: 1.12.x
|
||||||
|
name: Go Modules / Race
|
||||||
|
env:
|
||||||
|
- GO111MODULE=on
|
||||||
|
- GOPROXY=https://proxy.golang.org
|
||||||
|
script:
|
||||||
|
- make quicktest
|
||||||
|
- make racequicktest
|
||||||
|
- go: 1.12.x
|
||||||
|
name: Other OS
|
||||||
|
env:
|
||||||
|
- DEPLOY=true
|
||||||
|
- BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
|
||||||
|
script:
|
||||||
|
- make
|
||||||
|
- make compile_all
|
||||||
|
- go: 1.12.x
|
||||||
|
name: macOS
|
||||||
|
os: osx
|
||||||
|
env:
|
||||||
|
- GOTAGS= # cmount doesn't work on osx travis for some reason
|
||||||
|
- BUILD_FLAGS='-include "^darwin/" -cgo'
|
||||||
|
- DEPLOY=true
|
||||||
|
cache:
|
||||||
|
directories:
|
||||||
|
- $HOME/Library/Caches/go-build
|
||||||
|
script:
|
||||||
|
- make
|
||||||
|
- make quicktest
|
||||||
|
- make racequicktest
|
||||||
|
# - os: windows
|
||||||
|
# name: Windows
|
||||||
|
# go: 1.12.x
|
||||||
|
# env:
|
||||||
|
# - GOTAGS=cmount
|
||||||
|
# - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
|
||||||
|
# - BUILD_FLAGS='-include "^windows/amd64" -cgo' # 386 doesn't build yet
|
||||||
|
# #filter_secrets: false # works around a problem with secrets under windows
|
||||||
|
# cache:
|
||||||
|
# directories:
|
||||||
|
# - ${LocalAppData}/go-build
|
||||||
|
# script:
|
||||||
|
# - make
|
||||||
|
# - make quicktest
|
||||||
|
# - make racequicktest
|
||||||
|
- go: tip
|
||||||
|
script:
|
||||||
|
- make quicktest
|
||||||
|
|
||||||
deploy:
|
deploy:
|
||||||
provider: script
|
provider: script
|
||||||
script: make travis_beta
|
script: make travis_beta
|
||||||
skip_cleanup: true
|
skip_cleanup: true
|
||||||
on:
|
on:
|
||||||
repo: ncw/rclone
|
repo: rclone/rclone
|
||||||
all_branches: true
|
all_branches: true
|
||||||
go: 1.11.x
|
condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true
|
||||||
condition: $TRAVIS_PULL_REQUEST == false
|
|
||||||
|
|||||||
@@ -29,12 +29,12 @@ You'll need a Go environment set up with GOPATH set. See [the Go
|
|||||||
getting started docs](https://golang.org/doc/install) for more info.
|
getting started docs](https://golang.org/doc/install) for more info.
|
||||||
|
|
||||||
First in your web browser press the fork button on [rclone's GitHub
|
First in your web browser press the fork button on [rclone's GitHub
|
||||||
page](https://github.com/ncw/rclone).
|
page](https://github.com/rclone/rclone).
|
||||||
|
|
||||||
Now in your terminal
|
Now in your terminal
|
||||||
|
|
||||||
go get -u github.com/ncw/rclone
|
go get -u github.com/rclone/rclone
|
||||||
cd $GOPATH/src/github.com/ncw/rclone
|
cd $GOPATH/src/github.com/rclone/rclone
|
||||||
git remote rename origin upstream
|
git remote rename origin upstream
|
||||||
git remote add origin git@github.com:YOURUSER/rclone.git
|
git remote add origin git@github.com:YOURUSER/rclone.git
|
||||||
|
|
||||||
@@ -118,7 +118,7 @@ but they can be run against any of the remotes.
|
|||||||
|
|
||||||
cd fs/sync
|
cd fs/sync
|
||||||
go test -v -remote TestDrive:
|
go test -v -remote TestDrive:
|
||||||
go test -v -remote TestDrive: -subdir
|
go test -v -remote TestDrive: -fast-list
|
||||||
|
|
||||||
cd fs/operations
|
cd fs/operations
|
||||||
go test -v -remote TestDrive:
|
go test -v -remote TestDrive:
|
||||||
@@ -127,7 +127,7 @@ If you want to use the integration test framework to run these tests
|
|||||||
all together with an HTML report and test retries then from the
|
all together with an HTML report and test retries then from the
|
||||||
project root:
|
project root:
|
||||||
|
|
||||||
go install github.com/ncw/rclone/fstest/test_all
|
go install github.com/rclone/rclone/fstest/test_all
|
||||||
test_all -backend drive
|
test_all -backend drive
|
||||||
|
|
||||||
If you want to run all the integration tests against all the remotes,
|
If you want to run all the integration tests against all the remotes,
|
||||||
@@ -135,7 +135,7 @@ then change into the project root and run
|
|||||||
|
|
||||||
make test
|
make test
|
||||||
|
|
||||||
This command is run daily on the the integration test server. You can
|
This command is run daily on the integration test server. You can
|
||||||
find the results at https://pub.rclone.org/integration-tests/
|
find the results at https://pub.rclone.org/integration-tests/
|
||||||
|
|
||||||
## Code Organisation ##
|
## Code Organisation ##
|
||||||
@@ -362,9 +362,7 @@ Or if you want to run the integration tests manually:
|
|||||||
* `go test -v -remote TestRemote:`
|
* `go test -v -remote TestRemote:`
|
||||||
* `cd fs/sync`
|
* `cd fs/sync`
|
||||||
* `go test -v -remote TestRemote:`
|
* `go test -v -remote TestRemote:`
|
||||||
* If you are making a bucket based remote, then check with this also
|
* If your remote defines `ListR` check with this also
|
||||||
* `go test -v -remote TestRemote: -subdir`
|
|
||||||
* And if your remote defines `ListR` this also
|
|
||||||
* `go test -v -remote TestRemote: -fast-list`
|
* `go test -v -remote TestRemote: -fast-list`
|
||||||
|
|
||||||
See the [testing](#testing) section for more information on integration tests.
|
See the [testing](#testing) section for more information on integration tests.
|
||||||
|
|||||||
@@ -51,7 +51,7 @@ The milestones have these meanings:
|
|||||||
* Help wanted - blue sky stuff that might get moved up, or someone could help with
|
* Help wanted - blue sky stuff that might get moved up, or someone could help with
|
||||||
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
|
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
|
||||||
|
|
||||||
Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
|
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
|
||||||
|
|
||||||
## Closing Tickets ##
|
## Closing Tickets ##
|
||||||
|
|
||||||
|
|||||||
8709
MANUAL.html
generated
8709
MANUAL.html
generated
File diff suppressed because it is too large
Load Diff
9288
MANUAL.txt
generated
9288
MANUAL.txt
generated
File diff suppressed because it is too large
Load Diff
73
Makefile
73
Makefile
@@ -1,5 +1,5 @@
|
|||||||
SHELL = bash
|
SHELL = bash
|
||||||
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
|
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(shell git rev-parse --abbrev-ref HEAD))
|
||||||
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
||||||
ifeq ($(BRANCH),$(LAST_TAG))
|
ifeq ($(BRANCH),$(LAST_TAG))
|
||||||
BRANCH := master
|
BRANCH := master
|
||||||
@@ -11,28 +11,30 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
|
|||||||
BRANCH_PATH :=
|
BRANCH_PATH :=
|
||||||
endif
|
endif
|
||||||
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
|
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
|
||||||
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
|
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
|
||||||
ifneq ($(TAG),$(LAST_TAG))
|
ifneq ($(TAG),$(LAST_TAG))
|
||||||
TAG := $(TAG)-beta
|
TAG := $(TAG)-beta
|
||||||
endif
|
endif
|
||||||
GO_VERSION := $(shell go version)
|
GO_VERSION := $(shell go version)
|
||||||
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
|
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
|
||||||
# Run full tests if go >= go1.11
|
ifdef BETA_SUBDIR
|
||||||
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
|
BETA_SUBDIR := /$(BETA_SUBDIR)
|
||||||
BETA_PATH := $(BRANCH_PATH)$(TAG)
|
endif
|
||||||
|
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
|
||||||
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
||||||
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
|
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
|
||||||
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
|
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
|
||||||
# Pass in GOTAGS=xyz on the make command line to set build tags
|
# Pass in GOTAGS=xyz on the make command line to set build tags
|
||||||
ifdef GOTAGS
|
ifdef GOTAGS
|
||||||
BUILDTAGS=-tags "$(GOTAGS)"
|
BUILDTAGS=-tags "$(GOTAGS)"
|
||||||
|
LINTTAGS=--build-tags "$(GOTAGS)"
|
||||||
endif
|
endif
|
||||||
|
|
||||||
.PHONY: rclone vars version
|
.PHONY: rclone vars version
|
||||||
|
|
||||||
rclone:
|
rclone:
|
||||||
touch fs/version.go
|
touch fs/version.go
|
||||||
go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
|
go install -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
|
||||||
cp -av `go env GOPATH`/bin/rclone .
|
cp -av `go env GOPATH`/bin/rclone .
|
||||||
|
|
||||||
vars:
|
vars:
|
||||||
@@ -42,7 +44,6 @@ vars:
|
|||||||
@echo LAST_TAG="'$(LAST_TAG)'"
|
@echo LAST_TAG="'$(LAST_TAG)'"
|
||||||
@echo NEW_TAG="'$(NEW_TAG)'"
|
@echo NEW_TAG="'$(NEW_TAG)'"
|
||||||
@echo GO_VERSION="'$(GO_VERSION)'"
|
@echo GO_VERSION="'$(GO_VERSION)'"
|
||||||
@echo FULL_TESTS="'$(FULL_TESTS)'"
|
|
||||||
@echo BETA_URL="'$(BETA_URL)'"
|
@echo BETA_URL="'$(BETA_URL)'"
|
||||||
|
|
||||||
version:
|
version:
|
||||||
@@ -50,45 +51,26 @@ version:
|
|||||||
|
|
||||||
# Full suite of integration tests
|
# Full suite of integration tests
|
||||||
test: rclone
|
test: rclone
|
||||||
go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
|
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
|
||||||
-test_all 2>&1 | tee test_all.log
|
-test_all 2>&1 | tee test_all.log
|
||||||
@echo "Written logs in test_all.log"
|
@echo "Written logs in test_all.log"
|
||||||
|
|
||||||
# Quick test
|
# Quick test
|
||||||
quicktest:
|
quicktest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
|
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
|
||||||
ifdef FULL_TESTS
|
|
||||||
|
racequicktest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
|
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
|
||||||
endif
|
|
||||||
|
|
||||||
# Do source code quality checks
|
# Do source code quality checks
|
||||||
check: rclone
|
check: rclone
|
||||||
ifdef FULL_TESTS
|
@echo "-- START CODE QUALITY REPORT -------------------------------"
|
||||||
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
|
@golangci-lint run $(LINTTAGS) ./...
|
||||||
errcheck $(BUILDTAGS) ./...
|
@echo "-- END CODE QUALITY REPORT ---------------------------------"
|
||||||
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
|
|
||||||
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl|ApplicationCredentialId)' ; test $$? -eq 1
|
|
||||||
else
|
|
||||||
@echo Skipping source quality tests as version of go too old
|
|
||||||
endif
|
|
||||||
|
|
||||||
gometalinter_install:
|
|
||||||
go get -u github.com/alecthomas/gometalinter
|
|
||||||
gometalinter --install --update
|
|
||||||
|
|
||||||
# We aren't using gometalinter as the default linter yet because
|
|
||||||
# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
|
|
||||||
# 2. can't get -printfuncs working with the vet linter
|
|
||||||
gometalinter:
|
|
||||||
gometalinter ./...
|
|
||||||
|
|
||||||
# Get the build dependencies
|
# Get the build dependencies
|
||||||
build_dep:
|
build_dep:
|
||||||
ifdef FULL_TESTS
|
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
|
||||||
go get -u github.com/kisielk/errcheck
|
|
||||||
go get -u golang.org/x/tools/cmd/goimports
|
|
||||||
go get -u golang.org/x/lint/golint
|
|
||||||
endif
|
|
||||||
|
|
||||||
# Get the release dependencies
|
# Get the release dependencies
|
||||||
release_dep:
|
release_dep:
|
||||||
@@ -116,10 +98,10 @@ MANUAL.txt: MANUAL.md
|
|||||||
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
|
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
|
||||||
|
|
||||||
commanddocs: rclone
|
commanddocs: rclone
|
||||||
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
|
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
|
||||||
|
|
||||||
backenddocs: rclone bin/make_backend_docs.py
|
backenddocs: rclone bin/make_backend_docs.py
|
||||||
./bin/make_backend_docs.py
|
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
|
||||||
|
|
||||||
rcdocs: rclone
|
rcdocs: rclone
|
||||||
bin/make_rc_docs.sh
|
bin/make_rc_docs.sh
|
||||||
@@ -172,11 +154,7 @@ log_since_last_release:
|
|||||||
git log $(LAST_TAG)..
|
git log $(LAST_TAG)..
|
||||||
|
|
||||||
compile_all:
|
compile_all:
|
||||||
ifdef FULL_TESTS
|
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
|
||||||
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
|
|
||||||
else
|
|
||||||
@echo Skipping compile all as version of go too old
|
|
||||||
endif
|
|
||||||
|
|
||||||
appveyor_upload:
|
appveyor_upload:
|
||||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||||
@@ -192,26 +170,21 @@ ifndef BRANCH_PATH
|
|||||||
endif
|
endif
|
||||||
@echo Beta release ready at $(BETA_URL)/testbuilds
|
@echo Beta release ready at $(BETA_URL)/testbuilds
|
||||||
|
|
||||||
BUILD_FLAGS := -exclude "^(windows|darwin)/"
|
|
||||||
ifeq ($(TRAVIS_OS_NAME),osx)
|
|
||||||
BUILD_FLAGS := -include "^darwin/" -cgo
|
|
||||||
endif
|
|
||||||
|
|
||||||
travis_beta:
|
travis_beta:
|
||||||
ifeq ($(TRAVIS_OS_NAME),linux)
|
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
|
||||||
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
|
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
|
||||||
endif
|
endif
|
||||||
git log $(LAST_TAG).. > /tmp/git-log.txt
|
git log $(LAST_TAG).. > /tmp/git-log.txt
|
||||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
|
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
|
||||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||||
ifndef BRANCH_PATH
|
ifndef BRANCH_PATH
|
||||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
|
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
|
||||||
endif
|
endif
|
||||||
@echo Beta release ready at $(BETA_URL)
|
@echo Beta release ready at $(BETA_URL)
|
||||||
|
|
||||||
# Fetch the binary builds from travis and appveyor
|
# Fetch the binary builds from travis and appveyor
|
||||||
fetch_binaries:
|
fetch_binaries:
|
||||||
rclone -P sync $(BETA_UPLOAD) build/
|
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
|
||||||
|
|
||||||
serve: website
|
serve: website
|
||||||
cd docs && hugo server -v -w
|
cd docs && hugo server -v -w
|
||||||
|
|||||||
29
README.md
29
README.md
@@ -1,4 +1,4 @@
|
|||||||
[](https://rclone.org/)
|
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
|
||||||
|
|
||||||
[Website](https://rclone.org) |
|
[Website](https://rclone.org) |
|
||||||
[Documentation](https://rclone.org/docs/) |
|
[Documentation](https://rclone.org/docs/) |
|
||||||
@@ -6,13 +6,14 @@
|
|||||||
[Contributing](CONTRIBUTING.md) |
|
[Contributing](CONTRIBUTING.md) |
|
||||||
[Changelog](https://rclone.org/changelog/) |
|
[Changelog](https://rclone.org/changelog/) |
|
||||||
[Installation](https://rclone.org/install/) |
|
[Installation](https://rclone.org/install/) |
|
||||||
[Forum](https://forum.rclone.org/) |
|
[Forum](https://forum.rclone.org/)
|
||||||
[G+](https://google.com/+RcloneOrg)
|
|
||||||
|
|
||||||
[](https://travis-ci.org/ncw/rclone)
|
[](https://travis-ci.org/rclone/rclone)
|
||||||
[](https://ci.appveyor.com/project/ncw/rclone)
|
[](https://ci.appveyor.com/project/rclone/rclone)
|
||||||
[](https://circleci.com/gh/ncw/rclone/tree/master)
|
[](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
|
||||||
[](https://godoc.org/github.com/ncw/rclone)
|
[](https://circleci.com/gh/rclone/rclone/tree/master)
|
||||||
|
[](https://goreportcard.com/report/github.com/rclone/rclone)
|
||||||
|
[](https://godoc.org/github.com/rclone/rclone)
|
||||||
|
|
||||||
# Rclone
|
# Rclone
|
||||||
|
|
||||||
@@ -20,6 +21,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
|||||||
|
|
||||||
## Storage providers
|
## Storage providers
|
||||||
|
|
||||||
|
* 1Fichier [:page_facing_up:](https://rclone.org/ficher/)
|
||||||
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||||
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
|
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
|
||||||
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||||
@@ -32,10 +34,12 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
|||||||
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||||
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||||
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||||
|
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||||
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||||
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
|
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
|
||||||
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||||
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||||
|
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||||
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
||||||
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
||||||
@@ -44,11 +48,12 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
|||||||
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
||||||
* OVH [:page_facing_up:](https://rclone.org/swift/)
|
* OVH [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
|
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
|
||||||
* Openstack Swift [:page_facing_up:](https://rclone.org/swift/)
|
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
||||||
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
||||||
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
|
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
||||||
|
* put.io [:page_facing_up:](https://rclone.org/putio/)
|
||||||
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||||
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||||
@@ -62,16 +67,18 @@ Please see [the full list of all storage providers and their features](https://r
|
|||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
* MD5/SHA1 hashes checked at all times for file integrity
|
* MD5/SHA-1 hashes checked at all times for file integrity
|
||||||
* Timestamps preserved on files
|
* Timestamps preserved on files
|
||||||
* Partial syncs supported on a whole file basis
|
* Partial syncs supported on a whole file basis
|
||||||
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
|
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
|
||||||
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
|
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
|
||||||
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
||||||
* Can sync to and from network, eg two different cloud accounts
|
* Can sync to and from network, e.g. two different cloud accounts
|
||||||
* Optional encryption ([Crypt](https://rclone.org/crypt/))
|
* Optional encryption ([Crypt](https://rclone.org/crypt/))
|
||||||
* Optional cache ([Cache](https://rclone.org/cache/))
|
* Optional cache ([Cache](https://rclone.org/cache/))
|
||||||
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
|
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
|
||||||
|
* Multi-threaded downloads to local disk
|
||||||
|
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
|
||||||
|
|
||||||
## Installation & documentation
|
## Installation & documentation
|
||||||
|
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ Making a release
|
|||||||
* edit docs/content/changelog.md
|
* edit docs/content/changelog.md
|
||||||
* make doc
|
* make doc
|
||||||
* git status - to check for new man pages - git add them
|
* git status - to check for new man pages - git add them
|
||||||
* git commit -a -v -m "Version v1.XX"
|
* git commit -a -v -m "Version v1.XX.0"
|
||||||
* make retag
|
* make retag
|
||||||
* git push --tags origin master
|
* git push --tags origin master
|
||||||
* # Wait for the appveyor and travis builds to complete then...
|
* # Wait for the appveyor and travis builds to complete then...
|
||||||
@@ -27,6 +27,7 @@ Making a release
|
|||||||
|
|
||||||
Early in the next release cycle update the vendored dependencies
|
Early in the next release cycle update the vendored dependencies
|
||||||
* Review any pinned packages in go.mod and remove if possible
|
* Review any pinned packages in go.mod and remove if possible
|
||||||
|
* GO111MODULE=on go get -u github.com/spf13/cobra@master
|
||||||
* make update
|
* make update
|
||||||
* git status
|
* git status
|
||||||
* git add new files
|
* git add new files
|
||||||
|
|||||||
239
azure-pipelines.yml
Normal file
239
azure-pipelines.yml
Normal file
@@ -0,0 +1,239 @@
|
|||||||
|
---
|
||||||
|
# Azure pipelines build for rclone
|
||||||
|
# Parts stolen shamelessly from all round the Internet, especially Caddy
|
||||||
|
# -*- compile-command: "yamllint -f parsable azure-pipelines.yml" -*-
|
||||||
|
|
||||||
|
trigger:
|
||||||
|
branches:
|
||||||
|
include:
|
||||||
|
- '*'
|
||||||
|
tags:
|
||||||
|
include:
|
||||||
|
- '*'
|
||||||
|
|
||||||
|
variables:
|
||||||
|
GOROOT: $(gorootDir)/go
|
||||||
|
GOPATH: $(system.defaultWorkingDirectory)/gopath
|
||||||
|
GOCACHE: $(system.defaultWorkingDirectory)/gocache
|
||||||
|
GOBIN: $(GOPATH)/bin
|
||||||
|
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
|
||||||
|
GO111MODULE: 'off'
|
||||||
|
GOTAGS: cmount
|
||||||
|
GO_LATEST: false
|
||||||
|
CPATH: ''
|
||||||
|
GO_INSTALL_ARCH: amd64
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
linux:
|
||||||
|
imageName: ubuntu-16.04
|
||||||
|
gorootDir: /usr/local
|
||||||
|
GO_VERSION: latest
|
||||||
|
GOTAGS: cmount
|
||||||
|
BUILD_FLAGS: '-include "^linux/"'
|
||||||
|
MAKE_CHECK: true
|
||||||
|
MAKE_QUICKTEST: true
|
||||||
|
DEPLOY: true
|
||||||
|
mac:
|
||||||
|
imageName: macos-10.13
|
||||||
|
gorootDir: /usr/local
|
||||||
|
GO_VERSION: latest
|
||||||
|
GOTAGS: "" # cmount doesn't work on osx travis for some reason
|
||||||
|
BUILD_FLAGS: '-include "^darwin/" -cgo'
|
||||||
|
MAKE_QUICKTEST: true
|
||||||
|
MAKE_RACEQUICKTEST: true
|
||||||
|
DEPLOY: true
|
||||||
|
windows_amd64:
|
||||||
|
imageName: windows-2019
|
||||||
|
gorootDir: C:\
|
||||||
|
GO_VERSION: latest
|
||||||
|
BUILD_FLAGS: '-include "^windows/amd64" -cgo'
|
||||||
|
MAKE_QUICKTEST: true
|
||||||
|
DEPLOY: true
|
||||||
|
windows_386:
|
||||||
|
imageName: windows-2019
|
||||||
|
gorootDir: C:\
|
||||||
|
GO_VERSION: latest
|
||||||
|
GO_INSTALL_ARCH: 386
|
||||||
|
BUILD_FLAGS: '-include "^windows/386" -cgo'
|
||||||
|
MAKE_QUICKTEST: true
|
||||||
|
DEPLOY: true
|
||||||
|
other_os:
|
||||||
|
imageName: ubuntu-16.04
|
||||||
|
gorootDir: /usr/local
|
||||||
|
GO_VERSION: latest
|
||||||
|
BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
|
||||||
|
MAKE_COMPILE_ALL: true
|
||||||
|
DEPLOY: true
|
||||||
|
modules_race:
|
||||||
|
imageName: ubuntu-16.04
|
||||||
|
gorootDir: /usr/local
|
||||||
|
GO_VERSION: latest
|
||||||
|
GO111MODULE: on
|
||||||
|
GOPROXY: https://proxy.golang.org
|
||||||
|
MAKE_QUICKTEST: true
|
||||||
|
MAKE_RACEQUICKTEST: true
|
||||||
|
go1.9:
|
||||||
|
imageName: ubuntu-16.04
|
||||||
|
gorootDir: /usr/local
|
||||||
|
GOCACHE: '' # build caching only came in go1.10
|
||||||
|
GO_VERSION: go1.9.7
|
||||||
|
MAKE_QUICKTEST: true
|
||||||
|
go1.10:
|
||||||
|
imageName: ubuntu-16.04
|
||||||
|
gorootDir: /usr/local
|
||||||
|
GO_VERSION: go1.10.8
|
||||||
|
MAKE_QUICKTEST: true
|
||||||
|
go1.11:
|
||||||
|
imageName: ubuntu-16.04
|
||||||
|
gorootDir: /usr/local
|
||||||
|
GO_VERSION: go1.11.12
|
||||||
|
MAKE_QUICKTEST: true
|
||||||
|
|
||||||
|
pool:
|
||||||
|
vmImage: $(imageName)
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- bash: |
|
||||||
|
latestGo=$(curl "https://golang.org/VERSION?m=text")
|
||||||
|
echo "##vso[task.setvariable variable=GO_VERSION]$latestGo"
|
||||||
|
echo "##vso[task.setvariable variable=GO_LATEST]true"
|
||||||
|
echo "Latest Go version: $latestGo"
|
||||||
|
condition: eq( variables['GO_VERSION'], 'latest' )
|
||||||
|
continueOnError: false
|
||||||
|
displayName: "Get latest Go version"
|
||||||
|
|
||||||
|
- bash: |
|
||||||
|
sudo rm -f $(which go)
|
||||||
|
echo '##vso[task.prependpath]$(GOBIN)'
|
||||||
|
echo '##vso[task.prependpath]$(GOROOT)/bin'
|
||||||
|
mkdir -p '$(modulePath)'
|
||||||
|
shopt -s extglob
|
||||||
|
shopt -s dotglob
|
||||||
|
mv !(gopath) '$(modulePath)'
|
||||||
|
continueOnError: false
|
||||||
|
displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH
|
||||||
|
|
||||||
|
- task: CacheBeta@0
|
||||||
|
inputs:
|
||||||
|
key: go-build-cache | "$(Agent.JobName)"
|
||||||
|
path: $(GOCACHE)
|
||||||
|
continueOnError: true
|
||||||
|
displayName: Cache go build
|
||||||
|
condition: ne( variables['GOCACHE'], '' )
|
||||||
|
|
||||||
|
# Install Libraries (varies by platform)
|
||||||
|
|
||||||
|
- bash: |
|
||||||
|
sudo modprobe fuse
|
||||||
|
sudo chmod 666 /dev/fuse
|
||||||
|
sudo chown root:$USER /etc/fuse.conf
|
||||||
|
sudo apt-get install fuse libfuse-dev rpm pkg-config
|
||||||
|
condition: eq( variables['Agent.OS'], 'Linux' )
|
||||||
|
continueOnError: false
|
||||||
|
displayName: Install Libraries on Linux
|
||||||
|
|
||||||
|
- bash: |
|
||||||
|
brew update
|
||||||
|
brew tap caskroom/cask
|
||||||
|
brew cask install osxfuse
|
||||||
|
condition: eq( variables['Agent.OS'], 'Darwin' )
|
||||||
|
continueOnError: false
|
||||||
|
displayName: Install Libraries on macOS
|
||||||
|
|
||||||
|
- powershell: |
|
||||||
|
$ProgressPreference = 'SilentlyContinue'
|
||||||
|
choco install -y winfsp zip
|
||||||
|
Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
|
||||||
|
if ($env:GO_INSTALL_ARCH -eq "386") {
|
||||||
|
choco install -y mingw --forcex86 --force
|
||||||
|
Write-Host "##vso[task.prependpath]C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
|
||||||
|
}
|
||||||
|
# Copy mingw32-make.exe to make.exe so the same command line
|
||||||
|
# can be used on Windows as on macOS and Linux
|
||||||
|
$path = (get-command mingw32-make.exe).Path
|
||||||
|
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
|
||||||
|
condition: eq( variables['Agent.OS'], 'Windows_NT' )
|
||||||
|
continueOnError: false
|
||||||
|
displayName: Install Libraries on Windows
|
||||||
|
|
||||||
|
|
||||||
|
# Install Go (this varies by platform)
|
||||||
|
|
||||||
|
- bash: |
|
||||||
|
wget "https://dl.google.com/go/$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
|
||||||
|
sudo mkdir $(gorootDir)
|
||||||
|
sudo chown ${USER}:${USER} $(gorootDir)
|
||||||
|
tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
|
||||||
|
condition: eq( variables['Agent.OS'], 'Linux' )
|
||||||
|
continueOnError: false
|
||||||
|
displayName: Install Go on Linux
|
||||||
|
|
||||||
|
- bash: |
|
||||||
|
wget "https://dl.google.com/go/$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
|
||||||
|
sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
|
||||||
|
condition: eq( variables['Agent.OS'], 'Darwin' )
|
||||||
|
continueOnError: false
|
||||||
|
displayName: Install Go on macOS
|
||||||
|
|
||||||
|
- powershell: |
|
||||||
|
$ProgressPreference = 'SilentlyContinue'
|
||||||
|
Write-Host "Downloading Go $(GO_VERSION) for $(GO_INSTALL_ARCH)"
|
||||||
|
(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip", "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip")
|
||||||
|
Write-Host "Extracting Go"
|
||||||
|
Expand-Archive "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip" -DestinationPath "$(gorootDir)"
|
||||||
|
condition: eq( variables['Agent.OS'], 'Windows_NT' )
|
||||||
|
continueOnError: false
|
||||||
|
displayName: Install Go on Windows
|
||||||
|
|
||||||
|
# Display environment for debugging
|
||||||
|
|
||||||
|
- bash: |
|
||||||
|
printf "Using go at: $(which go)\n"
|
||||||
|
printf "Go version: $(go version)\n"
|
||||||
|
printf "\n\nGo environment:\n\n"
|
||||||
|
go env
|
||||||
|
printf "\n\nRclone environment:\n\n"
|
||||||
|
make vars
|
||||||
|
printf "\n\nSystem environment:\n\n"
|
||||||
|
env
|
||||||
|
workingDirectory: '$(modulePath)'
|
||||||
|
displayName: Print Go version and environment
|
||||||
|
|
||||||
|
# Run Tests
|
||||||
|
|
||||||
|
- bash: |
|
||||||
|
make
|
||||||
|
make quicktest
|
||||||
|
workingDirectory: '$(modulePath)'
|
||||||
|
displayName: Run tests
|
||||||
|
condition: eq( variables['MAKE_QUICKTEST'], 'true' )
|
||||||
|
|
||||||
|
- bash: |
|
||||||
|
make racequicktest
|
||||||
|
workingDirectory: '$(modulePath)'
|
||||||
|
displayName: Race test
|
||||||
|
condition: eq( variables['MAKE_RACEQUICKTEST'], 'true' )
|
||||||
|
|
||||||
|
- bash: |
|
||||||
|
make build_dep
|
||||||
|
make check
|
||||||
|
workingDirectory: '$(modulePath)'
|
||||||
|
displayName: Code quality test
|
||||||
|
condition: eq( variables['MAKE_CHECK'], 'true' )
|
||||||
|
|
||||||
|
- bash: |
|
||||||
|
make
|
||||||
|
make compile_all
|
||||||
|
workingDirectory: '$(modulePath)'
|
||||||
|
displayName: Compile all architectures test
|
||||||
|
condition: eq( variables['MAKE_COMPILE_ALL'], 'true' )
|
||||||
|
|
||||||
|
- bash: |
|
||||||
|
make travis_beta
|
||||||
|
env:
|
||||||
|
RCLONE_CONFIG_PASS: $(RCLONE_CONFIG_PASS)
|
||||||
|
BETA_SUBDIR: 'azure_pipelines' # FIXME remove when removing travis/appveyor
|
||||||
|
workingDirectory: '$(modulePath)'
|
||||||
|
displayName: Deploy built binaries
|
||||||
|
condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
|
||||||
@@ -4,17 +4,17 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/ncw/rclone/fs/fspath"
|
"github.com/rclone/rclone/fs/fspath"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
func init() {
|
func init() {
|
||||||
fsi := &fs.RegInfo{
|
fsi := &fs.RegInfo{
|
||||||
Name: "alias",
|
Name: "alias",
|
||||||
Description: "Alias for a existing remote",
|
Description: "Alias for an existing remote",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "remote",
|
Name: "remote",
|
||||||
@@ -30,7 +30,7 @@ type Options struct {
|
|||||||
Remote string `config:"remote"`
|
Remote string `config:"remote"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFs contstructs an Fs from the path.
|
// NewFs constructs an Fs from the path.
|
||||||
//
|
//
|
||||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
|
|||||||
@@ -1,15 +1,16 @@
|
|||||||
package alias
|
package alias
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
_ "github.com/ncw/rclone/backend/local" // pull in test backend
|
_ "github.com/rclone/rclone/backend/local" // pull in test backend
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/ncw/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -69,7 +70,7 @@ func TestNewFS(t *testing.T) {
|
|||||||
prepare(t, remoteRoot)
|
prepare(t, remoteRoot)
|
||||||
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
|
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
|
||||||
require.NoError(t, err, what)
|
require.NoError(t, err, what)
|
||||||
gotEntries, err := f.List(test.fsList)
|
gotEntries, err := f.List(context.Background(), test.fsList)
|
||||||
require.NoError(t, err, what)
|
require.NoError(t, err, what)
|
||||||
|
|
||||||
sort.Sort(gotEntries)
|
sort.Sort(gotEntries)
|
||||||
@@ -80,7 +81,7 @@ func TestNewFS(t *testing.T) {
|
|||||||
wantEntry := test.entries[i]
|
wantEntry := test.entries[i]
|
||||||
|
|
||||||
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
|
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
|
||||||
require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
|
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
|
||||||
_, isDir := gotEntry.(fs.Directory)
|
_, isDir := gotEntry.(fs.Directory)
|
||||||
require.Equal(t, wantEntry.isDir, isDir, what)
|
require.Equal(t, wantEntry.isDir, isDir, what)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,30 +2,35 @@ package all
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
// Active file systems
|
// Active file systems
|
||||||
_ "github.com/ncw/rclone/backend/alias"
|
_ "github.com/rclone/rclone/backend/alias"
|
||||||
_ "github.com/ncw/rclone/backend/amazonclouddrive"
|
_ "github.com/rclone/rclone/backend/amazonclouddrive"
|
||||||
_ "github.com/ncw/rclone/backend/azureblob"
|
_ "github.com/rclone/rclone/backend/azureblob"
|
||||||
_ "github.com/ncw/rclone/backend/b2"
|
_ "github.com/rclone/rclone/backend/b2"
|
||||||
_ "github.com/ncw/rclone/backend/box"
|
_ "github.com/rclone/rclone/backend/box"
|
||||||
_ "github.com/ncw/rclone/backend/cache"
|
_ "github.com/rclone/rclone/backend/cache"
|
||||||
_ "github.com/ncw/rclone/backend/crypt"
|
_ "github.com/rclone/rclone/backend/crypt"
|
||||||
_ "github.com/ncw/rclone/backend/drive"
|
_ "github.com/rclone/rclone/backend/drive"
|
||||||
_ "github.com/ncw/rclone/backend/dropbox"
|
_ "github.com/rclone/rclone/backend/dropbox"
|
||||||
_ "github.com/ncw/rclone/backend/ftp"
|
_ "github.com/rclone/rclone/backend/fichier"
|
||||||
_ "github.com/ncw/rclone/backend/googlecloudstorage"
|
_ "github.com/rclone/rclone/backend/ftp"
|
||||||
_ "github.com/ncw/rclone/backend/http"
|
_ "github.com/rclone/rclone/backend/googlecloudstorage"
|
||||||
_ "github.com/ncw/rclone/backend/hubic"
|
_ "github.com/rclone/rclone/backend/googlephotos"
|
||||||
_ "github.com/ncw/rclone/backend/jottacloud"
|
_ "github.com/rclone/rclone/backend/http"
|
||||||
_ "github.com/ncw/rclone/backend/local"
|
_ "github.com/rclone/rclone/backend/hubic"
|
||||||
_ "github.com/ncw/rclone/backend/mega"
|
_ "github.com/rclone/rclone/backend/jottacloud"
|
||||||
_ "github.com/ncw/rclone/backend/onedrive"
|
_ "github.com/rclone/rclone/backend/koofr"
|
||||||
_ "github.com/ncw/rclone/backend/opendrive"
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
_ "github.com/ncw/rclone/backend/pcloud"
|
_ "github.com/rclone/rclone/backend/mega"
|
||||||
_ "github.com/ncw/rclone/backend/qingstor"
|
_ "github.com/rclone/rclone/backend/onedrive"
|
||||||
_ "github.com/ncw/rclone/backend/s3"
|
_ "github.com/rclone/rclone/backend/opendrive"
|
||||||
_ "github.com/ncw/rclone/backend/sftp"
|
_ "github.com/rclone/rclone/backend/pcloud"
|
||||||
_ "github.com/ncw/rclone/backend/swift"
|
_ "github.com/rclone/rclone/backend/premiumizeme"
|
||||||
_ "github.com/ncw/rclone/backend/union"
|
_ "github.com/rclone/rclone/backend/putio"
|
||||||
_ "github.com/ncw/rclone/backend/webdav"
|
_ "github.com/rclone/rclone/backend/qingstor"
|
||||||
_ "github.com/ncw/rclone/backend/yandex"
|
_ "github.com/rclone/rclone/backend/s3"
|
||||||
|
_ "github.com/rclone/rclone/backend/sftp"
|
||||||
|
_ "github.com/rclone/rclone/backend/swift"
|
||||||
|
_ "github.com/rclone/rclone/backend/union"
|
||||||
|
_ "github.com/rclone/rclone/backend/webdav"
|
||||||
|
_ "github.com/rclone/rclone/backend/yandex"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ we ignore assets completely!
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@@ -22,18 +23,17 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
acd "github.com/ncw/go-acd"
|
acd "github.com/ncw/go-acd"
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/config"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
|
||||||
"github.com/ncw/rclone/fs/fserrors"
|
|
||||||
"github.com/ncw/rclone/fs/fshttp"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/ncw/rclone/lib/dircache"
|
|
||||||
"github.com/ncw/rclone/lib/oauthutil"
|
|
||||||
"github.com/ncw/rclone/lib/pacer"
|
|
||||||
"github.com/ncw/rclone/lib/rest"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/dircache"
|
||||||
|
"github.com/rclone/rclone/lib/oauthutil"
|
||||||
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -155,7 +155,7 @@ type Fs struct {
|
|||||||
noAuthClient *http.Client // unauthenticated http client
|
noAuthClient *http.Client // unauthenticated http client
|
||||||
root string // the path we are working on
|
root string // the path we are working on
|
||||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||||
pacer *pacer.Pacer // pacer for API calls
|
pacer *fs.Pacer // pacer for API calls
|
||||||
trueRootID string // ID of true root directory
|
trueRootID string // ID of true root directory
|
||||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||||
}
|
}
|
||||||
@@ -247,6 +247,7 @@ func filterRequest(req *http.Request) {
|
|||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
|
ctx := context.Background()
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -273,7 +274,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
root: root,
|
root: root,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
c: c,
|
c: c,
|
||||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
|
pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
|
||||||
noAuthClient: fshttp.NewClient(fs.Config),
|
noAuthClient: fshttp.NewClient(fs.Config),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
@@ -308,7 +309,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
f.dirCache = dircache.New(root, f.trueRootID, f)
|
f.dirCache = dircache.New(root, f.trueRootID, f)
|
||||||
|
|
||||||
// Find the current root
|
// Find the current root
|
||||||
err = f.dirCache.FindRoot(false)
|
err = f.dirCache.FindRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Assume it is a file
|
// Assume it is a file
|
||||||
newRoot, remote := dircache.SplitPath(root)
|
newRoot, remote := dircache.SplitPath(root)
|
||||||
@@ -316,12 +317,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
|
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
|
||||||
tempF.root = newRoot
|
tempF.root = newRoot
|
||||||
// Make new Fs which is the parent
|
// Make new Fs which is the parent
|
||||||
err = tempF.dirCache.FindRoot(false)
|
err = tempF.dirCache.FindRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// No root so return old f
|
// No root so return old f
|
||||||
return f, nil
|
return f, nil
|
||||||
}
|
}
|
||||||
_, err := tempF.newObjectWithInfo(remote, nil)
|
_, err := tempF.newObjectWithInfo(ctx, remote, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorObjectNotFound {
|
if err == fs.ErrorObjectNotFound {
|
||||||
// File doesn't exist so return old f
|
// File doesn't exist so return old f
|
||||||
@@ -331,7 +332,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
// XXX: update the old f here instead of returning tempF, since
|
// XXX: update the old f here instead of returning tempF, since
|
||||||
// `features` were already filled with functions having *f as a receiver.
|
// `features` were already filled with functions having *f as a receiver.
|
||||||
// See https://github.com/ncw/rclone/issues/2182
|
// See https://github.com/rclone/rclone/issues/2182
|
||||||
f.dirCache = tempF.dirCache
|
f.dirCache = tempF.dirCache
|
||||||
f.root = tempF.root
|
f.root = tempF.root
|
||||||
// return an error with an fs which points to the parent
|
// return an error with an fs which points to the parent
|
||||||
@@ -353,7 +354,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
|
|||||||
// Return an Object from a path
|
// Return an Object from a path
|
||||||
//
|
//
|
||||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) {
|
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
|
||||||
o := &Object{
|
o := &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
@@ -362,7 +363,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error)
|
|||||||
// Set info but not meta
|
// Set info but not meta
|
||||||
o.info = info
|
o.info = info
|
||||||
} else {
|
} else {
|
||||||
err := o.readMetaData() // reads info and meta, returning an error
|
err := o.readMetaData(ctx) // reads info and meta, returning an error
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -372,12 +373,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error)
|
|||||||
|
|
||||||
// NewObject finds the Object at remote. If it can't be found
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
// it returns the error fs.ErrorObjectNotFound.
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
return f.newObjectWithInfo(remote, nil)
|
return f.newObjectWithInfo(ctx, remote, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||||
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
|
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||||
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
|
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
|
||||||
folder := acd.FolderFromId(pathID, f.c.Nodes)
|
folder := acd.FolderFromId(pathID, f.c.Nodes)
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
@@ -404,7 +405,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
|
|||||||
}
|
}
|
||||||
|
|
||||||
// CreateDir makes a directory with pathID as parent and name leaf
|
// CreateDir makes a directory with pathID as parent and name leaf
|
||||||
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
|
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
|
||||||
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
|
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
|
||||||
folder := acd.FolderFromId(pathID, f.c.Nodes)
|
folder := acd.FolderFromId(pathID, f.c.Nodes)
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
@@ -502,12 +503,12 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
|
|||||||
//
|
//
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
err = f.dirCache.FindRoot(false)
|
err = f.dirCache.FindRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
directoryID, err := f.dirCache.FindDir(dir, false)
|
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -525,7 +526,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
d := fs.NewDir(remote, when).SetID(*node.Id)
|
d := fs.NewDir(remote, when).SetID(*node.Id)
|
||||||
entries = append(entries, d)
|
entries = append(entries, d)
|
||||||
case fileKind:
|
case fileKind:
|
||||||
o, err := f.newObjectWithInfo(remote, node)
|
o, err := f.newObjectWithInfo(ctx, remote, node)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
iErr = err
|
iErr = err
|
||||||
return true
|
return true
|
||||||
@@ -569,7 +570,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
// At the end of large uploads. The speculation is that the timeout
|
// At the end of large uploads. The speculation is that the timeout
|
||||||
// is waiting for the sha1 hashing to complete and the file may well
|
// is waiting for the sha1 hashing to complete and the file may well
|
||||||
// be properly uploaded.
|
// be properly uploaded.
|
||||||
func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
|
func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
|
||||||
// Return if no error - all is well
|
// Return if no error - all is well
|
||||||
if inErr == nil {
|
if inErr == nil {
|
||||||
return false, inInfo, inErr
|
return false, inInfo, inErr
|
||||||
@@ -609,7 +610,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
|
|||||||
fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
|
fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
|
||||||
remote := src.Remote()
|
remote := src.Remote()
|
||||||
for i := 1; i <= retries; i++ {
|
for i := 1; i <= retries; i++ {
|
||||||
o, err := f.NewObject(remote)
|
o, err := f.NewObject(ctx, remote)
|
||||||
if err == fs.ErrorObjectNotFound {
|
if err == fs.ErrorObjectNotFound {
|
||||||
fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
|
fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
@@ -635,7 +636,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
|
|||||||
// Copy the reader in to the new object which is returned
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
remote := src.Remote()
|
remote := src.Remote()
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
// Temporary Object under construction
|
// Temporary Object under construction
|
||||||
@@ -644,17 +645,17 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
|
|||||||
remote: remote,
|
remote: remote,
|
||||||
}
|
}
|
||||||
// Check if object already exists
|
// Check if object already exists
|
||||||
err := o.readMetaData()
|
err := o.readMetaData(ctx)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
return o, o.Update(in, src, options...)
|
return o, o.Update(ctx, in, src, options...)
|
||||||
case fs.ErrorObjectNotFound:
|
case fs.ErrorObjectNotFound:
|
||||||
// Not found so create it
|
// Not found so create it
|
||||||
default:
|
default:
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// If not create it
|
// If not create it
|
||||||
leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
|
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -670,7 +671,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
|
|||||||
info, resp, err = folder.Put(in, leaf)
|
info, resp, err = folder.Put(in, leaf)
|
||||||
f.tokenRenewer.Stop()
|
f.tokenRenewer.Stop()
|
||||||
var ok bool
|
var ok bool
|
||||||
ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
|
ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
|
||||||
if ok {
|
if ok {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
@@ -684,13 +685,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Mkdir creates the container if it doesn't exist
|
// Mkdir creates the container if it doesn't exist
|
||||||
func (f *Fs) Mkdir(dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
err := f.dirCache.FindRoot(true)
|
err := f.dirCache.FindRoot(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if dir != "" {
|
if dir != "" {
|
||||||
_, err = f.dirCache.FindDir(dir, true)
|
_, err = f.dirCache.FindDir(ctx, dir, true)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -704,7 +705,7 @@ func (f *Fs) Mkdir(dir string) error {
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantMove
|
// If it isn't possible then return fs.ErrorCantMove
|
||||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
|
// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
|
||||||
srcObj, ok := src.(*Object)
|
srcObj, ok := src.(*Object)
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -713,15 +714,15 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// create the destination directory if necessary
|
// create the destination directory if necessary
|
||||||
err := f.dirCache.FindRoot(true)
|
err := f.dirCache.FindRoot(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
|
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
|
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -737,12 +738,12 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
srcErr, dstErr error
|
srcErr, dstErr error
|
||||||
)
|
)
|
||||||
for i := 1; i <= fs.Config.LowLevelRetries; i++ {
|
for i := 1; i <= fs.Config.LowLevelRetries; i++ {
|
||||||
_, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
|
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
|
||||||
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
|
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
|
||||||
// exit if error on source
|
// exit if error on source
|
||||||
return nil, srcErr
|
return nil, srcErr
|
||||||
}
|
}
|
||||||
dstObj, dstErr = f.NewObject(remote)
|
dstObj, dstErr = f.NewObject(ctx, remote)
|
||||||
if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
|
if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
|
||||||
// exit if error on dst
|
// exit if error on dst
|
||||||
return nil, dstErr
|
return nil, dstErr
|
||||||
@@ -771,7 +772,7 @@ func (f *Fs) DirCacheFlush() {
|
|||||||
// If it isn't possible then return fs.ErrorCantDirMove
|
// If it isn't possible then return fs.ErrorCantDirMove
|
||||||
//
|
//
|
||||||
// If destination exists then return fs.ErrorDirExists
|
// If destination exists then return fs.ErrorDirExists
|
||||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||||
srcFs, ok := src.(*Fs)
|
srcFs, ok := src.(*Fs)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(src, "DirMove error: not same remote type")
|
fs.Debugf(src, "DirMove error: not same remote type")
|
||||||
@@ -787,14 +788,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// find the root src directory
|
// find the root src directory
|
||||||
err = srcFs.dirCache.FindRoot(false)
|
err = srcFs.dirCache.FindRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// find the root dst directory
|
// find the root dst directory
|
||||||
if dstRemote != "" {
|
if dstRemote != "" {
|
||||||
err = f.dirCache.FindRoot(true)
|
err = f.dirCache.FindRoot(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -809,14 +810,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
|||||||
if dstRemote == "" {
|
if dstRemote == "" {
|
||||||
findPath = f.root
|
findPath = f.root
|
||||||
}
|
}
|
||||||
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
|
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check destination does not exist
|
// Check destination does not exist
|
||||||
if dstRemote != "" {
|
if dstRemote != "" {
|
||||||
_, err = f.dirCache.FindDir(dstRemote, false)
|
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
|
||||||
if err == fs.ErrorDirNotFound {
|
if err == fs.ErrorDirNotFound {
|
||||||
// OK
|
// OK
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
@@ -832,7 +833,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
|||||||
if srcRemote == "" {
|
if srcRemote == "" {
|
||||||
srcDirectoryID, err = srcFs.dirCache.RootParentID()
|
srcDirectoryID, err = srcFs.dirCache.RootParentID()
|
||||||
} else {
|
} else {
|
||||||
_, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
|
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -840,7 +841,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
|||||||
srcLeaf, _ := dircache.SplitPath(srcPath)
|
srcLeaf, _ := dircache.SplitPath(srcPath)
|
||||||
|
|
||||||
// Find ID of src
|
// Find ID of src
|
||||||
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
|
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -873,17 +874,17 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
|
|||||||
|
|
||||||
// purgeCheck remotes the root directory, if check is set then it
|
// purgeCheck remotes the root directory, if check is set then it
|
||||||
// refuses to do so if it has anything in
|
// refuses to do so if it has anything in
|
||||||
func (f *Fs) purgeCheck(dir string, check bool) error {
|
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||||
root := path.Join(f.root, dir)
|
root := path.Join(f.root, dir)
|
||||||
if root == "" {
|
if root == "" {
|
||||||
return errors.New("can't purge root directory")
|
return errors.New("can't purge root directory")
|
||||||
}
|
}
|
||||||
dc := f.dirCache
|
dc := f.dirCache
|
||||||
err := dc.FindRoot(false)
|
err := dc.FindRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
rootID, err := dc.FindDir(dir, false)
|
rootID, err := dc.FindDir(ctx, dir, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -932,8 +933,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
|
|||||||
// Rmdir deletes the root folder
|
// Rmdir deletes the root folder
|
||||||
//
|
//
|
||||||
// Returns an error if it isn't empty
|
// Returns an error if it isn't empty
|
||||||
func (f *Fs) Rmdir(dir string) error {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
return f.purgeCheck(dir, true)
|
return f.purgeCheck(ctx, dir, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Precision return the precision of this Fs
|
// Precision return the precision of this Fs
|
||||||
@@ -955,7 +956,7 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantCopy
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
// srcObj, ok := src.(*Object)
|
// srcObj, ok := src.(*Object)
|
||||||
// if !ok {
|
// if !ok {
|
||||||
// fs.Debugf(src, "Can't copy - not same remote type")
|
// fs.Debugf(src, "Can't copy - not same remote type")
|
||||||
@@ -966,7 +967,7 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
// if err != nil {
|
// if err != nil {
|
||||||
// return nil, err
|
// return nil, err
|
||||||
// }
|
// }
|
||||||
// return f.NewObject(remote), nil
|
// return f.NewObject(ctx, remote), nil
|
||||||
//}
|
//}
|
||||||
|
|
||||||
// Purge deletes all the files and the container
|
// Purge deletes all the files and the container
|
||||||
@@ -974,8 +975,8 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
// Optional interface: Only implement this if you have a way of
|
// Optional interface: Only implement this if you have a way of
|
||||||
// deleting all the files quicker than just running Remove() on the
|
// deleting all the files quicker than just running Remove() on the
|
||||||
// result of List()
|
// result of List()
|
||||||
func (f *Fs) Purge() error {
|
func (f *Fs) Purge(ctx context.Context) error {
|
||||||
return f.purgeCheck("", false)
|
return f.purgeCheck(ctx, "", false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
@@ -999,7 +1000,7 @@ func (o *Object) Remote() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Hash returns the Md5sum of an object returning a lowercase hex string
|
// Hash returns the Md5sum of an object returning a lowercase hex string
|
||||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||||
if t != hash.MD5 {
|
if t != hash.MD5 {
|
||||||
return "", hash.ErrUnsupported
|
return "", hash.ErrUnsupported
|
||||||
}
|
}
|
||||||
@@ -1022,11 +1023,11 @@ func (o *Object) Size() int64 {
|
|||||||
// it also sets the info
|
// it also sets the info
|
||||||
//
|
//
|
||||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||||
func (o *Object) readMetaData() (err error) {
|
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||||
if o.info != nil {
|
if o.info != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
|
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorDirNotFound {
|
if err == fs.ErrorDirNotFound {
|
||||||
return fs.ErrorObjectNotFound
|
return fs.ErrorObjectNotFound
|
||||||
@@ -1055,8 +1056,8 @@ func (o *Object) readMetaData() (err error) {
|
|||||||
//
|
//
|
||||||
// It attempts to read the objects mtime and if that isn't present the
|
// It attempts to read the objects mtime and if that isn't present the
|
||||||
// LastModified returned in the http headers
|
// LastModified returned in the http headers
|
||||||
func (o *Object) ModTime() time.Time {
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
err := o.readMetaData()
|
err := o.readMetaData(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(o, "Failed to read metadata: %v", err)
|
fs.Debugf(o, "Failed to read metadata: %v", err)
|
||||||
return time.Now()
|
return time.Now()
|
||||||
@@ -1070,7 +1071,7 @@ func (o *Object) ModTime() time.Time {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
// SetModTime sets the modification time of the local fs object
|
||||||
func (o *Object) SetModTime(modTime time.Time) error {
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||||
// FIXME not implemented
|
// FIXME not implemented
|
||||||
return fs.ErrorCantSetModTime
|
return fs.ErrorCantSetModTime
|
||||||
}
|
}
|
||||||
@@ -1081,7 +1082,7 @@ func (o *Object) Storable() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open an object for read
|
// Open an object for read
|
||||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
|
bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
|
||||||
if bigObject {
|
if bigObject {
|
||||||
fs.Debugf(o, "Downloading large object via tempLink")
|
fs.Debugf(o, "Downloading large object via tempLink")
|
||||||
@@ -1093,7 +1094,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
|||||||
if !bigObject {
|
if !bigObject {
|
||||||
in, resp, err = file.OpenHeaders(headers)
|
in, resp, err = file.OpenHeaders(headers)
|
||||||
} else {
|
} else {
|
||||||
in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
|
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
|
||||||
}
|
}
|
||||||
return o.fs.shouldRetry(resp, err)
|
return o.fs.shouldRetry(resp, err)
|
||||||
})
|
})
|
||||||
@@ -1103,7 +1104,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
|||||||
// Update the object with the contents of the io.Reader, modTime and size
|
// Update the object with the contents of the io.Reader, modTime and size
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||||
file := acd.File{Node: o.info}
|
file := acd.File{Node: o.info}
|
||||||
var info *acd.File
|
var info *acd.File
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
@@ -1114,7 +1115,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
info, resp, err = file.Overwrite(in)
|
info, resp, err = file.Overwrite(in)
|
||||||
o.fs.tokenRenewer.Stop()
|
o.fs.tokenRenewer.Stop()
|
||||||
var ok bool
|
var ok bool
|
||||||
ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
|
ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
|
||||||
if ok {
|
if ok {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
@@ -1139,7 +1140,7 @@ func (f *Fs) removeNode(info *acd.Node) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove() error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
return o.fs.removeNode(o.info)
|
return o.fs.removeNode(o.info)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1261,7 +1262,7 @@ OnConflict:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MimeType of an Object if known, "" otherwise
|
// MimeType of an Object if known, "" otherwise
|
||||||
func (o *Object) MimeType() string {
|
func (o *Object) MimeType(ctx context.Context) string {
|
||||||
if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
|
if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
|
||||||
return *o.info.ContentProperties.ContentType
|
return *o.info.ContentProperties.ContentType
|
||||||
}
|
}
|
||||||
@@ -1274,7 +1275,7 @@ func (o *Object) MimeType() string {
|
|||||||
// Automatically restarts itself in case of unexpected behaviour of the remote.
|
// Automatically restarts itself in case of unexpected behaviour of the remote.
|
||||||
//
|
//
|
||||||
// Close the returned channel to stop being notified.
|
// Close the returned channel to stop being notified.
|
||||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||||
checkpoint := f.opt.Checkpoint
|
checkpoint := f.opt.Checkpoint
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
|
|||||||
@@ -7,9 +7,9 @@ package amazonclouddrive_test
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/amazonclouddrive"
|
"github.com/rclone/rclone/backend/amazonclouddrive"
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||||
|
|
||||||
// +build !plan9,!solaris,go1.8
|
// +build !plan9,!solaris
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
@@ -16,7 +16,6 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -24,16 +23,17 @@ import (
|
|||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/accounting"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
|
||||||
"github.com/ncw/rclone/fs/fserrors"
|
|
||||||
"github.com/ncw/rclone/fs/fshttp"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/ncw/rclone/fs/walk"
|
|
||||||
"github.com/ncw/rclone/lib/pacer"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/fs/walk"
|
||||||
|
"github.com/rclone/rclone/lib/bucket"
|
||||||
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -53,6 +53,11 @@ const (
|
|||||||
maxUploadCutoff = 256 * fs.MebiByte
|
maxUploadCutoff = 256 * fs.MebiByte
|
||||||
defaultAccessTier = azblob.AccessTierNone
|
defaultAccessTier = azblob.AccessTierNone
|
||||||
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
||||||
|
// Default storage account, key and blob endpoint for emulator support,
|
||||||
|
// though it is a base64 key checked in here, it is publicly available secret.
|
||||||
|
emulatorAccount = "devstoreaccount1"
|
||||||
|
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
|
||||||
|
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
@@ -63,13 +68,17 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "account",
|
Name: "account",
|
||||||
Help: "Storage Account Name (leave blank to use connection string or SAS URL)",
|
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
|
||||||
}, {
|
}, {
|
||||||
Name: "key",
|
Name: "key",
|
||||||
Help: "Storage Account Key (leave blank to use connection string or SAS URL)",
|
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
|
||||||
}, {
|
}, {
|
||||||
Name: "sas_url",
|
Name: "sas_url",
|
||||||
Help: "SAS URL for container level access only\n(leave blank if using account/key or connection string)",
|
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
|
||||||
|
}, {
|
||||||
|
Name: "use_emulator",
|
||||||
|
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
|
||||||
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "endpoint",
|
Name: "endpoint",
|
||||||
Help: "Endpoint for the service\nLeave blank normally.",
|
Help: "Endpoint for the service\nLeave blank normally.",
|
||||||
@@ -77,7 +86,7 @@ func init() {
|
|||||||
}, {
|
}, {
|
||||||
Name: "upload_cutoff",
|
Name: "upload_cutoff",
|
||||||
Help: "Cutoff for switching to chunked upload (<= 256MB).",
|
Help: "Cutoff for switching to chunked upload (<= 256MB).",
|
||||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
Default: defaultUploadCutoff,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "chunk_size",
|
Name: "chunk_size",
|
||||||
@@ -85,7 +94,7 @@ func init() {
|
|||||||
|
|
||||||
Note that this is stored in memory and there may be up to
|
Note that this is stored in memory and there may be up to
|
||||||
"--transfers" chunks stored at once in memory.`,
|
"--transfers" chunks stored at once in memory.`,
|
||||||
Default: fs.SizeSuffix(defaultChunkSize),
|
Default: defaultChunkSize,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "list_chunk",
|
Name: "list_chunk",
|
||||||
@@ -129,23 +138,25 @@ type Options struct {
|
|||||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||||
ListChunkSize uint `config:"list_chunk"`
|
ListChunkSize uint `config:"list_chunk"`
|
||||||
AccessTier string `config:"access_tier"`
|
AccessTier string `config:"access_tier"`
|
||||||
|
UseEmulator bool `config:"use_emulator"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote azure server
|
// Fs represents a remote azure server
|
||||||
type Fs struct {
|
type Fs struct {
|
||||||
name string // name of this remote
|
name string // name of this remote
|
||||||
root string // the path we are working on if any
|
root string // the path we are working on if any
|
||||||
opt Options // parsed config options
|
opt Options // parsed config options
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
client *http.Client // http client we are using
|
client *http.Client // http client we are using
|
||||||
svcURL *azblob.ServiceURL // reference to serviceURL
|
svcURL *azblob.ServiceURL // reference to serviceURL
|
||||||
cntURL *azblob.ContainerURL // reference to containerURL
|
cntURLcacheMu sync.Mutex // mutex to protect cntURLcache
|
||||||
container string // the container we are working on
|
cntURLcache map[string]*azblob.ContainerURL // reference to containerURL per container
|
||||||
containerOKMu sync.Mutex // mutex to protect container OK
|
rootContainer string // container part of root (if any)
|
||||||
containerOK bool // true if we have created the container
|
rootDirectory string // directory part of root (if any)
|
||||||
containerDeleted bool // true if we have deleted the container
|
isLimited bool // if limited to one container
|
||||||
pacer *pacer.Pacer // To pace and retry the API calls
|
cache *bucket.Cache // cache for container creation status
|
||||||
uploadToken *pacer.TokenDispenser // control concurrency
|
pacer *fs.Pacer // To pace and retry the API calls
|
||||||
|
uploadToken *pacer.TokenDispenser // control concurrency
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes a azure object
|
// Object describes a azure object
|
||||||
@@ -169,18 +180,18 @@ func (f *Fs) Name() string {
|
|||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
// Root of the remote (as passed into NewFs)
|
||||||
func (f *Fs) Root() string {
|
func (f *Fs) Root() string {
|
||||||
if f.root == "" {
|
return f.root
|
||||||
return f.container
|
|
||||||
}
|
|
||||||
return f.container + "/" + f.root
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// String converts this Fs to a string
|
// String converts this Fs to a string
|
||||||
func (f *Fs) String() string {
|
func (f *Fs) String() string {
|
||||||
if f.root == "" {
|
if f.rootContainer == "" {
|
||||||
return fmt.Sprintf("Azure container %s", f.container)
|
return fmt.Sprintf("Azure root")
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("Azure container %s path %s", f.container, f.root)
|
if f.rootDirectory == "" {
|
||||||
|
return fmt.Sprintf("Azure container %s", f.rootContainer)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("Azure container %s path %s", f.rootContainer, f.rootDirectory)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
// Features returns the optional features of this Fs
|
||||||
@@ -188,21 +199,23 @@ func (f *Fs) Features() *fs.Features {
|
|||||||
return f.features
|
return f.features
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pattern to match a azure path
|
// parsePath parses a remote 'url'
|
||||||
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
|
func parsePath(path string) (root string) {
|
||||||
|
root = strings.Trim(path, "/")
|
||||||
// parseParse parses a azure 'url'
|
|
||||||
func parsePath(path string) (container, directory string, err error) {
|
|
||||||
parts := matcher.FindStringSubmatch(path)
|
|
||||||
if parts == nil {
|
|
||||||
err = errors.Errorf("couldn't find container in azure path %q", path)
|
|
||||||
} else {
|
|
||||||
container, directory = parts[1], parts[2]
|
|
||||||
directory = strings.Trim(directory, "/")
|
|
||||||
}
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// split returns container and containerPath from the rootRelativePath
|
||||||
|
// relative to f.root
|
||||||
|
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
|
||||||
|
return bucket.Split(path.Join(f.root, rootRelativePath))
|
||||||
|
}
|
||||||
|
|
||||||
|
// split returns container and containerPath from the object
|
||||||
|
func (o *Object) split() (container, containerPath string) {
|
||||||
|
return o.fs.split(o.remote)
|
||||||
|
}
|
||||||
|
|
||||||
// validateAccessTier checks if azureblob supports user supplied tier
|
// validateAccessTier checks if azureblob supports user supplied tier
|
||||||
func validateAccessTier(tier string) bool {
|
func validateAccessTier(tier string) bool {
|
||||||
switch tier {
|
switch tier {
|
||||||
@@ -307,8 +320,15 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
|
|||||||
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
|
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFs contstructs an Fs from the path, container:path
|
// setRoot changes the root of the Fs
|
||||||
|
func (f *Fs) setRoot(root string) {
|
||||||
|
f.root = parsePath(root)
|
||||||
|
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
|
ctx := context.Background()
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -327,10 +347,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
if opt.ListChunkSize > maxListChunkSize {
|
if opt.ListChunkSize > maxListChunkSize {
|
||||||
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
|
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
|
||||||
}
|
}
|
||||||
container, directory, err := parsePath(root)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if opt.Endpoint == "" {
|
if opt.Endpoint == "" {
|
||||||
opt.Endpoint = storageDefaultBaseURL
|
opt.Endpoint = storageDefaultBaseURL
|
||||||
}
|
}
|
||||||
@@ -345,26 +361,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
container: container,
|
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
root: directory,
|
|
||||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
|
|
||||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||||
client: fshttp.NewClient(fs.Config),
|
client: fshttp.NewClient(fs.Config),
|
||||||
|
cache: bucket.NewCache(),
|
||||||
|
cntURLcache: make(map[string]*azblob.ContainerURL, 1),
|
||||||
}
|
}
|
||||||
|
f.setRoot(root)
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
WriteMimeType: true,
|
WriteMimeType: true,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
SetTier: true,
|
BucketBasedRootOK: true,
|
||||||
GetTier: true,
|
SetTier: true,
|
||||||
|
GetTier: true,
|
||||||
}).Fill(f)
|
}).Fill(f)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
u *url.URL
|
u *url.URL
|
||||||
serviceURL azblob.ServiceURL
|
serviceURL azblob.ServiceURL
|
||||||
containerURL azblob.ContainerURL
|
|
||||||
)
|
)
|
||||||
switch {
|
switch {
|
||||||
|
case opt.UseEmulator:
|
||||||
|
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "Failed to parse credentials")
|
||||||
|
}
|
||||||
|
u, err = url.Parse(emulatorBlobEndpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||||
|
}
|
||||||
|
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||||
|
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||||
case opt.Account != "" && opt.Key != "":
|
case opt.Account != "" && opt.Key != "":
|
||||||
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
|
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -377,7 +405,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
|
||||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||||
containerURL = serviceURL.NewContainerURL(container)
|
|
||||||
case opt.SASURL != "":
|
case opt.SASURL != "":
|
||||||
u, err = url.Parse(opt.SASURL)
|
u, err = url.Parse(opt.SASURL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -388,38 +415,30 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
// Check if we have container level SAS or account level sas
|
// Check if we have container level SAS or account level sas
|
||||||
parts := azblob.NewBlobURLParts(*u)
|
parts := azblob.NewBlobURLParts(*u)
|
||||||
if parts.ContainerName != "" {
|
if parts.ContainerName != "" {
|
||||||
if container != "" && parts.ContainerName != container {
|
if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
|
||||||
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
|
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
|
||||||
}
|
}
|
||||||
|
containerURL := azblob.NewContainerURL(*u, pipeline)
|
||||||
container = parts.ContainerName
|
f.cntURLcache[parts.ContainerName] = &containerURL
|
||||||
containerURL = azblob.NewContainerURL(*u, pipeline)
|
f.isLimited = true
|
||||||
} else {
|
} else {
|
||||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||||
containerURL = serviceURL.NewContainerURL(container)
|
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
return nil, errors.New("Need account+key or connectionString or sasURL")
|
return nil, errors.New("Need account+key or connectionString or sasURL")
|
||||||
}
|
}
|
||||||
f.svcURL = &serviceURL
|
f.svcURL = &serviceURL
|
||||||
f.cntURL = &containerURL
|
|
||||||
|
|
||||||
if f.root != "" {
|
if f.rootContainer != "" && f.rootDirectory != "" {
|
||||||
f.root += "/"
|
|
||||||
// Check to see if the (container,directory) is actually an existing file
|
// Check to see if the (container,directory) is actually an existing file
|
||||||
oldRoot := f.root
|
oldRoot := f.root
|
||||||
remote := path.Base(directory)
|
newRoot, leaf := path.Split(oldRoot)
|
||||||
f.root = path.Dir(directory)
|
f.setRoot(newRoot)
|
||||||
if f.root == "." {
|
_, err := f.NewObject(ctx, leaf)
|
||||||
f.root = ""
|
|
||||||
} else {
|
|
||||||
f.root += "/"
|
|
||||||
}
|
|
||||||
_, err := f.NewObject(remote)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
|
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
|
||||||
// File doesn't exist or is a directory so return old f
|
// File doesn't exist or is a directory so return old f
|
||||||
f.root = oldRoot
|
f.setRoot(oldRoot)
|
||||||
return f, nil
|
return f, nil
|
||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -430,6 +449,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return f, nil
|
return f, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// return the container URL for the container passed in
|
||||||
|
func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
|
||||||
|
f.cntURLcacheMu.Lock()
|
||||||
|
defer f.cntURLcacheMu.Unlock()
|
||||||
|
var ok bool
|
||||||
|
if containerURL, ok = f.cntURLcache[container]; !ok {
|
||||||
|
cntURL := f.svcURL.NewContainerURL(container)
|
||||||
|
containerURL = &cntURL
|
||||||
|
f.cntURLcache[container] = containerURL
|
||||||
|
}
|
||||||
|
return containerURL
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
// Return an Object from a path
|
// Return an Object from a path
|
||||||
//
|
//
|
||||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||||
@@ -454,13 +487,13 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object,
|
|||||||
|
|
||||||
// NewObject finds the Object at remote. If it can't be found
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
// it returns the error fs.ErrorObjectNotFound.
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
return f.newObjectWithInfo(remote, nil)
|
return f.newObjectWithInfo(remote, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// getBlobReference creates an empty blob reference with no metadata
|
// getBlobReference creates an empty blob reference with no metadata
|
||||||
func (f *Fs) getBlobReference(remote string) azblob.BlobURL {
|
func (f *Fs) getBlobReference(container, containerPath string) azblob.BlobURL {
|
||||||
return f.cntURL.NewBlobURL(f.root + remote)
|
return f.cntURL(container).NewBlobURL(containerPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateMetadataWithModTime adds the modTime passed in to o.meta.
|
// updateMetadataWithModTime adds the modTime passed in to o.meta.
|
||||||
@@ -496,16 +529,18 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
|
|||||||
// the container and root supplied
|
// the container and root supplied
|
||||||
//
|
//
|
||||||
// dir is the starting directory, "" for root
|
// dir is the starting directory, "" for root
|
||||||
func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
//
|
||||||
f.containerOKMu.Lock()
|
// The remote has prefix removed from it and if addContainer is set then
|
||||||
deleted := f.containerDeleted
|
// it adds the container to the start.
|
||||||
f.containerOKMu.Unlock()
|
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, maxResults uint, fn listFn) error {
|
||||||
if deleted {
|
if f.cache.IsDeleted(container) {
|
||||||
return fs.ErrorDirNotFound
|
return fs.ErrorDirNotFound
|
||||||
}
|
}
|
||||||
root := f.root
|
if prefix != "" {
|
||||||
if dir != "" {
|
prefix += "/"
|
||||||
root += dir + "/"
|
}
|
||||||
|
if directory != "" {
|
||||||
|
directory += "/"
|
||||||
}
|
}
|
||||||
delimiter := ""
|
delimiter := ""
|
||||||
if !recurse {
|
if !recurse {
|
||||||
@@ -520,16 +555,14 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
|||||||
UncommittedBlobs: false,
|
UncommittedBlobs: false,
|
||||||
Deleted: false,
|
Deleted: false,
|
||||||
},
|
},
|
||||||
Prefix: root,
|
Prefix: directory,
|
||||||
MaxResults: int32(maxResults),
|
MaxResults: int32(maxResults),
|
||||||
}
|
}
|
||||||
ctx := context.Background()
|
|
||||||
directoryMarkers := map[string]struct{}{}
|
|
||||||
for marker := (azblob.Marker{}); marker.NotDone(); {
|
for marker := (azblob.Marker{}); marker.NotDone(); {
|
||||||
var response *azblob.ListBlobsHierarchySegmentResponse
|
var response *azblob.ListBlobsHierarchySegmentResponse
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
err := f.pacer.Call(func() (bool, error) {
|
||||||
var err error
|
var err error
|
||||||
response, err = f.cntURL.ListBlobsHierarchySegment(ctx, marker, delimiter, options)
|
response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
|
||||||
return f.shouldRetry(err)
|
return f.shouldRetry(err)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -549,26 +582,17 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
|||||||
// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
|
// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
|
||||||
// return nil
|
// return nil
|
||||||
// }
|
// }
|
||||||
if !strings.HasPrefix(file.Name, f.root) {
|
if !strings.HasPrefix(file.Name, prefix) {
|
||||||
fs.Debugf(f, "Odd name received %q", file.Name)
|
fs.Debugf(f, "Odd name received %q", file.Name)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
remote := file.Name[len(f.root):]
|
remote := file.Name[len(prefix):]
|
||||||
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
|
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
|
||||||
if strings.HasSuffix(remote, "/") {
|
|
||||||
remote = remote[:len(remote)-1]
|
|
||||||
}
|
|
||||||
err = fn(remote, file, true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Keep track of directory markers. If recursing then
|
|
||||||
// there will be no Prefixes so no need to keep track
|
|
||||||
if !recurse {
|
|
||||||
directoryMarkers[remote] = struct{}{}
|
|
||||||
}
|
|
||||||
continue // skip directory marker
|
continue // skip directory marker
|
||||||
}
|
}
|
||||||
|
if addContainer {
|
||||||
|
remote = path.Join(container, remote)
|
||||||
|
}
|
||||||
// Send object
|
// Send object
|
||||||
err = fn(remote, file, false)
|
err = fn(remote, file, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -578,14 +602,13 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
|||||||
// Send the subdirectories
|
// Send the subdirectories
|
||||||
for _, remote := range response.Segment.BlobPrefixes {
|
for _, remote := range response.Segment.BlobPrefixes {
|
||||||
remote := strings.TrimRight(remote.Name, "/")
|
remote := strings.TrimRight(remote.Name, "/")
|
||||||
if !strings.HasPrefix(remote, f.root) {
|
if !strings.HasPrefix(remote, prefix) {
|
||||||
fs.Debugf(f, "Odd directory name received %q", remote)
|
fs.Debugf(f, "Odd directory name received %q", remote)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
remote = remote[len(f.root):]
|
remote = remote[len(prefix):]
|
||||||
// Don't send if already sent as a directory marker
|
if addContainer {
|
||||||
if _, found := directoryMarkers[remote]; found {
|
remote = path.Join(container, remote)
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
// Send object
|
// Send object
|
||||||
err = fn(remote, nil, true)
|
err = fn(remote, nil, true)
|
||||||
@@ -610,19 +633,9 @@ func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory
|
|||||||
return o, nil
|
return o, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// mark the container as being OK
|
|
||||||
func (f *Fs) markContainerOK() {
|
|
||||||
if f.container != "" {
|
|
||||||
f.containerOKMu.Lock()
|
|
||||||
f.containerOK = true
|
|
||||||
f.containerDeleted = false
|
|
||||||
f.containerOKMu.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// listDir lists a single directory
|
// listDir lists a single directory
|
||||||
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
|
||||||
err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -636,17 +649,24 @@ func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// container must be present if listing succeeded
|
// container must be present if listing succeeded
|
||||||
f.markContainerOK()
|
f.cache.MarkOK(container)
|
||||||
return entries, nil
|
return entries, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// listContainers returns all the containers to out
|
// listContainers returns all the containers to out
|
||||||
func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||||
if dir != "" {
|
if f.isLimited {
|
||||||
return nil, fs.ErrorListBucketRequired
|
f.cntURLcacheMu.Lock()
|
||||||
|
for container := range f.cntURLcache {
|
||||||
|
d := fs.NewDir(container, time.Time{})
|
||||||
|
entries = append(entries, d)
|
||||||
|
}
|
||||||
|
f.cntURLcacheMu.Unlock()
|
||||||
|
return entries, nil
|
||||||
}
|
}
|
||||||
err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
|
err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
|
||||||
d := fs.NewDir(container.Name, container.Properties.LastModified)
|
d := fs.NewDir(container.Name, container.Properties.LastModified)
|
||||||
|
f.cache.MarkOK(container.Name)
|
||||||
entries = append(entries, d)
|
entries = append(entries, d)
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
@@ -665,11 +685,15 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
|
|||||||
//
|
//
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
if f.container == "" {
|
container, directory := f.split(dir)
|
||||||
return f.listContainers(dir)
|
if container == "" {
|
||||||
|
if directory != "" {
|
||||||
|
return nil, fs.ErrorListBucketRequired
|
||||||
|
}
|
||||||
|
return f.listContainers(ctx)
|
||||||
}
|
}
|
||||||
return f.listDir(dir)
|
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
@@ -688,23 +712,44 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
//
|
//
|
||||||
// Don't implement this unless you have a more efficient way
|
// Don't implement this unless you have a more efficient way
|
||||||
// of listing recursively that doing a directory traversal.
|
// of listing recursively that doing a directory traversal.
|
||||||
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||||
if f.container == "" {
|
container, directory := f.split(dir)
|
||||||
return fs.ErrorListBucketRequired
|
|
||||||
}
|
|
||||||
list := walk.NewListRHelper(callback)
|
list := walk.NewListRHelper(callback)
|
||||||
err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
listR := func(container, directory, prefix string, addContainer bool) error {
|
||||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||||
|
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return list.Add(entry)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if container == "" {
|
||||||
|
entries, err := f.listContainers(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return list.Add(entry)
|
for _, entry := range entries {
|
||||||
})
|
err = list.Add(entry)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
}
|
||||||
|
container := entry.Remote()
|
||||||
|
err = listR(container, "", f.rootDirectory, true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// container must be present if listing succeeded
|
||||||
|
f.cache.MarkOK(container)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// container must be present if listing succeeded
|
||||||
|
f.cache.MarkOK(container)
|
||||||
}
|
}
|
||||||
// container must be present if listing succeeded
|
|
||||||
f.markContainerOK()
|
|
||||||
return list.Flush()
|
return list.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -745,95 +790,52 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
|
|||||||
// Copy the reader in to the new object which is returned
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
// Temporary Object under construction
|
// Temporary Object under construction
|
||||||
fs := &Object{
|
fs := &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
remote: src.Remote(),
|
remote: src.Remote(),
|
||||||
}
|
}
|
||||||
return fs, fs.Update(in, src, options...)
|
return fs, fs.Update(ctx, in, src, options...)
|
||||||
}
|
|
||||||
|
|
||||||
// Check if the container exists
|
|
||||||
//
|
|
||||||
// NB this can return incorrect results if called immediately after container deletion
|
|
||||||
func (f *Fs) dirExists() (bool, error) {
|
|
||||||
options := azblob.ListBlobsSegmentOptions{
|
|
||||||
Details: azblob.BlobListingDetails{
|
|
||||||
Copy: false,
|
|
||||||
Metadata: false,
|
|
||||||
Snapshots: false,
|
|
||||||
UncommittedBlobs: false,
|
|
||||||
Deleted: false,
|
|
||||||
},
|
|
||||||
MaxResults: 1,
|
|
||||||
}
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
ctx := context.Background()
|
|
||||||
_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
|
|
||||||
return f.shouldRetry(err)
|
|
||||||
})
|
|
||||||
if err == nil {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
|
|
||||||
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
return false, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mkdir creates the container if it doesn't exist
|
// Mkdir creates the container if it doesn't exist
|
||||||
func (f *Fs) Mkdir(dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
f.containerOKMu.Lock()
|
container, _ := f.split(dir)
|
||||||
defer f.containerOKMu.Unlock()
|
return f.makeContainer(ctx, container)
|
||||||
if f.containerOK {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if !f.containerDeleted {
|
|
||||||
exists, err := f.dirExists()
|
|
||||||
if err == nil {
|
|
||||||
f.containerOK = exists
|
|
||||||
}
|
|
||||||
if err != nil || exists {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// now try to create the container
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
ctx := context.Background()
|
|
||||||
_, err := f.cntURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
|
|
||||||
if err != nil {
|
|
||||||
if storageErr, ok := err.(azblob.StorageError); ok {
|
|
||||||
switch storageErr.ServiceCode() {
|
|
||||||
case azblob.ServiceCodeContainerAlreadyExists:
|
|
||||||
f.containerOK = true
|
|
||||||
return false, nil
|
|
||||||
case azblob.ServiceCodeContainerBeingDeleted:
|
|
||||||
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
|
|
||||||
// When a container is deleted, a container with the same name cannot be created
|
|
||||||
// for at least 30 seconds; the container may not be available for more than 30
|
|
||||||
// seconds if the service is still processing the request.
|
|
||||||
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
|
|
||||||
f.containerDeleted = true
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return f.shouldRetry(err)
|
|
||||||
})
|
|
||||||
if err == nil {
|
|
||||||
f.containerOK = true
|
|
||||||
f.containerDeleted = false
|
|
||||||
}
|
|
||||||
return errors.Wrap(err, "failed to make container")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// isEmpty checks to see if a given directory is empty and returns an error if not
|
// makeContainer creates the container if it doesn't exist
|
||||||
func (f *Fs) isEmpty(dir string) (err error) {
|
func (f *Fs) makeContainer(ctx context.Context, container string) error {
|
||||||
|
return f.cache.Create(container, func() error {
|
||||||
|
// now try to create the container
|
||||||
|
return f.pacer.Call(func() (bool, error) {
|
||||||
|
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
|
||||||
|
if err != nil {
|
||||||
|
if storageErr, ok := err.(azblob.StorageError); ok {
|
||||||
|
switch storageErr.ServiceCode() {
|
||||||
|
case azblob.ServiceCodeContainerAlreadyExists:
|
||||||
|
return false, nil
|
||||||
|
case azblob.ServiceCodeContainerBeingDeleted:
|
||||||
|
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
|
||||||
|
// When a container is deleted, a container with the same name cannot be created
|
||||||
|
// for at least 30 seconds; the container may not be available for more than 30
|
||||||
|
// seconds if the service is still processing the request.
|
||||||
|
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
|
||||||
|
f.cache.MarkDeleted(container)
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return f.shouldRetry(err)
|
||||||
|
})
|
||||||
|
}, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isEmpty checks to see if a given (container, directory) is empty and returns an error if not
|
||||||
|
func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) {
|
||||||
empty := true
|
empty := true
|
||||||
err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||||
empty = false
|
empty = false
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
@@ -848,47 +850,42 @@ func (f *Fs) isEmpty(dir string) (err error) {
|
|||||||
|
|
||||||
// deleteContainer deletes the container. It can delete a full
|
// deleteContainer deletes the container. It can delete a full
|
||||||
// container so use isEmpty if you don't want that.
|
// container so use isEmpty if you don't want that.
|
||||||
func (f *Fs) deleteContainer() error {
|
func (f *Fs) deleteContainer(ctx context.Context, container string) error {
|
||||||
f.containerOKMu.Lock()
|
return f.cache.Remove(container, func() error {
|
||||||
defer f.containerOKMu.Unlock()
|
options := azblob.ContainerAccessConditions{}
|
||||||
options := azblob.ContainerAccessConditions{}
|
return f.pacer.Call(func() (bool, error) {
|
||||||
ctx := context.Background()
|
_, err := f.cntURL(container).GetProperties(ctx, azblob.LeaseAccessConditions{})
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
if err == nil {
|
||||||
_, err := f.cntURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
|
_, err = f.cntURL(container).Delete(ctx, options)
|
||||||
if err == nil {
|
}
|
||||||
_, err = f.cntURL.Delete(ctx, options)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
|
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
|
||||||
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
||||||
return false, fs.ErrorDirNotFound
|
return false, fs.ErrorDirNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
return f.shouldRetry(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return f.shouldRetry(err)
|
return f.shouldRetry(err)
|
||||||
}
|
})
|
||||||
|
|
||||||
return f.shouldRetry(err)
|
|
||||||
})
|
})
|
||||||
if err == nil {
|
|
||||||
f.containerOK = false
|
|
||||||
f.containerDeleted = true
|
|
||||||
}
|
|
||||||
return errors.Wrap(err, "failed to delete container")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rmdir deletes the container if the fs is at the root
|
// Rmdir deletes the container if the fs is at the root
|
||||||
//
|
//
|
||||||
// Returns an error if it isn't empty
|
// Returns an error if it isn't empty
|
||||||
func (f *Fs) Rmdir(dir string) error {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
err := f.isEmpty(dir)
|
container, directory := f.split(dir)
|
||||||
|
if container == "" || directory != "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
err := f.isEmpty(ctx, container, directory)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if f.root != "" || dir != "" {
|
return f.deleteContainer(ctx, container)
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return f.deleteContainer()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Precision of the remote
|
// Precision of the remote
|
||||||
@@ -902,13 +899,14 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Purge deletes all the files and directories including the old versions.
|
// Purge deletes all the files and directories including the old versions.
|
||||||
func (f *Fs) Purge() error {
|
func (f *Fs) Purge(ctx context.Context) error {
|
||||||
dir := "" // forward compat!
|
dir := "" // forward compat!
|
||||||
if f.root != "" || dir != "" {
|
container, directory := f.split(dir)
|
||||||
// Delegate to caller if not root container
|
if container == "" || directory != "" {
|
||||||
|
// Delegate to caller if not root of a container
|
||||||
return fs.ErrorCantPurge
|
return fs.ErrorCantPurge
|
||||||
}
|
}
|
||||||
return f.deleteContainer()
|
return f.deleteContainer(ctx, container)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server side copy operations.
|
||||||
@@ -920,8 +918,9 @@ func (f *Fs) Purge() error {
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantCopy
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
err := f.Mkdir("")
|
dstContainer, dstPath := f.split(remote)
|
||||||
|
err := f.makeContainer(ctx, dstContainer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -930,7 +929,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
fs.Debugf(src, "Can't copy - not same remote type")
|
fs.Debugf(src, "Can't copy - not same remote type")
|
||||||
return nil, fs.ErrorCantCopy
|
return nil, fs.ErrorCantCopy
|
||||||
}
|
}
|
||||||
dstBlobURL := f.getBlobReference(remote)
|
dstBlobURL := f.getBlobReference(dstContainer, dstPath)
|
||||||
srcBlobURL := srcObj.getBlobReference()
|
srcBlobURL := srcObj.getBlobReference()
|
||||||
|
|
||||||
source, err := url.Parse(srcBlobURL.String())
|
source, err := url.Parse(srcBlobURL.String())
|
||||||
@@ -939,7 +938,6 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
options := azblob.BlobAccessConditions{}
|
options := azblob.BlobAccessConditions{}
|
||||||
ctx := context.Background()
|
|
||||||
var startCopy *azblob.BlobStartCopyFromURLResponse
|
var startCopy *azblob.BlobStartCopyFromURLResponse
|
||||||
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
@@ -960,7 +958,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
copyStatus = getMetadata.CopyStatus()
|
copyStatus = getMetadata.CopyStatus()
|
||||||
}
|
}
|
||||||
|
|
||||||
return f.NewObject(remote)
|
return f.NewObject(ctx, remote)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
@@ -984,7 +982,7 @@ func (o *Object) Remote() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Hash returns the MD5 of an object returning a lowercase hex string
|
// Hash returns the MD5 of an object returning a lowercase hex string
|
||||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||||
if t != hash.MD5 {
|
if t != hash.MD5 {
|
||||||
return "", hash.ErrUnsupported
|
return "", hash.ErrUnsupported
|
||||||
}
|
}
|
||||||
@@ -1038,7 +1036,7 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
|
|||||||
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
|
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
|
||||||
o.mimeType = info.ContentType()
|
o.mimeType = info.ContentType()
|
||||||
o.size = size
|
o.size = size
|
||||||
o.modTime = time.Time(info.LastModified())
|
o.modTime = info.LastModified()
|
||||||
o.accessTier = azblob.AccessTierType(info.AccessTier())
|
o.accessTier = azblob.AccessTierType(info.AccessTier())
|
||||||
o.setMetadata(metadata)
|
o.setMetadata(metadata)
|
||||||
|
|
||||||
@@ -1064,7 +1062,8 @@ func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
|
|||||||
|
|
||||||
// getBlobReference creates an empty blob reference with no metadata
|
// getBlobReference creates an empty blob reference with no metadata
|
||||||
func (o *Object) getBlobReference() azblob.BlobURL {
|
func (o *Object) getBlobReference() azblob.BlobURL {
|
||||||
return o.fs.getBlobReference(o.remote)
|
container, directory := o.split()
|
||||||
|
return o.fs.getBlobReference(container, directory)
|
||||||
}
|
}
|
||||||
|
|
||||||
// clearMetaData clears enough metadata so readMetaData will re-read it
|
// clearMetaData clears enough metadata so readMetaData will re-read it
|
||||||
@@ -1104,12 +1103,6 @@ func (o *Object) readMetaData() (err error) {
|
|||||||
return o.decodeMetaDataFromPropertiesResponse(blobProperties)
|
return o.decodeMetaDataFromPropertiesResponse(blobProperties)
|
||||||
}
|
}
|
||||||
|
|
||||||
// timeString returns modTime as the number of milliseconds
|
|
||||||
// elapsed since January 1, 1970 UTC as a decimal string.
|
|
||||||
func timeString(modTime time.Time) string {
|
|
||||||
return strconv.FormatInt(modTime.UnixNano()/1E6, 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseTimeString converts a decimal string number of milliseconds
|
// parseTimeString converts a decimal string number of milliseconds
|
||||||
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
|
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
|
||||||
// the modTime variable.
|
// the modTime variable.
|
||||||
@@ -1130,14 +1123,14 @@ func (o *Object) parseTimeString(timeString string) (err error) {
|
|||||||
//
|
//
|
||||||
// It attempts to read the objects mtime and if that isn't present the
|
// It attempts to read the objects mtime and if that isn't present the
|
||||||
// LastModified returned in the http headers
|
// LastModified returned in the http headers
|
||||||
func (o *Object) ModTime() (result time.Time) {
|
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
|
||||||
// The error is logged in readMetaData
|
// The error is logged in readMetaData
|
||||||
_ = o.readMetaData()
|
_ = o.readMetaData()
|
||||||
return o.modTime
|
return o.modTime
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
// SetModTime sets the modification time of the local fs object
|
||||||
func (o *Object) SetModTime(modTime time.Time) error {
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||||
// Make sure o.meta is not nil
|
// Make sure o.meta is not nil
|
||||||
if o.meta == nil {
|
if o.meta == nil {
|
||||||
o.meta = make(map[string]string, 1)
|
o.meta = make(map[string]string, 1)
|
||||||
@@ -1146,7 +1139,6 @@ func (o *Object) SetModTime(modTime time.Time) error {
|
|||||||
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
||||||
|
|
||||||
blob := o.getBlobReference()
|
blob := o.getBlobReference()
|
||||||
ctx := context.Background()
|
|
||||||
err := o.fs.pacer.Call(func() (bool, error) {
|
err := o.fs.pacer.Call(func() (bool, error) {
|
||||||
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
|
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
|
||||||
return o.fs.shouldRetry(err)
|
return o.fs.shouldRetry(err)
|
||||||
@@ -1164,14 +1156,14 @@ func (o *Object) Storable() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open an object for read
|
// Open an object for read
|
||||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
// Offset and Count for range download
|
// Offset and Count for range download
|
||||||
var offset int64
|
var offset int64
|
||||||
var count int64
|
var count int64
|
||||||
if o.AccessTier() == azblob.AccessTierArchive {
|
if o.AccessTier() == azblob.AccessTierArchive {
|
||||||
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
|
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
|
||||||
}
|
}
|
||||||
|
fs.FixRangeOption(options, o.size)
|
||||||
for _, option := range options {
|
for _, option := range options {
|
||||||
switch x := option.(type) {
|
switch x := option.(type) {
|
||||||
case *fs.RangeOption:
|
case *fs.RangeOption:
|
||||||
@@ -1188,7 +1180,6 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
blob := o.getBlobReference()
|
blob := o.getBlobReference()
|
||||||
ctx := context.Background()
|
|
||||||
ac := azblob.BlobAccessConditions{}
|
ac := azblob.BlobAccessConditions{}
|
||||||
var dowloadResponse *azblob.DownloadResponse
|
var dowloadResponse *azblob.DownloadResponse
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
@@ -1377,31 +1368,32 @@ outer:
|
|||||||
// Update the object with the contents of the io.Reader, modTime and size
|
// Update the object with the contents of the io.Reader, modTime and size
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||||
err = o.fs.Mkdir("")
|
container, _ := o.split()
|
||||||
|
err = o.fs.makeContainer(ctx, container)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
// Update Mod time
|
// Update Mod time
|
||||||
o.updateMetadataWithModTime(src.ModTime())
|
o.updateMetadataWithModTime(src.ModTime(ctx))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
blob := o.getBlobReference()
|
blob := o.getBlobReference()
|
||||||
httpHeaders := azblob.BlobHTTPHeaders{}
|
httpHeaders := azblob.BlobHTTPHeaders{}
|
||||||
httpHeaders.ContentType = fs.MimeType(o)
|
httpHeaders.ContentType = fs.MimeType(ctx, o)
|
||||||
// Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
|
// Compute the Content-MD5 of the file, for multiparts uploads it
|
||||||
// MD5 only for PutBlob requests
|
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
|
||||||
if size < int64(o.fs.opt.UploadCutoff) {
|
// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
|
||||||
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
|
// in order to validate its integrity during transport
|
||||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
|
||||||
if err == nil {
|
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||||
httpHeaders.ContentMD5 = sourceMD5bytes
|
if err == nil {
|
||||||
} else {
|
httpHeaders.ContentMD5 = sourceMD5bytes
|
||||||
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
} else {
|
||||||
}
|
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1414,14 +1406,13 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
|
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
|
||||||
// is merged the SDK can't upload a single blob of exactly the chunk
|
// is merged the SDK can't upload a single blob of exactly the chunk
|
||||||
// size, so upload with a multpart upload to work around.
|
// size, so upload with a multpart upload to work around.
|
||||||
// See: https://github.com/ncw/rclone/issues/2653
|
// See: https://github.com/rclone/rclone/issues/2653
|
||||||
multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
|
multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
|
||||||
if size == int64(o.fs.opt.ChunkSize) {
|
if size == int64(o.fs.opt.ChunkSize) {
|
||||||
multipartUpload = true
|
multipartUpload = true
|
||||||
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
|
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
// Don't retry, return a retry error instead
|
// Don't retry, return a retry error instead
|
||||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||||
if multipartUpload {
|
if multipartUpload {
|
||||||
@@ -1454,11 +1445,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove() error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
blob := o.getBlobReference()
|
blob := o.getBlobReference()
|
||||||
snapShotOptions := azblob.DeleteSnapshotsOptionNone
|
snapShotOptions := azblob.DeleteSnapshotsOptionNone
|
||||||
ac := azblob.BlobAccessConditions{}
|
ac := azblob.BlobAccessConditions{}
|
||||||
ctx := context.Background()
|
|
||||||
return o.fs.pacer.Call(func() (bool, error) {
|
return o.fs.pacer.Call(func() (bool, error) {
|
||||||
_, err := blob.Delete(ctx, snapShotOptions, ac)
|
_, err := blob.Delete(ctx, snapShotOptions, ac)
|
||||||
return o.fs.shouldRetry(err)
|
return o.fs.shouldRetry(err)
|
||||||
@@ -1466,7 +1456,7 @@ func (o *Object) Remove() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MimeType of an Object if known, "" otherwise
|
// MimeType of an Object if known, "" otherwise
|
||||||
func (o *Object) MimeType() string {
|
func (o *Object) MimeType(ctx context.Context) string {
|
||||||
return o.mimeType
|
return o.mimeType
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// +build !plan9,!solaris,go1.8
|
// +build !plan9,!solaris
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
|
|||||||
@@ -1,14 +1,14 @@
|
|||||||
// Test AzureBlob filesystem interface
|
// Test AzureBlob filesystem interface
|
||||||
|
|
||||||
// +build !plan9,!solaris,go1.8
|
// +build !plan9,!solaris
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Build for azureblob for unsupported platforms to stop go complaining
|
// Build for azureblob for unsupported platforms to stop go complaining
|
||||||
// about "no buildable Go source files "
|
// about "no buildable Go source files "
|
||||||
|
|
||||||
// +build plan9 solaris !go1.8
|
// +build plan9 solaris
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs/fserrors"
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Error describes a B2 error response
|
// Error describes a B2 error response
|
||||||
@@ -17,12 +17,12 @@ type Error struct {
|
|||||||
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
|
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
|
||||||
}
|
}
|
||||||
|
|
||||||
// Error statisfies the error interface
|
// Error satisfies the error interface
|
||||||
func (e *Error) Error() string {
|
func (e *Error) Error() string {
|
||||||
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
|
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fatal statisfies the Fatal interface
|
// Fatal satisfies the Fatal interface
|
||||||
//
|
//
|
||||||
// It indicates which errors should be treated as fatal
|
// It indicates which errors should be treated as fatal
|
||||||
func (e *Error) Fatal() bool {
|
func (e *Error) Fatal() bool {
|
||||||
@@ -100,7 +100,7 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
|||||||
return Timestamp(newT), base[:versionStart] + ext
|
return Timestamp(newT), base[:versionStart] + ext
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsZero returns true if the timestamp is unitialised
|
// IsZero returns true if the timestamp is uninitialized
|
||||||
func (t Timestamp) IsZero() bool {
|
func (t Timestamp) IsZero() bool {
|
||||||
return time.Time(t).IsZero()
|
return time.Time(t).IsZero()
|
||||||
}
|
}
|
||||||
@@ -189,6 +189,21 @@ type GetUploadURLResponse struct {
|
|||||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
|
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetDownloadAuthorizationRequest is passed to b2_get_download_authorization
|
||||||
|
type GetDownloadAuthorizationRequest struct {
|
||||||
|
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
|
||||||
|
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
|
||||||
|
ValidDurationInSeconds int64 `json:"validDurationInSeconds"` // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds.
|
||||||
|
B2ContentDisposition string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition.
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDownloadAuthorizationResponse is received from b2_get_download_authorization
|
||||||
|
type GetDownloadAuthorizationResponse struct {
|
||||||
|
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||||
|
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
|
||||||
|
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name.
|
||||||
|
}
|
||||||
|
|
||||||
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
|
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
|
||||||
type FileInfo struct {
|
type FileInfo struct {
|
||||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||||
@@ -311,3 +326,14 @@ type CancelLargeFileResponse struct {
|
|||||||
AccountID string `json:"accountId"` // The identifier for the account.
|
AccountID string `json:"accountId"` // The identifier for the account.
|
||||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CopyFileRequest is as passed to b2_copy_file
|
||||||
|
type CopyFileRequest struct {
|
||||||
|
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
|
||||||
|
Name string `json:"fileName"` // The name of the new file being created.
|
||||||
|
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
|
||||||
|
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
|
||||||
|
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
|
||||||
|
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
|
||||||
|
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
|
||||||
|
}
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/b2/api"
|
"github.com/rclone/rclone/backend/b2/api"
|
||||||
"github.com/ncw/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|||||||
957
backend/b2/b2.go
957
backend/b2/b2.go
File diff suppressed because it is too large
Load Diff
@@ -4,7 +4,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Test b2 string encoding
|
// Test b2 string encoding
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ package b2
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ package b2
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"crypto/sha1"
|
"crypto/sha1"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -14,12 +15,12 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/b2/api"
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/accounting"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/ncw/rclone/lib/rest"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/backend/b2/api"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/rest"
|
||||||
)
|
)
|
||||||
|
|
||||||
type hashAppendingReader struct {
|
type hashAppendingReader struct {
|
||||||
@@ -80,7 +81,7 @@ type largeUpload struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// newLargeUpload starts an upload of object o from in with metadata in src
|
// newLargeUpload starts an upload of object o from in with metadata in src
|
||||||
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
|
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
|
||||||
remote := o.remote
|
remote := o.remote
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
parts := int64(0)
|
parts := int64(0)
|
||||||
@@ -98,26 +99,27 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
|
|||||||
sha1SliceSize = parts
|
sha1SliceSize = parts
|
||||||
}
|
}
|
||||||
|
|
||||||
modTime := src.ModTime()
|
modTime := src.ModTime(ctx)
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
Path: "/b2_start_large_file",
|
Path: "/b2_start_large_file",
|
||||||
}
|
}
|
||||||
bucketID, err := f.getBucketID()
|
bucket, bucketPath := o.split()
|
||||||
|
bucketID, err := f.getBucketID(bucket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var request = api.StartLargeFileRequest{
|
var request = api.StartLargeFileRequest{
|
||||||
BucketID: bucketID,
|
BucketID: bucketID,
|
||||||
Name: o.fs.root + remote,
|
Name: bucketPath,
|
||||||
ContentType: fs.MimeType(src),
|
ContentType: fs.MimeType(ctx, src),
|
||||||
Info: map[string]string{
|
Info: map[string]string{
|
||||||
timeKey: timeString(modTime),
|
timeKey: timeString(modTime),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
// Set the SHA1 if known
|
// Set the SHA1 if known
|
||||||
if !o.fs.opt.DisableCheckSum {
|
if !o.fs.opt.DisableCheckSum {
|
||||||
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
|
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
|
||||||
request.Info[sha1Key] = calculatedSha1
|
request.Info[sha1Key] = calculatedSha1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -45,7 +45,7 @@ type Error struct {
|
|||||||
RequestID string `json:"request_id"`
|
RequestID string `json:"request_id"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Error returns a string for the error and statistifes the error interface
|
// Error returns a string for the error and satisfies the error interface
|
||||||
func (e *Error) Error() string {
|
func (e *Error) Error() string {
|
||||||
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
|
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
|
||||||
if e.Message != "" {
|
if e.Message != "" {
|
||||||
@@ -57,7 +57,7 @@ func (e *Error) Error() string {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check Error statisfies the error interface
|
// Check Error satisfies the error interface
|
||||||
var _ error = (*Error)(nil)
|
var _ error = (*Error)(nil)
|
||||||
|
|
||||||
// ItemFields are the fields needed for FileInfo
|
// ItemFields are the fields needed for FileInfo
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ package box
|
|||||||
// FIXME box can copy a directory
|
// FIXME box can copy a directory
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
@@ -20,19 +21,19 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/box/api"
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/config"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
|
||||||
"github.com/ncw/rclone/fs/config/obscure"
|
|
||||||
"github.com/ncw/rclone/fs/fserrors"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/ncw/rclone/lib/dircache"
|
|
||||||
"github.com/ncw/rclone/lib/oauthutil"
|
|
||||||
"github.com/ncw/rclone/lib/pacer"
|
|
||||||
"github.com/ncw/rclone/lib/rest"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/backend/box/api"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/dircache"
|
||||||
|
"github.com/rclone/rclone/lib/oauthutil"
|
||||||
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
|
"github.com/rclone/rclone/lib/rest"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -111,7 +112,7 @@ type Fs struct {
|
|||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
srv *rest.Client // the connection to the one drive server
|
srv *rest.Client // the connection to the one drive server
|
||||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||||
pacer *pacer.Pacer // pacer for API calls
|
pacer *fs.Pacer // pacer for API calls
|
||||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||||
uploadToken *pacer.TokenDispenser // control concurrency
|
uploadToken *pacer.TokenDispenser // control concurrency
|
||||||
}
|
}
|
||||||
@@ -171,13 +172,13 @@ var retryErrorCodes = []int{
|
|||||||
// shouldRetry returns a boolean as to whether this resp and err
|
// shouldRetry returns a boolean as to whether this resp and err
|
||||||
// deserve to be retried. It returns the err as a convenience
|
// deserve to be retried. It returns the err as a convenience
|
||||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||||
authRety := false
|
authRetry := false
|
||||||
|
|
||||||
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
||||||
authRety = true
|
authRetry = true
|
||||||
fs.Debugf(nil, "Should retry: %v", err)
|
fs.Debugf(nil, "Should retry: %v", err)
|
||||||
}
|
}
|
||||||
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||||
}
|
}
|
||||||
|
|
||||||
// substitute reserved characters for box
|
// substitute reserved characters for box
|
||||||
@@ -193,9 +194,9 @@ func restoreReservedChars(x string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// readMetaDataForPath reads the metadata from the path
|
// readMetaDataForPath reads the metadata from the path
|
||||||
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
|
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
|
||||||
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
||||||
leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
|
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorDirNotFound {
|
if err == fs.ErrorDirNotFound {
|
||||||
return nil, fs.ErrorObjectNotFound
|
return nil, fs.ErrorObjectNotFound
|
||||||
@@ -238,6 +239,7 @@ func errorHandler(resp *http.Response) error {
|
|||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
|
ctx := context.Background()
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -260,7 +262,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
root: root,
|
root: root,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
@@ -271,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
|
|
||||||
// Renew the token in the background
|
// Renew the token in the background
|
||||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||||
_, err := f.readMetaDataForPath("")
|
_, err := f.readMetaDataForPath(ctx, "")
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -279,7 +281,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
f.dirCache = dircache.New(root, rootID, f)
|
f.dirCache = dircache.New(root, rootID, f)
|
||||||
|
|
||||||
// Find the current root
|
// Find the current root
|
||||||
err = f.dirCache.FindRoot(false)
|
err = f.dirCache.FindRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Assume it is a file
|
// Assume it is a file
|
||||||
newRoot, remote := dircache.SplitPath(root)
|
newRoot, remote := dircache.SplitPath(root)
|
||||||
@@ -287,12 +289,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
|
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
|
||||||
tempF.root = newRoot
|
tempF.root = newRoot
|
||||||
// Make new Fs which is the parent
|
// Make new Fs which is the parent
|
||||||
err = tempF.dirCache.FindRoot(false)
|
err = tempF.dirCache.FindRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// No root so return old f
|
// No root so return old f
|
||||||
return f, nil
|
return f, nil
|
||||||
}
|
}
|
||||||
_, err := tempF.newObjectWithInfo(remote, nil)
|
_, err := tempF.newObjectWithInfo(ctx, remote, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorObjectNotFound {
|
if err == fs.ErrorObjectNotFound {
|
||||||
// File doesn't exist so return old f
|
// File doesn't exist so return old f
|
||||||
@@ -303,7 +305,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
f.features.Fill(&tempF)
|
f.features.Fill(&tempF)
|
||||||
// XXX: update the old f here instead of returning tempF, since
|
// XXX: update the old f here instead of returning tempF, since
|
||||||
// `features` were already filled with functions having *f as a receiver.
|
// `features` were already filled with functions having *f as a receiver.
|
||||||
// See https://github.com/ncw/rclone/issues/2182
|
// See https://github.com/rclone/rclone/issues/2182
|
||||||
f.dirCache = tempF.dirCache
|
f.dirCache = tempF.dirCache
|
||||||
f.root = tempF.root
|
f.root = tempF.root
|
||||||
// return an error with an fs which points to the parent
|
// return an error with an fs which points to the parent
|
||||||
@@ -323,7 +325,7 @@ func (f *Fs) rootSlash() string {
|
|||||||
// Return an Object from a path
|
// Return an Object from a path
|
||||||
//
|
//
|
||||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
|
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
|
||||||
o := &Object{
|
o := &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
@@ -333,7 +335,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)
|
|||||||
// Set info
|
// Set info
|
||||||
err = o.setMetaData(info)
|
err = o.setMetaData(info)
|
||||||
} else {
|
} else {
|
||||||
err = o.readMetaData() // reads info and meta, returning an error
|
err = o.readMetaData(ctx) // reads info and meta, returning an error
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -343,12 +345,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)
|
|||||||
|
|
||||||
// NewObject finds the Object at remote. If it can't be found
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
// it returns the error fs.ErrorObjectNotFound.
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
return f.newObjectWithInfo(remote, nil)
|
return f.newObjectWithInfo(ctx, remote, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||||
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
|
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||||
// Find the leaf in pathID
|
// Find the leaf in pathID
|
||||||
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
|
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
|
||||||
if item.Name == leaf {
|
if item.Name == leaf {
|
||||||
@@ -368,7 +370,7 @@ func fieldsValue() url.Values {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// CreateDir makes a directory with pathID as parent and name leaf
|
// CreateDir makes a directory with pathID as parent and name leaf
|
||||||
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
|
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
|
||||||
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
|
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
var info *api.Item
|
var info *api.Item
|
||||||
@@ -467,12 +469,12 @@ OUTER:
|
|||||||
//
|
//
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
err = f.dirCache.FindRoot(false)
|
err = f.dirCache.FindRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
directoryID, err := f.dirCache.FindDir(dir, false)
|
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -486,7 +488,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
// FIXME more info from dir?
|
// FIXME more info from dir?
|
||||||
entries = append(entries, d)
|
entries = append(entries, d)
|
||||||
} else if info.Type == api.ItemTypeFile {
|
} else if info.Type == api.ItemTypeFile {
|
||||||
o, err := f.newObjectWithInfo(remote, info)
|
o, err := f.newObjectWithInfo(ctx, remote, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
iErr = err
|
iErr = err
|
||||||
return true
|
return true
|
||||||
@@ -510,9 +512,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
// Returns the object, leaf, directoryID and error
|
// Returns the object, leaf, directoryID and error
|
||||||
//
|
//
|
||||||
// Used to create new objects
|
// Used to create new objects
|
||||||
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
|
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
|
||||||
// Create the directory for the object if it doesn't exist
|
// Create the directory for the object if it doesn't exist
|
||||||
leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
|
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -529,22 +531,22 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
|
|||||||
// Copy the reader in to the new object which is returned
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
return exisitingObj, exisitingObj.Update(in, src, options...)
|
return existingObj, existingObj.Update(ctx, in, src, options...)
|
||||||
case fs.ErrorObjectNotFound:
|
case fs.ErrorObjectNotFound:
|
||||||
// Not found so create it
|
// Not found so create it
|
||||||
return f.PutUnchecked(in, src)
|
return f.PutUnchecked(ctx, in, src)
|
||||||
default:
|
default:
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
return f.Put(in, src, options...)
|
return f.Put(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutUnchecked the object into the container
|
// PutUnchecked the object into the container
|
||||||
@@ -554,26 +556,26 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
|
|||||||
// Copy the reader in to the new object which is returned
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
remote := src.Remote()
|
remote := src.Remote()
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
modTime := src.ModTime()
|
modTime := src.ModTime(ctx)
|
||||||
|
|
||||||
o, _, _, err := f.createObject(remote, modTime, size)
|
o, _, _, err := f.createObject(ctx, remote, modTime, size)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return o, o.Update(in, src, options...)
|
return o, o.Update(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mkdir creates the container if it doesn't exist
|
// Mkdir creates the container if it doesn't exist
|
||||||
func (f *Fs) Mkdir(dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
err := f.dirCache.FindRoot(true)
|
err := f.dirCache.FindRoot(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if dir != "" {
|
if dir != "" {
|
||||||
_, err = f.dirCache.FindDir(dir, true)
|
_, err = f.dirCache.FindDir(ctx, dir, true)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -593,17 +595,17 @@ func (f *Fs) deleteObject(id string) error {
|
|||||||
|
|
||||||
// purgeCheck removes the root directory, if check is set then it
|
// purgeCheck removes the root directory, if check is set then it
|
||||||
// refuses to do so if it has anything in
|
// refuses to do so if it has anything in
|
||||||
func (f *Fs) purgeCheck(dir string, check bool) error {
|
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||||
root := path.Join(f.root, dir)
|
root := path.Join(f.root, dir)
|
||||||
if root == "" {
|
if root == "" {
|
||||||
return errors.New("can't purge root directory")
|
return errors.New("can't purge root directory")
|
||||||
}
|
}
|
||||||
dc := f.dirCache
|
dc := f.dirCache
|
||||||
err := dc.FindRoot(false)
|
err := dc.FindRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
rootID, err := dc.FindDir(dir, false)
|
rootID, err := dc.FindDir(ctx, dir, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -633,8 +635,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
|
|||||||
// Rmdir deletes the root folder
|
// Rmdir deletes the root folder
|
||||||
//
|
//
|
||||||
// Returns an error if it isn't empty
|
// Returns an error if it isn't empty
|
||||||
func (f *Fs) Rmdir(dir string) error {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
return f.purgeCheck(dir, true)
|
return f.purgeCheck(ctx, dir, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Precision return the precision of this Fs
|
// Precision return the precision of this Fs
|
||||||
@@ -651,13 +653,13 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantCopy
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
srcObj, ok := src.(*Object)
|
srcObj, ok := src.(*Object)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(src, "Can't copy - not same remote type")
|
fs.Debugf(src, "Can't copy - not same remote type")
|
||||||
return nil, fs.ErrorCantCopy
|
return nil, fs.ErrorCantCopy
|
||||||
}
|
}
|
||||||
err := srcObj.readMetaData()
|
err := srcObj.readMetaData(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -669,7 +671,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Create temporary object
|
// Create temporary object
|
||||||
dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
|
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -708,8 +710,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
// Optional interface: Only implement this if you have a way of
|
// Optional interface: Only implement this if you have a way of
|
||||||
// deleting all the files quicker than just running Remove() on the
|
// deleting all the files quicker than just running Remove() on the
|
||||||
// result of List()
|
// result of List()
|
||||||
func (f *Fs) Purge() error {
|
func (f *Fs) Purge(ctx context.Context) error {
|
||||||
return f.purgeCheck("", false)
|
return f.purgeCheck(ctx, "", false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// move a file or folder
|
// move a file or folder
|
||||||
@@ -746,7 +748,7 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantMove
|
// If it isn't possible then return fs.ErrorCantMove
|
||||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
srcObj, ok := src.(*Object)
|
srcObj, ok := src.(*Object)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(src, "Can't move - not same remote type")
|
fs.Debugf(src, "Can't move - not same remote type")
|
||||||
@@ -754,7 +756,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Create temporary object
|
// Create temporary object
|
||||||
dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
|
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -780,7 +782,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
// If it isn't possible then return fs.ErrorCantDirMove
|
// If it isn't possible then return fs.ErrorCantDirMove
|
||||||
//
|
//
|
||||||
// If destination exists then return fs.ErrorDirExists
|
// If destination exists then return fs.ErrorDirExists
|
||||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||||
srcFs, ok := src.(*Fs)
|
srcFs, ok := src.(*Fs)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||||
@@ -796,14 +798,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// find the root src directory
|
// find the root src directory
|
||||||
err := srcFs.dirCache.FindRoot(false)
|
err := srcFs.dirCache.FindRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// find the root dst directory
|
// find the root dst directory
|
||||||
if dstRemote != "" {
|
if dstRemote != "" {
|
||||||
err = f.dirCache.FindRoot(true)
|
err = f.dirCache.FindRoot(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -819,14 +821,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
if dstRemote == "" {
|
if dstRemote == "" {
|
||||||
findPath = f.root
|
findPath = f.root
|
||||||
}
|
}
|
||||||
leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
|
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check destination does not exist
|
// Check destination does not exist
|
||||||
if dstRemote != "" {
|
if dstRemote != "" {
|
||||||
_, err = f.dirCache.FindDir(dstRemote, false)
|
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
|
||||||
if err == fs.ErrorDirNotFound {
|
if err == fs.ErrorDirNotFound {
|
||||||
// OK
|
// OK
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
@@ -837,7 +839,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Find ID of src
|
// Find ID of src
|
||||||
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
|
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -852,8 +854,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||||
func (f *Fs) PublicLink(remote string) (string, error) {
|
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
|
||||||
id, err := f.dirCache.FindDir(remote, false)
|
id, err := f.dirCache.FindDir(ctx, remote, false)
|
||||||
var opts rest.Opts
|
var opts rest.Opts
|
||||||
if err == nil {
|
if err == nil {
|
||||||
fs.Debugf(f, "attempting to share directory '%s'", remote)
|
fs.Debugf(f, "attempting to share directory '%s'", remote)
|
||||||
@@ -865,7 +867,7 @@ func (f *Fs) PublicLink(remote string) (string, error) {
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
fs.Debugf(f, "attempting to share single file '%s'", remote)
|
fs.Debugf(f, "attempting to share single file '%s'", remote)
|
||||||
o, err := f.NewObject(remote)
|
o, err := f.NewObject(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@@ -928,7 +930,7 @@ func (o *Object) srvPath() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||||
if t != hash.SHA1 {
|
if t != hash.SHA1 {
|
||||||
return "", hash.ErrUnsupported
|
return "", hash.ErrUnsupported
|
||||||
}
|
}
|
||||||
@@ -937,7 +939,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
|
|||||||
|
|
||||||
// Size returns the size of an object in bytes
|
// Size returns the size of an object in bytes
|
||||||
func (o *Object) Size() int64 {
|
func (o *Object) Size() int64 {
|
||||||
err := o.readMetaData()
|
err := o.readMetaData(context.TODO())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||||
return 0
|
return 0
|
||||||
@@ -962,11 +964,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
|||||||
// readMetaData gets the metadata if it hasn't already been fetched
|
// readMetaData gets the metadata if it hasn't already been fetched
|
||||||
//
|
//
|
||||||
// it also sets the info
|
// it also sets the info
|
||||||
func (o *Object) readMetaData() (err error) {
|
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||||
if o.hasMetaData {
|
if o.hasMetaData {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
info, err := o.fs.readMetaDataForPath(o.remote)
|
info, err := o.fs.readMetaDataForPath(ctx, o.remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if apiErr, ok := err.(*api.Error); ok {
|
if apiErr, ok := err.(*api.Error); ok {
|
||||||
if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
|
if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
|
||||||
@@ -983,8 +985,8 @@ func (o *Object) readMetaData() (err error) {
|
|||||||
//
|
//
|
||||||
// It attempts to read the objects mtime and if that isn't present the
|
// It attempts to read the objects mtime and if that isn't present the
|
||||||
// LastModified returned in the http headers
|
// LastModified returned in the http headers
|
||||||
func (o *Object) ModTime() time.Time {
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
err := o.readMetaData()
|
err := o.readMetaData(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||||
return time.Now()
|
return time.Now()
|
||||||
@@ -993,7 +995,7 @@ func (o *Object) ModTime() time.Time {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// setModTime sets the modification time of the local fs object
|
// setModTime sets the modification time of the local fs object
|
||||||
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
|
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "PUT",
|
Method: "PUT",
|
||||||
Path: "/files/" + o.id,
|
Path: "/files/" + o.id,
|
||||||
@@ -1011,8 +1013,8 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
// SetModTime sets the modification time of the local fs object
|
||||||
func (o *Object) SetModTime(modTime time.Time) error {
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||||
info, err := o.setModTime(modTime)
|
info, err := o.setModTime(ctx, modTime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1025,7 +1027,7 @@ func (o *Object) Storable() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open an object for read
|
// Open an object for read
|
||||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
if o.id == "" {
|
if o.id == "" {
|
||||||
return nil, errors.New("can't download - no id")
|
return nil, errors.New("can't download - no id")
|
||||||
}
|
}
|
||||||
@@ -1093,16 +1095,16 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
|
|||||||
// If existing is set then it updates the object rather than creating a new one
|
// If existing is set then it updates the object rather than creating a new one
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||||
o.fs.tokenRenewer.Start()
|
o.fs.tokenRenewer.Start()
|
||||||
defer o.fs.tokenRenewer.Stop()
|
defer o.fs.tokenRenewer.Stop()
|
||||||
|
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
modTime := src.ModTime()
|
modTime := src.ModTime(ctx)
|
||||||
remote := o.Remote()
|
remote := o.Remote()
|
||||||
|
|
||||||
// Create the directory for the object if it doesn't exist
|
// Create the directory for the object if it doesn't exist
|
||||||
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
|
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1117,7 +1119,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove() error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
return o.fs.deleteObject(o.id)
|
return o.fs.deleteObject(o.id)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ package box_test
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/box"
|
"github.com/rclone/rclone/backend/box"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
|
|||||||
@@ -14,11 +14,11 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/box/api"
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/accounting"
|
|
||||||
"github.com/ncw/rclone/lib/rest"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/backend/box/api"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
|
"github.com/rclone/rclone/lib/rest"
|
||||||
)
|
)
|
||||||
|
|
||||||
// createUploadSession creates an upload session for the object
|
// createUploadSession creates an upload session for the object
|
||||||
@@ -97,7 +97,7 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
|
|||||||
var body []byte
|
var body []byte
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
// For discussion of this value see:
|
// For discussion of this value see:
|
||||||
// https://github.com/ncw/rclone/issues/2054
|
// https://github.com/rclone/rclone/issues/2054
|
||||||
maxTries := o.fs.opt.CommitRetries
|
maxTries := o.fs.opt.CommitRetries
|
||||||
const defaultDelay = 10
|
const defaultDelay = 10
|
||||||
var tries int
|
var tries int
|
||||||
@@ -112,7 +112,7 @@ outer:
|
|||||||
return shouldRetry(resp, err)
|
return shouldRetry(resp, err)
|
||||||
})
|
})
|
||||||
delay := defaultDelay
|
delay := defaultDelay
|
||||||
why := "unknown"
|
var why string
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Sometimes we get 400 Error with
|
// Sometimes we get 400 Error with
|
||||||
// parts_mismatch immediately after uploading
|
// parts_mismatch immediately after uploading
|
||||||
@@ -211,8 +211,8 @@ outer:
|
|||||||
}
|
}
|
||||||
|
|
||||||
reqSize := remaining
|
reqSize := remaining
|
||||||
if reqSize >= int64(chunkSize) {
|
if reqSize >= chunkSize {
|
||||||
reqSize = int64(chunkSize)
|
reqSize = chunkSize
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make a block of memory
|
// Make a block of memory
|
||||||
|
|||||||
193
backend/cache/cache.go
vendored
193
backend/cache/cache.go
vendored
@@ -18,18 +18,19 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/crypt"
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/config"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
|
||||||
"github.com/ncw/rclone/fs/config/obscure"
|
|
||||||
"github.com/ncw/rclone/fs/fspath"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/ncw/rclone/fs/rc"
|
|
||||||
"github.com/ncw/rclone/fs/walk"
|
|
||||||
"github.com/ncw/rclone/lib/atexit"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/backend/crypt"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/cache"
|
||||||
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/fspath"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/fs/rc"
|
||||||
|
"github.com/rclone/rclone/fs/walk"
|
||||||
|
"github.com/rclone/rclone/lib/atexit"
|
||||||
"golang.org/x/time/rate"
|
"golang.org/x/time/rate"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -481,7 +482,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
|
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
|
||||||
}
|
}
|
||||||
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
|
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
|
||||||
f.tempFs, err = fs.NewFs(f.opt.TempWritePath)
|
f.tempFs, err = cache.Get(f.opt.TempWritePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
|
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
|
||||||
}
|
}
|
||||||
@@ -508,7 +509,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
|
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
|
||||||
pollInterval := make(chan time.Duration, 1)
|
pollInterval := make(chan time.Duration, 1)
|
||||||
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
|
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
|
||||||
doChangeNotify(f.receiveChangeNotify, pollInterval)
|
doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
|
||||||
}
|
}
|
||||||
|
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
@@ -576,7 +577,7 @@ The slice indices are similar to Python slices: start[:end]
|
|||||||
|
|
||||||
start is the 0 based chunk number from the beginning of the file
|
start is the 0 based chunk number from the beginning of the file
|
||||||
to fetch inclusive. end is 0 based chunk number from the beginning
|
to fetch inclusive. end is 0 based chunk number from the beginning
|
||||||
of the file to fetch exclisive.
|
of the file to fetch exclusive.
|
||||||
Both values can be negative, in which case they count from the back
|
Both values can be negative, in which case they count from the back
|
||||||
of the file. The value "-5:" represents the last 5 chunks of a file.
|
of the file. The value "-5:" represents the last 5 chunks of a file.
|
||||||
|
|
||||||
@@ -599,7 +600,7 @@ is used on top of the cache.
|
|||||||
return f, fsErr
|
return f, fsErr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
|
func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||||
out = make(rc.Params)
|
out = make(rc.Params)
|
||||||
m, err := f.Stats()
|
m, err := f.Stats()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -626,7 +627,7 @@ func (f *Fs) unwrapRemote(remote string) string {
|
|||||||
return remote
|
return remote
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
|
func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||||
out = make(rc.Params)
|
out = make(rc.Params)
|
||||||
remoteInt, ok := in["remote"]
|
remoteInt, ok := in["remote"]
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -671,7 +672,7 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
|
|||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
|
func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
|
||||||
type chunkRange struct {
|
type chunkRange struct {
|
||||||
start, end int64
|
start, end int64
|
||||||
}
|
}
|
||||||
@@ -776,18 +777,18 @@ func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
|
|||||||
for _, pair := range files {
|
for _, pair := range files {
|
||||||
file, remote := pair[0], pair[1]
|
file, remote := pair[0], pair[1]
|
||||||
var status fileStatus
|
var status fileStatus
|
||||||
o, err := f.NewObject(remote)
|
o, err := f.NewObject(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fetchedChunks[file] = fileStatus{Error: err.Error()}
|
fetchedChunks[file] = fileStatus{Error: err.Error()}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
co := o.(*Object)
|
co := o.(*Object)
|
||||||
err = co.refreshFromSource(true)
|
err = co.refreshFromSource(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fetchedChunks[file] = fileStatus{Error: err.Error()}
|
fetchedChunks[file] = fileStatus{Error: err.Error()}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
handle := NewObjectHandle(co, f)
|
handle := NewObjectHandle(ctx, co, f)
|
||||||
handle.UseMemory = false
|
handle.UseMemory = false
|
||||||
handle.scaleWorkers(1)
|
handle.scaleWorkers(1)
|
||||||
walkChunkRanges(crs, co.Size(), func(chunk int64) {
|
walkChunkRanges(crs, co.Size(), func(chunk int64) {
|
||||||
@@ -870,10 +871,10 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChangeNotify can subsribe multiple callers
|
// ChangeNotify can subscribe multiple callers
|
||||||
// this is coupled with the wrapped fs ChangeNotify (if it supports it)
|
// this is coupled with the wrapped fs ChangeNotify (if it supports it)
|
||||||
// and also notifies other caches (i.e VFS) to clear out whenever something changes
|
// and also notifies other caches (i.e VFS) to clear out whenever something changes
|
||||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
|
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
|
||||||
f.parentsForgetMu.Lock()
|
f.parentsForgetMu.Lock()
|
||||||
defer f.parentsForgetMu.Unlock()
|
defer f.parentsForgetMu.Unlock()
|
||||||
fs.Debugf(f, "subscribing to ChangeNotify")
|
fs.Debugf(f, "subscribing to ChangeNotify")
|
||||||
@@ -920,7 +921,7 @@ func (f *Fs) TempUploadWaitTime() time.Duration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewObject finds the Object at remote.
|
// NewObject finds the Object at remote.
|
||||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
fs.Debugf(f, "new object '%s'", remote)
|
fs.Debugf(f, "new object '%s'", remote)
|
||||||
@@ -939,16 +940,16 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
|||||||
// search for entry in source or temp fs
|
// search for entry in source or temp fs
|
||||||
var obj fs.Object
|
var obj fs.Object
|
||||||
if f.opt.TempWritePath != "" {
|
if f.opt.TempWritePath != "" {
|
||||||
obj, err = f.tempFs.NewObject(remote)
|
obj, err = f.tempFs.NewObject(ctx, remote)
|
||||||
// not found in temp fs
|
// not found in temp fs
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(remote, "find: not found in local cache fs")
|
fs.Debugf(remote, "find: not found in local cache fs")
|
||||||
obj, err = f.Fs.NewObject(remote)
|
obj, err = f.Fs.NewObject(ctx, remote)
|
||||||
} else {
|
} else {
|
||||||
fs.Debugf(obj, "find: found in local cache fs")
|
fs.Debugf(obj, "find: found in local cache fs")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
obj, err = f.Fs.NewObject(remote)
|
obj, err = f.Fs.NewObject(ctx, remote)
|
||||||
}
|
}
|
||||||
|
|
||||||
// not found in either fs
|
// not found in either fs
|
||||||
@@ -958,13 +959,13 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// cache the new entry
|
// cache the new entry
|
||||||
co = ObjectFromOriginal(f, obj).persist()
|
co = ObjectFromOriginal(ctx, f, obj).persist()
|
||||||
fs.Debugf(co, "find: cached object")
|
fs.Debugf(co, "find: cached object")
|
||||||
return co, nil
|
return co, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// List the objects and directories in dir into entries
|
// List the objects and directories in dir into entries
|
||||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
fs.Debugf(f, "list '%s'", dir)
|
fs.Debugf(f, "list '%s'", dir)
|
||||||
cd := ShallowDirectory(f, dir)
|
cd := ShallowDirectory(f, dir)
|
||||||
|
|
||||||
@@ -994,12 +995,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
|
fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
|
||||||
|
|
||||||
for _, queuedRemote := range queuedEntries {
|
for _, queuedRemote := range queuedEntries {
|
||||||
queuedEntry, err := f.tempFs.NewObject(f.cleanRootFromPath(queuedRemote))
|
queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
|
fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
co := ObjectFromOriginal(f, queuedEntry).persist()
|
co := ObjectFromOriginal(ctx, f, queuedEntry).persist()
|
||||||
fs.Debugf(co, "list: cached temp object")
|
fs.Debugf(co, "list: cached temp object")
|
||||||
cachedEntries = append(cachedEntries, co)
|
cachedEntries = append(cachedEntries, co)
|
||||||
}
|
}
|
||||||
@@ -1007,7 +1008,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// search from the source
|
// search from the source
|
||||||
sourceEntries, err := f.Fs.List(dir)
|
sourceEntries, err := f.Fs.List(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -1045,11 +1046,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
|
if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
co := ObjectFromOriginal(f, o).persist()
|
co := ObjectFromOriginal(ctx, f, o).persist()
|
||||||
cachedEntries = append(cachedEntries, co)
|
cachedEntries = append(cachedEntries, co)
|
||||||
fs.Debugf(dir, "list: cached object: %v", co)
|
fs.Debugf(dir, "list: cached object: %v", co)
|
||||||
case fs.Directory:
|
case fs.Directory:
|
||||||
cdd := DirectoryFromOriginal(f, o)
|
cdd := DirectoryFromOriginal(ctx, f, o)
|
||||||
// check if the dir isn't expired and add it in cache if it isn't
|
// check if the dir isn't expired and add it in cache if it isn't
|
||||||
if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
|
if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
|
||||||
batchDirectories = append(batchDirectories, cdd)
|
batchDirectories = append(batchDirectories, cdd)
|
||||||
@@ -1079,8 +1080,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
return cachedEntries, nil
|
return cachedEntries, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
|
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
|
||||||
entries, err := f.List(dir)
|
entries, err := f.List(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1088,7 +1089,7 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
|
|||||||
for i := 0; i < len(entries); i++ {
|
for i := 0; i < len(entries); i++ {
|
||||||
innerDir, ok := entries[i].(fs.Directory)
|
innerDir, ok := entries[i].(fs.Directory)
|
||||||
if ok {
|
if ok {
|
||||||
err := f.recurse(innerDir.Remote(), list)
|
err := f.recurse(ctx, innerDir.Remote(), list)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1105,21 +1106,21 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
|
|||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
// from dir recursively into out.
|
// from dir recursively into out.
|
||||||
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||||
fs.Debugf(f, "list recursively from '%s'", dir)
|
fs.Debugf(f, "list recursively from '%s'", dir)
|
||||||
|
|
||||||
// we check if the source FS supports ListR
|
// we check if the source FS supports ListR
|
||||||
// if it does, we'll use that to get all the entries, cache them and return
|
// if it does, we'll use that to get all the entries, cache them and return
|
||||||
do := f.Fs.Features().ListR
|
do := f.Fs.Features().ListR
|
||||||
if do != nil {
|
if do != nil {
|
||||||
return do(dir, func(entries fs.DirEntries) error {
|
return do(ctx, dir, func(entries fs.DirEntries) error {
|
||||||
// we got called back with a set of entries so let's cache them and call the original callback
|
// we got called back with a set of entries so let's cache them and call the original callback
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
switch o := entry.(type) {
|
switch o := entry.(type) {
|
||||||
case fs.Object:
|
case fs.Object:
|
||||||
_ = f.cache.AddObject(ObjectFromOriginal(f, o))
|
_ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o))
|
||||||
case fs.Directory:
|
case fs.Directory:
|
||||||
_ = f.cache.AddDir(DirectoryFromOriginal(f, o))
|
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
|
||||||
default:
|
default:
|
||||||
return errors.Errorf("Unknown object type %T", entry)
|
return errors.Errorf("Unknown object type %T", entry)
|
||||||
}
|
}
|
||||||
@@ -1132,7 +1133,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
|||||||
|
|
||||||
// if we're here, we're gonna do a standard recursive traversal and cache everything
|
// if we're here, we're gonna do a standard recursive traversal and cache everything
|
||||||
list := walk.NewListRHelper(callback)
|
list := walk.NewListRHelper(callback)
|
||||||
err = f.recurse(dir, list)
|
err = f.recurse(ctx, dir, list)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1141,9 +1142,9 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Mkdir makes the directory (container, bucket)
|
// Mkdir makes the directory (container, bucket)
|
||||||
func (f *Fs) Mkdir(dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
fs.Debugf(f, "mkdir '%s'", dir)
|
fs.Debugf(f, "mkdir '%s'", dir)
|
||||||
err := f.Fs.Mkdir(dir)
|
err := f.Fs.Mkdir(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1171,7 +1172,7 @@ func (f *Fs) Mkdir(dir string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
// Rmdir removes the directory (container, bucket) if empty
|
||||||
func (f *Fs) Rmdir(dir string) error {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
fs.Debugf(f, "rmdir '%s'", dir)
|
fs.Debugf(f, "rmdir '%s'", dir)
|
||||||
|
|
||||||
if f.opt.TempWritePath != "" {
|
if f.opt.TempWritePath != "" {
|
||||||
@@ -1181,9 +1182,9 @@ func (f *Fs) Rmdir(dir string) error {
|
|||||||
|
|
||||||
// we check if the source exists on the remote and make the same move on it too if it does
|
// we check if the source exists on the remote and make the same move on it too if it does
|
||||||
// otherwise, we skip this step
|
// otherwise, we skip this step
|
||||||
_, err := f.UnWrap().List(dir)
|
_, err := f.UnWrap().List(ctx, dir)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err := f.Fs.Rmdir(dir)
|
err := f.Fs.Rmdir(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1191,10 +1192,10 @@ func (f *Fs) Rmdir(dir string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var queuedEntries []*Object
|
var queuedEntries []*Object
|
||||||
err = walk.Walk(f.tempFs, dir, true, -1, func(path string, entries fs.DirEntries, err error) error {
|
err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
|
||||||
for _, o := range entries {
|
for _, o := range entries {
|
||||||
if oo, ok := o.(fs.Object); ok {
|
if oo, ok := o.(fs.Object); ok {
|
||||||
co := ObjectFromOriginal(f, oo)
|
co := ObjectFromOriginal(ctx, f, oo)
|
||||||
queuedEntries = append(queuedEntries, co)
|
queuedEntries = append(queuedEntries, co)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1211,7 +1212,7 @@ func (f *Fs) Rmdir(dir string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
err := f.Fs.Rmdir(dir)
|
err := f.Fs.Rmdir(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1242,7 +1243,7 @@ func (f *Fs) Rmdir(dir string) error {
|
|||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
// using server side move operations.
|
// using server side move operations.
|
||||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||||
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
|
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
|
||||||
|
|
||||||
do := f.Fs.Features().DirMove
|
do := f.Fs.Features().DirMove
|
||||||
@@ -1264,8 +1265,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
f.backgroundRunner.pause()
|
f.backgroundRunner.pause()
|
||||||
defer f.backgroundRunner.play()
|
defer f.backgroundRunner.play()
|
||||||
|
|
||||||
_, errInWrap := srcFs.UnWrap().List(srcRemote)
|
_, errInWrap := srcFs.UnWrap().List(ctx, srcRemote)
|
||||||
_, errInTemp := f.tempFs.List(srcRemote)
|
_, errInTemp := f.tempFs.List(ctx, srcRemote)
|
||||||
// not found in either fs
|
// not found in either fs
|
||||||
if errInWrap != nil && errInTemp != nil {
|
if errInWrap != nil && errInTemp != nil {
|
||||||
return fs.ErrorDirNotFound
|
return fs.ErrorDirNotFound
|
||||||
@@ -1274,7 +1275,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
// we check if the source exists on the remote and make the same move on it too if it does
|
// we check if the source exists on the remote and make the same move on it too if it does
|
||||||
// otherwise, we skip this step
|
// otherwise, we skip this step
|
||||||
if errInWrap == nil {
|
if errInWrap == nil {
|
||||||
err := do(srcFs.UnWrap(), srcRemote, dstRemote)
|
err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1287,10 +1288,10 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var queuedEntries []*Object
|
var queuedEntries []*Object
|
||||||
err := walk.Walk(f.tempFs, srcRemote, true, -1, func(path string, entries fs.DirEntries, err error) error {
|
err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
|
||||||
for _, o := range entries {
|
for _, o := range entries {
|
||||||
if oo, ok := o.(fs.Object); ok {
|
if oo, ok := o.(fs.Object); ok {
|
||||||
co := ObjectFromOriginal(f, oo)
|
co := ObjectFromOriginal(ctx, f, oo)
|
||||||
queuedEntries = append(queuedEntries, co)
|
queuedEntries = append(queuedEntries, co)
|
||||||
if co.tempFileStartedUpload() {
|
if co.tempFileStartedUpload() {
|
||||||
fs.Errorf(co, "can't move - upload has already started. need to finish that")
|
fs.Errorf(co, "can't move - upload has already started. need to finish that")
|
||||||
@@ -1311,16 +1312,16 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
|
fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
|
||||||
return fs.ErrorCantDirMove
|
return fs.ErrorCantDirMove
|
||||||
}
|
}
|
||||||
err = do(f.tempFs, srcRemote, dstRemote)
|
err = do(ctx, f.tempFs, srcRemote, dstRemote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = f.cache.ReconcileTempUploads(f)
|
err = f.cache.ReconcileTempUploads(ctx, f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
err := do(srcFs.UnWrap(), srcRemote, dstRemote)
|
err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1426,10 +1427,10 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
|
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
|
||||||
|
|
||||||
// put in to the remote path
|
// put in to the remote path
|
||||||
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||||
var err error
|
var err error
|
||||||
var obj fs.Object
|
var obj fs.Object
|
||||||
|
|
||||||
@@ -1440,7 +1441,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
|||||||
_ = f.cache.ExpireDir(parentCd)
|
_ = f.cache.ExpireDir(parentCd)
|
||||||
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
|
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
|
||||||
|
|
||||||
obj, err = f.tempFs.Put(in, src, options...)
|
obj, err = f.tempFs.Put(ctx, in, src, options...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
|
fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1455,14 +1456,14 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
|||||||
// if cache writes is enabled write it first through cache
|
// if cache writes is enabled write it first through cache
|
||||||
} else if f.opt.StoreWrites {
|
} else if f.opt.StoreWrites {
|
||||||
f.cacheReader(in, src, func(inn io.Reader) {
|
f.cacheReader(in, src, func(inn io.Reader) {
|
||||||
obj, err = put(inn, src, options...)
|
obj, err = put(ctx, inn, src, options...)
|
||||||
})
|
})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
|
fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
|
||||||
}
|
}
|
||||||
// last option: save it directly in remote fs
|
// last option: save it directly in remote fs
|
||||||
} else {
|
} else {
|
||||||
obj, err = put(in, src, options...)
|
obj, err = put(ctx, in, src, options...)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
fs.Debugf(obj, "put: uploaded to remote fs")
|
fs.Debugf(obj, "put: uploaded to remote fs")
|
||||||
}
|
}
|
||||||
@@ -1474,7 +1475,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
|||||||
}
|
}
|
||||||
|
|
||||||
// cache the new file
|
// cache the new file
|
||||||
cachedObj := ObjectFromOriginal(f, obj)
|
cachedObj := ObjectFromOriginal(ctx, f, obj)
|
||||||
|
|
||||||
// deleting cached chunks and info to be replaced with new ones
|
// deleting cached chunks and info to be replaced with new ones
|
||||||
_ = f.cache.RemoveObject(cachedObj.abs())
|
_ = f.cache.RemoveObject(cachedObj.abs())
|
||||||
@@ -1497,33 +1498,33 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Put in to the remote path with the modTime given of the given size
|
// Put in to the remote path with the modTime given of the given size
|
||||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
fs.Debugf(f, "put data at '%s'", src.Remote())
|
fs.Debugf(f, "put data at '%s'", src.Remote())
|
||||||
return f.put(in, src, options, f.Fs.Put)
|
return f.put(ctx, in, src, options, f.Fs.Put)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutUnchecked uploads the object
|
// PutUnchecked uploads the object
|
||||||
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
do := f.Fs.Features().PutUnchecked
|
do := f.Fs.Features().PutUnchecked
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, errors.New("can't PutUnchecked")
|
return nil, errors.New("can't PutUnchecked")
|
||||||
}
|
}
|
||||||
fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
|
fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
|
||||||
return f.put(in, src, options, do)
|
return f.put(ctx, in, src, options, do)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutStream uploads the object
|
// PutStream uploads the object
|
||||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
do := f.Fs.Features().PutStream
|
do := f.Fs.Features().PutStream
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, errors.New("can't PutStream")
|
return nil, errors.New("can't PutStream")
|
||||||
}
|
}
|
||||||
fs.Debugf(f, "put data streaming in '%s'", src.Remote())
|
fs.Debugf(f, "put data streaming in '%s'", src.Remote())
|
||||||
return f.put(in, src, options, do)
|
return f.put(ctx, in, src, options, do)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server side copy operations.
|
||||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
|
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
|
||||||
|
|
||||||
do := f.Fs.Features().Copy
|
do := f.Fs.Features().Copy
|
||||||
@@ -1543,13 +1544,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
return nil, fs.ErrorCantCopy
|
return nil, fs.ErrorCantCopy
|
||||||
}
|
}
|
||||||
// refresh from source or abort
|
// refresh from source or abort
|
||||||
if err := srcObj.refreshFromSource(false); err != nil {
|
if err := srcObj.refreshFromSource(ctx, false); err != nil {
|
||||||
fs.Errorf(f, "can't copy %v - %v", src, err)
|
fs.Errorf(f, "can't copy %v - %v", src, err)
|
||||||
return nil, fs.ErrorCantCopy
|
return nil, fs.ErrorCantCopy
|
||||||
}
|
}
|
||||||
|
|
||||||
if srcObj.isTempFile() {
|
if srcObj.isTempFile() {
|
||||||
// we check if the feature is stil active
|
// we check if the feature is still active
|
||||||
if f.opt.TempWritePath == "" {
|
if f.opt.TempWritePath == "" {
|
||||||
fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
|
fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
|
||||||
return nil, fs.ErrorCantCopy
|
return nil, fs.ErrorCantCopy
|
||||||
@@ -1562,7 +1563,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
obj, err := do(srcObj.Object, remote)
|
obj, err := do(ctx, srcObj.Object, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(srcObj, "error moving in cache: %v", err)
|
fs.Errorf(srcObj, "error moving in cache: %v", err)
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1570,7 +1571,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
fs.Debugf(obj, "copy: file copied")
|
fs.Debugf(obj, "copy: file copied")
|
||||||
|
|
||||||
// persist new
|
// persist new
|
||||||
co := ObjectFromOriginal(f, obj).persist()
|
co := ObjectFromOriginal(ctx, f, obj).persist()
|
||||||
fs.Debugf(co, "copy: added to cache")
|
fs.Debugf(co, "copy: added to cache")
|
||||||
// expire the destination path
|
// expire the destination path
|
||||||
parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
|
parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
|
||||||
@@ -1597,7 +1598,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Move src to this remote using server side move operations.
|
// Move src to this remote using server side move operations.
|
||||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
|
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
|
||||||
|
|
||||||
// if source fs doesn't support move abort
|
// if source fs doesn't support move abort
|
||||||
@@ -1618,14 +1619,14 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
return nil, fs.ErrorCantMove
|
return nil, fs.ErrorCantMove
|
||||||
}
|
}
|
||||||
// refresh from source or abort
|
// refresh from source or abort
|
||||||
if err := srcObj.refreshFromSource(false); err != nil {
|
if err := srcObj.refreshFromSource(ctx, false); err != nil {
|
||||||
fs.Errorf(f, "can't move %v - %v", src, err)
|
fs.Errorf(f, "can't move %v - %v", src, err)
|
||||||
return nil, fs.ErrorCantMove
|
return nil, fs.ErrorCantMove
|
||||||
}
|
}
|
||||||
|
|
||||||
// if this is a temp object then we perform the changes locally
|
// if this is a temp object then we perform the changes locally
|
||||||
if srcObj.isTempFile() {
|
if srcObj.isTempFile() {
|
||||||
// we check if the feature is stil active
|
// we check if the feature is still active
|
||||||
if f.opt.TempWritePath == "" {
|
if f.opt.TempWritePath == "" {
|
||||||
fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
|
fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
|
||||||
return nil, fs.ErrorCantMove
|
return nil, fs.ErrorCantMove
|
||||||
@@ -1654,7 +1655,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
fs.Debugf(srcObj, "move: queued file moved to %v", remote)
|
fs.Debugf(srcObj, "move: queued file moved to %v", remote)
|
||||||
}
|
}
|
||||||
|
|
||||||
obj, err := do(srcObj.Object, remote)
|
obj, err := do(ctx, srcObj.Object, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(srcObj, "error moving: %v", err)
|
fs.Errorf(srcObj, "error moving: %v", err)
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1679,7 +1680,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
// advertise to ChangeNotify if wrapped doesn't do that
|
// advertise to ChangeNotify if wrapped doesn't do that
|
||||||
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
|
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
|
||||||
// persist new
|
// persist new
|
||||||
cachedObj := ObjectFromOriginal(f, obj).persist()
|
cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
|
||||||
fs.Debugf(cachedObj, "move: added to cache")
|
fs.Debugf(cachedObj, "move: added to cache")
|
||||||
// expire new parent
|
// expire new parent
|
||||||
parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
|
parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
|
||||||
@@ -1701,7 +1702,7 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Purge all files in the root and the root directory
|
// Purge all files in the root and the root directory
|
||||||
func (f *Fs) Purge() error {
|
func (f *Fs) Purge(ctx context.Context) error {
|
||||||
fs.Infof(f, "purging cache")
|
fs.Infof(f, "purging cache")
|
||||||
f.cache.Purge()
|
f.cache.Purge()
|
||||||
|
|
||||||
@@ -1710,7 +1711,7 @@ func (f *Fs) Purge() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
err := do()
|
err := do(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1719,7 +1720,7 @@ func (f *Fs) Purge() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// CleanUp the trash in the Fs
|
// CleanUp the trash in the Fs
|
||||||
func (f *Fs) CleanUp() error {
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||||
f.CleanUpCache(false)
|
f.CleanUpCache(false)
|
||||||
|
|
||||||
do := f.Fs.Features().CleanUp
|
do := f.Fs.Features().CleanUp
|
||||||
@@ -1727,16 +1728,16 @@ func (f *Fs) CleanUp() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return do()
|
return do(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// About gets quota information from the Fs
|
// About gets quota information from the Fs
|
||||||
func (f *Fs) About() (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
do := f.Fs.Features().About
|
do := f.Fs.Features().About
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, errors.New("About not supported")
|
return nil, errors.New("About not supported")
|
||||||
}
|
}
|
||||||
return do()
|
return do(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stats returns stats about the cache storage
|
// Stats returns stats about the cache storage
|
||||||
@@ -1863,6 +1864,24 @@ func cleanPath(p string) string {
|
|||||||
return p
|
return p
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UserInfo returns info about the connected user
|
||||||
|
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
|
||||||
|
do := f.Fs.Features().UserInfo
|
||||||
|
if do == nil {
|
||||||
|
return nil, fs.ErrorNotImplemented
|
||||||
|
}
|
||||||
|
return do(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disconnect the current user
|
||||||
|
func (f *Fs) Disconnect(ctx context.Context) error {
|
||||||
|
do := f.Fs.Features().Disconnect
|
||||||
|
if do == nil {
|
||||||
|
return fs.ErrorNotImplemented
|
||||||
|
}
|
||||||
|
return do(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
// Check the interfaces are satisfied
|
||||||
var (
|
var (
|
||||||
_ fs.Fs = (*Fs)(nil)
|
_ fs.Fs = (*Fs)(nil)
|
||||||
@@ -1878,4 +1897,6 @@ var (
|
|||||||
_ fs.ListRer = (*Fs)(nil)
|
_ fs.ListRer = (*Fs)(nil)
|
||||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||||
_ fs.Abouter = (*Fs)(nil)
|
_ fs.Abouter = (*Fs)(nil)
|
||||||
|
_ fs.UserInfoer = (*Fs)(nil)
|
||||||
|
_ fs.Disconnecter = (*Fs)(nil)
|
||||||
)
|
)
|
||||||
|
|||||||
177
backend/cache/cache_internal_test.go
vendored
177
backend/cache/cache_internal_test.go
vendored
@@ -4,6 +4,7 @@ package cache_test
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
goflag "flag"
|
goflag "flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -21,19 +22,20 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/cache"
|
|
||||||
"github.com/ncw/rclone/backend/crypt"
|
|
||||||
_ "github.com/ncw/rclone/backend/drive"
|
|
||||||
"github.com/ncw/rclone/backend/local"
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/config"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/object"
|
|
||||||
"github.com/ncw/rclone/fs/rc"
|
|
||||||
"github.com/ncw/rclone/fstest"
|
|
||||||
"github.com/ncw/rclone/vfs"
|
|
||||||
"github.com/ncw/rclone/vfs/vfsflags"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/backend/cache"
|
||||||
|
"github.com/rclone/rclone/backend/crypt"
|
||||||
|
_ "github.com/rclone/rclone/backend/drive"
|
||||||
|
"github.com/rclone/rclone/backend/local"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/object"
|
||||||
|
"github.com/rclone/rclone/fs/rc"
|
||||||
|
"github.com/rclone/rclone/fstest"
|
||||||
|
"github.com/rclone/rclone/lib/random"
|
||||||
|
"github.com/rclone/rclone/vfs"
|
||||||
|
"github.com/rclone/rclone/vfs/vfsflags"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
@@ -120,7 +122,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
listRootInner, err := runInstance.list(t, rootFs, innerFolder)
|
listRootInner, err := runInstance.list(t, rootFs, innerFolder)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
listInner, err := rootFs2.List("")
|
listInner, err := rootFs2.List(context.Background(), "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Len(t, listRoot, 1)
|
require.Len(t, listRoot, 1)
|
||||||
@@ -138,10 +140,10 @@ func TestInternalVfsCache(t *testing.T) {
|
|||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
|
||||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
err := rootFs.Mkdir("test")
|
err := rootFs.Mkdir(context.Background(), "test")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
runInstance.writeObjectString(t, rootFs, "test/second", "content")
|
runInstance.writeObjectString(t, rootFs, "test/second", "content")
|
||||||
_, err = rootFs.List("test")
|
_, err = rootFs.List(context.Background(), "test")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
testReader := runInstance.randomReader(t, testSize)
|
testReader := runInstance.randomReader(t, testSize)
|
||||||
@@ -266,7 +268,7 @@ func TestInternalObjNotFound(t *testing.T) {
|
|||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
obj, err := rootFs.NewObject("404")
|
obj, err := rootFs.NewObject(context.Background(), "404")
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
require.Nil(t, obj)
|
require.Nil(t, obj)
|
||||||
}
|
}
|
||||||
@@ -354,8 +356,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
|
|||||||
testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
|
testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
} else {
|
} else {
|
||||||
testData1 = []byte(fstest.RandomString(100))
|
testData1 = []byte(random.String(100))
|
||||||
testData2 = []byte(fstest.RandomString(200))
|
testData2 = []byte(random.String(200))
|
||||||
}
|
}
|
||||||
|
|
||||||
// write the object
|
// write the object
|
||||||
@@ -387,10 +389,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
|||||||
|
|
||||||
// write the object
|
// write the object
|
||||||
o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
|
o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
|
||||||
require.Equal(t, o.Size(), int64(testSize))
|
require.Equal(t, o.Size(), testSize)
|
||||||
time.Sleep(time.Second * 3)
|
time.Sleep(time.Second * 3)
|
||||||
|
|
||||||
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
|
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, int64(len(checkSample)), o.Size())
|
require.Equal(t, int64(len(checkSample)), o.Size())
|
||||||
|
|
||||||
@@ -445,7 +447,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
log.Printf("original size: %v", originalSize)
|
log.Printf("original size: %v", originalSize)
|
||||||
|
|
||||||
o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
expectedSize := int64(len([]byte("test content")))
|
expectedSize := int64(len([]byte("test content")))
|
||||||
var data2 []byte
|
var data2 []byte
|
||||||
@@ -457,7 +459,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
|||||||
data2 = []byte("test content")
|
data2 = []byte("test content")
|
||||||
}
|
}
|
||||||
objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
|
objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
|
||||||
err = o.Update(bytes.NewReader(data2), objInfo)
|
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, int64(len(data2)), o.Size())
|
require.Equal(t, int64(len(data2)), o.Size())
|
||||||
log.Printf("updated size: %v", len(data2))
|
log.Printf("updated size: %v", len(data2))
|
||||||
@@ -503,9 +505,9 @@ func TestInternalMoveWithNotify(t *testing.T) {
|
|||||||
} else {
|
} else {
|
||||||
testData = []byte("test content")
|
testData = []byte("test content")
|
||||||
}
|
}
|
||||||
_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test"))
|
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test"))
|
||||||
_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one"))
|
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one"))
|
||||||
_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second"))
|
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second"))
|
||||||
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
|
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
|
||||||
|
|
||||||
// list in mount
|
// list in mount
|
||||||
@@ -515,7 +517,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// move file
|
// move file
|
||||||
_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
|
_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = runInstance.retryBlock(func() error {
|
err = runInstance.retryBlock(func() error {
|
||||||
@@ -589,9 +591,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
|||||||
} else {
|
} else {
|
||||||
testData = []byte("test content")
|
testData = []byte("test content")
|
||||||
}
|
}
|
||||||
err = rootFs.Mkdir("test")
|
err = rootFs.Mkdir(context.Background(), "test")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = rootFs.Mkdir("test/one")
|
err = rootFs.Mkdir(context.Background(), "test/one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
|
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
|
||||||
|
|
||||||
@@ -608,7 +610,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
|||||||
require.False(t, found)
|
require.False(t, found)
|
||||||
|
|
||||||
// move file
|
// move file
|
||||||
_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
|
_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = runInstance.retryBlock(func() error {
|
err = runInstance.retryBlock(func() error {
|
||||||
@@ -670,23 +672,23 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
|||||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
||||||
|
|
||||||
// update in the wrapped fs
|
// update in the wrapped fs
|
||||||
o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
wrappedTime := time.Now().Add(-1 * time.Hour)
|
wrappedTime := time.Now().Add(-1 * time.Hour)
|
||||||
err = o.SetModTime(wrappedTime)
|
err = o.SetModTime(context.Background(), wrappedTime)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// get a new instance from the cache
|
// get a new instance from the cache
|
||||||
co, err := rootFs.NewObject("data.bin")
|
co, err := rootFs.NewObject(context.Background(), "data.bin")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
|
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
|
||||||
|
|
||||||
cfs.DirCacheFlush() // flush the cache
|
cfs.DirCacheFlush() // flush the cache
|
||||||
|
|
||||||
// get a new instance from the cache
|
// get a new instance from the cache
|
||||||
co, err = rootFs.NewObject("data.bin")
|
co, err = rootFs.NewObject(context.Background(), "data.bin")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
|
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalChangeSeenAfterRc(t *testing.T) {
|
func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||||
@@ -713,40 +715,44 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
|||||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
||||||
|
|
||||||
// update in the wrapped fs
|
// update in the wrapped fs
|
||||||
o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
wrappedTime := time.Now().Add(-1 * time.Hour)
|
wrappedTime := time.Now().Add(-1 * time.Hour)
|
||||||
err = o.SetModTime(wrappedTime)
|
err = o.SetModTime(context.Background(), wrappedTime)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// get a new instance from the cache
|
// get a new instance from the cache
|
||||||
co, err := rootFs.NewObject("data.bin")
|
co, err := rootFs.NewObject(context.Background(), "data.bin")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
|
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
|
||||||
|
|
||||||
// Call the rc function
|
// Call the rc function
|
||||||
m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
|
m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
|
||||||
|
require.NoError(t, err)
|
||||||
require.Contains(t, m, "status")
|
require.Contains(t, m, "status")
|
||||||
require.Contains(t, m, "message")
|
require.Contains(t, m, "message")
|
||||||
require.Equal(t, "ok", m["status"])
|
require.Equal(t, "ok", m["status"])
|
||||||
require.Contains(t, m["message"], "cached file cleared")
|
require.Contains(t, m["message"], "cached file cleared")
|
||||||
|
|
||||||
// get a new instance from the cache
|
// get a new instance from the cache
|
||||||
co, err = rootFs.NewObject("data.bin")
|
co, err = rootFs.NewObject(context.Background(), "data.bin")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
|
||||||
|
_, err = runInstance.list(t, rootFs, "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
|
|
||||||
li1, err := runInstance.list(t, rootFs, "")
|
|
||||||
|
|
||||||
// create some rand test data
|
// create some rand test data
|
||||||
testData2 := randStringBytes(int(chunkSize))
|
testData2 := randStringBytes(int(chunkSize))
|
||||||
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
|
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
|
||||||
|
|
||||||
// list should have 1 item only
|
// list should have 1 item only
|
||||||
li1, err = runInstance.list(t, rootFs, "")
|
li1, err := runInstance.list(t, rootFs, "")
|
||||||
|
require.NoError(t, err)
|
||||||
require.Len(t, li1, 1)
|
require.Len(t, li1, 1)
|
||||||
|
|
||||||
// Call the rc function
|
// Call the rc function
|
||||||
m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
|
m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
|
||||||
|
require.NoError(t, err)
|
||||||
require.Contains(t, m, "status")
|
require.Contains(t, m, "status")
|
||||||
require.Contains(t, m, "message")
|
require.Contains(t, m, "message")
|
||||||
require.Equal(t, "ok", m["status"])
|
require.Equal(t, "ok", m["status"])
|
||||||
@@ -754,6 +760,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
|||||||
|
|
||||||
// list should have 2 items now
|
// list should have 2 items now
|
||||||
li2, err := runInstance.list(t, rootFs, "")
|
li2, err := runInstance.list(t, rootFs, "")
|
||||||
|
require.NoError(t, err)
|
||||||
require.Len(t, li2, 2)
|
require.Len(t, li2, 2)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -789,7 +796,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
|||||||
// create some rand test data
|
// create some rand test data
|
||||||
testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
|
testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
|
||||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
||||||
o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
co, ok := o.(*cache.Object)
|
co, ok := o.(*cache.Object)
|
||||||
require.True(t, ok)
|
require.True(t, ok)
|
||||||
@@ -828,7 +835,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Len(t, l, 1)
|
require.Len(t, l, 1)
|
||||||
|
|
||||||
err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
|
err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
l, err = runInstance.list(t, rootFs, "test")
|
l, err = runInstance.list(t, rootFs, "test")
|
||||||
@@ -863,14 +870,14 @@ func TestInternalBug2117(t *testing.T) {
|
|||||||
cfs, err := runInstance.getCacheFs(rootFs)
|
cfs, err := runInstance.getCacheFs(rootFs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = cfs.UnWrap().Mkdir("test")
|
err = cfs.UnWrap().Mkdir(context.Background(), "test")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
for i := 1; i <= 4; i++ {
|
for i := 1; i <= 4; i++ {
|
||||||
err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i))
|
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
for j := 1; j <= 4; j++ {
|
for j := 1; j <= 4; j++ {
|
||||||
err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j))
|
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
|
runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
|
||||||
@@ -1075,10 +1082,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
}
|
}
|
||||||
|
|
||||||
if purge {
|
if purge {
|
||||||
_ = f.Features().Purge()
|
_ = f.Features().Purge(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
err = f.Mkdir("")
|
err = f.Mkdir(context.Background(), "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
if r.useMount && !r.isMounted {
|
if r.useMount && !r.isMounted {
|
||||||
r.mountFs(t, f)
|
r.mountFs(t, f)
|
||||||
@@ -1092,7 +1099,7 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
|||||||
r.unmountFs(t, f)
|
r.unmountFs(t, f)
|
||||||
}
|
}
|
||||||
|
|
||||||
err := f.Features().Purge()
|
err := f.Features().Purge(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
cfs, err := r.getCacheFs(f)
|
cfs, err := r.getCacheFs(f)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -1194,7 +1201,7 @@ func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.Read
|
|||||||
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
|
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
|
||||||
in := bytes.NewReader(data)
|
in := bytes.NewReader(data)
|
||||||
_ = r.writeObjectReader(t, f, remote, in)
|
_ = r.writeObjectReader(t, f, remote, in)
|
||||||
o, err := f.NewObject(remote)
|
o, err := f.NewObject(context.Background(), remote)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, int64(len(data)), o.Size())
|
require.Equal(t, int64(len(data)), o.Size())
|
||||||
return o
|
return o
|
||||||
@@ -1203,7 +1210,7 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte
|
|||||||
func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
|
func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
|
||||||
modTime := time.Now()
|
modTime := time.Now()
|
||||||
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
|
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
|
||||||
obj, err := f.Put(in, objInfo)
|
obj, err := f.Put(context.Background(), in, objInfo)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
if r.useMount {
|
if r.useMount {
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
r.vfs.WaitForWriters(10 * time.Second)
|
||||||
@@ -1223,18 +1230,18 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
|
|||||||
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
|
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
r.vfs.WaitForWriters(10 * time.Second)
|
||||||
obj, err = f.NewObject(remote)
|
obj, err = f.NewObject(context.Background(), remote)
|
||||||
} else {
|
} else {
|
||||||
in1 := bytes.NewReader(data1)
|
in1 := bytes.NewReader(data1)
|
||||||
in2 := bytes.NewReader(data2)
|
in2 := bytes.NewReader(data2)
|
||||||
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
|
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
|
||||||
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
|
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
|
||||||
|
|
||||||
obj, err = f.Put(in1, objInfo1)
|
obj, err = f.Put(context.Background(), in1, objInfo1)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
obj, err = f.NewObject(remote)
|
obj, err = f.NewObject(context.Background(), remote)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = obj.Update(in2, objInfo2)
|
err = obj.Update(context.Background(), in2, objInfo2)
|
||||||
}
|
}
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
@@ -1263,7 +1270,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
|
|||||||
return checkSample, err
|
return checkSample, err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
co, err := f.NewObject(remote)
|
co, err := f.NewObject(context.Background(), remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return checkSample, err
|
return checkSample, err
|
||||||
}
|
}
|
||||||
@@ -1278,7 +1285,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
|
|||||||
func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
|
func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
|
||||||
size := end - offset
|
size := end - offset
|
||||||
checkSample := make([]byte, size)
|
checkSample := make([]byte, size)
|
||||||
reader, err := o.Open(&fs.SeekOption{Offset: offset})
|
reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
totalRead, err := io.ReadFull(reader, checkSample)
|
totalRead, err := io.ReadFull(reader, checkSample)
|
||||||
if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
|
if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
|
||||||
@@ -1295,7 +1302,7 @@ func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
|
|||||||
if r.useMount {
|
if r.useMount {
|
||||||
err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
|
err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
|
||||||
} else {
|
} else {
|
||||||
err = f.Mkdir(remote)
|
err = f.Mkdir(context.Background(), remote)
|
||||||
}
|
}
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
@@ -1307,11 +1314,11 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
|
|||||||
err = os.Remove(path.Join(r.mntDir, remote))
|
err = os.Remove(path.Join(r.mntDir, remote))
|
||||||
} else {
|
} else {
|
||||||
var obj fs.Object
|
var obj fs.Object
|
||||||
obj, err = f.NewObject(remote)
|
obj, err = f.NewObject(context.Background(), remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = f.Rmdir(remote)
|
err = f.Rmdir(context.Background(), remote)
|
||||||
} else {
|
} else {
|
||||||
err = obj.Remove()
|
err = obj.Remove(context.Background())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1329,7 +1336,7 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
var list fs.DirEntries
|
var list fs.DirEntries
|
||||||
list, err = f.List(remote)
|
list, err = f.List(context.Background(), remote)
|
||||||
for _, ll := range list {
|
for _, ll := range list {
|
||||||
l = append(l, ll)
|
l = append(l, ll)
|
||||||
}
|
}
|
||||||
@@ -1348,7 +1355,7 @@ func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
var list fs.DirEntries
|
var list fs.DirEntries
|
||||||
list, err = f.List(remote)
|
list, err = f.List(context.Background(), remote)
|
||||||
for _, ll := range list {
|
for _, ll := range list {
|
||||||
l = append(l, ll.Remote())
|
l = append(l, ll.Remote())
|
||||||
}
|
}
|
||||||
@@ -1388,7 +1395,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
|||||||
}
|
}
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
r.vfs.WaitForWriters(10 * time.Second)
|
||||||
} else if rootFs.Features().DirMove != nil {
|
} else if rootFs.Features().DirMove != nil {
|
||||||
err = rootFs.Features().DirMove(rootFs, src, dst)
|
err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1410,11 +1417,11 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
|||||||
}
|
}
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
r.vfs.WaitForWriters(10 * time.Second)
|
||||||
} else if rootFs.Features().Move != nil {
|
} else if rootFs.Features().Move != nil {
|
||||||
obj1, err := rootFs.NewObject(src)
|
obj1, err := rootFs.NewObject(context.Background(), src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, err = rootFs.Features().Move(obj1, dst)
|
_, err = rootFs.Features().Move(context.Background(), obj1, dst)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1436,11 +1443,11 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
|||||||
}
|
}
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
r.vfs.WaitForWriters(10 * time.Second)
|
||||||
} else if rootFs.Features().Copy != nil {
|
} else if rootFs.Features().Copy != nil {
|
||||||
obj, err := rootFs.NewObject(src)
|
obj, err := rootFs.NewObject(context.Background(), src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, err = rootFs.Features().Copy(obj, dst)
|
_, err = rootFs.Features().Copy(context.Background(), obj, dst)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1462,11 +1469,11 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
|
|||||||
}
|
}
|
||||||
return fi.ModTime(), nil
|
return fi.ModTime(), nil
|
||||||
}
|
}
|
||||||
obj1, err := rootFs.NewObject(src)
|
obj1, err := rootFs.NewObject(context.Background(), src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return time.Time{}, err
|
return time.Time{}, err
|
||||||
}
|
}
|
||||||
return obj1.ModTime(), nil
|
return obj1.ModTime(context.Background()), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
|
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
|
||||||
@@ -1479,7 +1486,7 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
|
|||||||
}
|
}
|
||||||
return fi.Size(), nil
|
return fi.Size(), nil
|
||||||
}
|
}
|
||||||
obj1, err := rootFs.NewObject(src)
|
obj1, err := rootFs.NewObject(context.Background(), src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return int64(0), err
|
return int64(0), err
|
||||||
}
|
}
|
||||||
@@ -1490,7 +1497,8 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
|||||||
var err error
|
var err error
|
||||||
|
|
||||||
if r.useMount {
|
if r.useMount {
|
||||||
f, err := os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
|
var f *os.File
|
||||||
|
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1500,14 +1508,15 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
|||||||
}()
|
}()
|
||||||
_, err = f.WriteString(data + append)
|
_, err = f.WriteString(data + append)
|
||||||
} else {
|
} else {
|
||||||
obj1, err := rootFs.NewObject(src)
|
var obj1 fs.Object
|
||||||
|
obj1, err = rootFs.NewObject(context.Background(), src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
data1 := []byte(data + append)
|
data1 := []byte(data + append)
|
||||||
r := bytes.NewReader(data1)
|
r := bytes.NewReader(data1)
|
||||||
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
|
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
|
||||||
err = obj1.Update(r, objInfo1)
|
err = obj1.Update(context.Background(), r, objInfo1)
|
||||||
}
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
@@ -1632,15 +1641,13 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
|
|||||||
cfs, ok := f.(*cache.Fs)
|
cfs, ok := f.(*cache.Fs)
|
||||||
if ok {
|
if ok {
|
||||||
return cfs, nil
|
return cfs, nil
|
||||||
} else {
|
}
|
||||||
if f.Features().UnWrap != nil {
|
if f.Features().UnWrap != nil {
|
||||||
cfs, ok := f.Features().UnWrap().(*cache.Fs)
|
cfs, ok := f.Features().UnWrap().(*cache.Fs)
|
||||||
if ok {
|
if ok {
|
||||||
return cfs, nil
|
return cfs, nil
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, errors.New("didn't found a cache fs")
|
return nil, errors.New("didn't found a cache fs")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
6
backend/cache/cache_mount_unix_test.go
vendored
6
backend/cache/cache_mount_unix_test.go
vendored
@@ -9,9 +9,9 @@ import (
|
|||||||
|
|
||||||
"bazil.org/fuse"
|
"bazil.org/fuse"
|
||||||
fusefs "bazil.org/fuse/fs"
|
fusefs "bazil.org/fuse/fs"
|
||||||
"github.com/ncw/rclone/cmd/mount"
|
"github.com/rclone/rclone/cmd/mount"
|
||||||
"github.com/ncw/rclone/cmd/mountlib"
|
"github.com/rclone/rclone/cmd/mountlib"
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
6
backend/cache/cache_mount_windows_test.go
vendored
6
backend/cache/cache_mount_windows_test.go
vendored
@@ -9,10 +9,10 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/billziss-gh/cgofuse/fuse"
|
"github.com/billziss-gh/cgofuse/fuse"
|
||||||
"github.com/ncw/rclone/cmd/cmount"
|
|
||||||
"github.com/ncw/rclone/cmd/mountlib"
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/cmd/cmount"
|
||||||
|
"github.com/rclone/rclone/cmd/mountlib"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
12
backend/cache/cache_test.go
vendored
12
backend/cache/cache_test.go
vendored
@@ -7,15 +7,17 @@ package cache_test
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/cache"
|
"github.com/rclone/rclone/backend/cache"
|
||||||
_ "github.com/ncw/rclone/backend/local"
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
func TestIntegration(t *testing.T) {
|
func TestIntegration(t *testing.T) {
|
||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: "TestCache:",
|
RemoteName: "TestCache:",
|
||||||
NilObject: (*cache.Object)(nil),
|
NilObject: (*cache.Object)(nil),
|
||||||
|
UnimplementableFsMethods: []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
|
||||||
|
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
53
backend/cache/cache_upload_test.go
vendored
53
backend/cache/cache_upload_test.go
vendored
@@ -3,6 +3,7 @@
|
|||||||
package cache_test
|
package cache_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"os"
|
"os"
|
||||||
@@ -11,9 +12,9 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/cache"
|
"github.com/rclone/rclone/backend/cache"
|
||||||
_ "github.com/ncw/rclone/backend/drive"
|
_ "github.com/rclone/rclone/backend/drive"
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -85,11 +86,11 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
|
|||||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
|
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
|
||||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
err := rootFs.Mkdir("one")
|
err := rootFs.Mkdir(context.Background(), "one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = rootFs.Mkdir("one/test")
|
err = rootFs.Mkdir(context.Background(), "one/test")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = rootFs.Mkdir("second")
|
err = rootFs.Mkdir(context.Background(), "second")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// create some rand test data
|
// create some rand test data
|
||||||
@@ -122,11 +123,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
|
|||||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
|
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
|
||||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
err := rootFs.Mkdir("one")
|
err := rootFs.Mkdir(context.Background(), "one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = rootFs.Mkdir("one/test")
|
err = rootFs.Mkdir(context.Background(), "one/test")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = rootFs.Mkdir("second")
|
err = rootFs.Mkdir(context.Background(), "second")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// create some rand test data
|
// create some rand test data
|
||||||
@@ -165,7 +166,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
|||||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
|
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
|
||||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
err := rootFs.Mkdir("test")
|
err := rootFs.Mkdir(context.Background(), "test")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
minSize := 5242880
|
minSize := 5242880
|
||||||
maxSize := 10485760
|
maxSize := 10485760
|
||||||
@@ -233,9 +234,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
|
|||||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||||
if err != errNotSupported {
|
if err != errNotSupported {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = rootFs.NewObject("test/one")
|
_, err = rootFs.NewObject(context.Background(), "test/one")
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
_, err = rootFs.NewObject("second/one")
|
_, err = rootFs.NewObject(context.Background(), "second/one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// validate that it exists in temp fs
|
// validate that it exists in temp fs
|
||||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
@@ -256,7 +257,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
|
|||||||
err = runInstance.rm(t, rootFs, "test")
|
err = runInstance.rm(t, rootFs, "test")
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
require.Contains(t, err.Error(), "directory not empty")
|
require.Contains(t, err.Error(), "directory not empty")
|
||||||
_, err = rootFs.NewObject("test/one")
|
_, err = rootFs.NewObject(context.Background(), "test/one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// validate that it exists in temp fs
|
// validate that it exists in temp fs
|
||||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
@@ -270,9 +271,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
|
|||||||
if err != errNotSupported {
|
if err != errNotSupported {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// try to read from it
|
// try to read from it
|
||||||
_, err = rootFs.NewObject("test/one")
|
_, err = rootFs.NewObject(context.Background(), "test/one")
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
_, err = rootFs.NewObject("test/second")
|
_, err = rootFs.NewObject(context.Background(), "test/second")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
|
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -289,9 +290,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
|
|||||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||||
if err != errNotSupported {
|
if err != errNotSupported {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = rootFs.NewObject("test/one")
|
_, err = rootFs.NewObject(context.Background(), "test/one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = rootFs.NewObject("test/third")
|
_, err = rootFs.NewObject(context.Background(), "test/third")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -306,7 +307,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
|
|||||||
// test Remove -- allowed
|
// test Remove -- allowed
|
||||||
err = runInstance.rm(t, rootFs, "test/one")
|
err = runInstance.rm(t, rootFs, "test/one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = rootFs.NewObject("test/one")
|
_, err = rootFs.NewObject(context.Background(), "test/one")
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
// validate that it doesn't exist in temp fs
|
// validate that it doesn't exist in temp fs
|
||||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
@@ -318,7 +319,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
|
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
obj2, err := rootFs.NewObject("test/one")
|
obj2, err := rootFs.NewObject(context.Background(), "test/one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
|
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
|
||||||
require.Equal(t, "one content updated", string(data2))
|
require.Equal(t, "one content updated", string(data2))
|
||||||
@@ -366,7 +367,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
|||||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||||
if err != errNotSupported {
|
if err != errNotSupported {
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
_, err = rootFs.NewObject("test/one")
|
_, err = rootFs.NewObject(context.Background(), "test/one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// validate that it exists in temp fs
|
// validate that it exists in temp fs
|
||||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
@@ -378,7 +379,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
|||||||
// test Rmdir
|
// test Rmdir
|
||||||
err = runInstance.rm(t, rootFs, "test")
|
err = runInstance.rm(t, rootFs, "test")
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
_, err = rootFs.NewObject("test/one")
|
_, err = rootFs.NewObject(context.Background(), "test/one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// validate that it doesn't exist in temp fs
|
// validate that it doesn't exist in temp fs
|
||||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
@@ -389,9 +390,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
|||||||
if err != errNotSupported {
|
if err != errNotSupported {
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
// try to read from it
|
// try to read from it
|
||||||
_, err = rootFs.NewObject("test/one")
|
_, err = rootFs.NewObject(context.Background(), "test/one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = rootFs.NewObject("test/second")
|
_, err = rootFs.NewObject(context.Background(), "test/second")
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
// validate that it exists in temp fs
|
// validate that it exists in temp fs
|
||||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
@@ -404,9 +405,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
|||||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||||
if err != errNotSupported {
|
if err != errNotSupported {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = rootFs.NewObject("test/one")
|
_, err = rootFs.NewObject(context.Background(), "test/one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = rootFs.NewObject("test/third")
|
_, err = rootFs.NewObject(context.Background(), "test/third")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -421,7 +422,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
|||||||
// test Remove
|
// test Remove
|
||||||
err = runInstance.rm(t, rootFs, "test/one")
|
err = runInstance.rm(t, rootFs, "test/one")
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
_, err = rootFs.NewObject("test/one")
|
_, err = rootFs.NewObject(context.Background(), "test/one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// validate that it doesn't exist in temp fs
|
// validate that it doesn't exist in temp fs
|
||||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
|
|||||||
9
backend/cache/directory.go
vendored
9
backend/cache/directory.go
vendored
@@ -3,10 +3,11 @@
|
|||||||
package cache
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"path"
|
"path"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Directory is a generic dir that stores basic information about it
|
// Directory is a generic dir that stores basic information about it
|
||||||
@@ -55,7 +56,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DirectoryFromOriginal builds one from a generic fs.Directory
|
// DirectoryFromOriginal builds one from a generic fs.Directory
|
||||||
func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
|
func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory {
|
||||||
var cd *Directory
|
var cd *Directory
|
||||||
fullRemote := path.Join(f.Root(), d.Remote())
|
fullRemote := path.Join(f.Root(), d.Remote())
|
||||||
|
|
||||||
@@ -67,7 +68,7 @@ func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
|
|||||||
CacheFs: f,
|
CacheFs: f,
|
||||||
Name: name,
|
Name: name,
|
||||||
Dir: dir,
|
Dir: dir,
|
||||||
CacheModTime: d.ModTime().UnixNano(),
|
CacheModTime: d.ModTime(ctx).UnixNano(),
|
||||||
CacheSize: d.Size(),
|
CacheSize: d.Size(),
|
||||||
CacheItems: d.Items(),
|
CacheItems: d.Items(),
|
||||||
CacheType: "Directory",
|
CacheType: "Directory",
|
||||||
@@ -110,7 +111,7 @@ func (d *Directory) parentRemote() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ModTime returns the cached ModTime
|
// ModTime returns the cached ModTime
|
||||||
func (d *Directory) ModTime() time.Time {
|
func (d *Directory) ModTime(ctx context.Context) time.Time {
|
||||||
return time.Unix(0, d.CacheModTime)
|
return time.Unix(0, d.CacheModTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
25
backend/cache/handle.go
vendored
25
backend/cache/handle.go
vendored
@@ -3,6 +3,7 @@
|
|||||||
package cache
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"path"
|
"path"
|
||||||
@@ -11,9 +12,9 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/operations"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/operations"
|
||||||
)
|
)
|
||||||
|
|
||||||
var uploaderMap = make(map[string]*backgroundWriter)
|
var uploaderMap = make(map[string]*backgroundWriter)
|
||||||
@@ -40,6 +41,7 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
|
|||||||
|
|
||||||
// Handle is managing the read/write/seek operations on an open handle
|
// Handle is managing the read/write/seek operations on an open handle
|
||||||
type Handle struct {
|
type Handle struct {
|
||||||
|
ctx context.Context
|
||||||
cachedObject *Object
|
cachedObject *Object
|
||||||
cfs *Fs
|
cfs *Fs
|
||||||
memory *Memory
|
memory *Memory
|
||||||
@@ -58,8 +60,9 @@ type Handle struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewObjectHandle returns a new Handle for an existing Object
|
// NewObjectHandle returns a new Handle for an existing Object
|
||||||
func NewObjectHandle(o *Object, cfs *Fs) *Handle {
|
func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle {
|
||||||
r := &Handle{
|
r := &Handle{
|
||||||
|
ctx: ctx,
|
||||||
cachedObject: o,
|
cachedObject: o,
|
||||||
cfs: cfs,
|
cfs: cfs,
|
||||||
offset: 0,
|
offset: 0,
|
||||||
@@ -351,7 +354,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
|
|||||||
r := w.rc
|
r := w.rc
|
||||||
if w.rc == nil {
|
if w.rc == nil {
|
||||||
r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
|
r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
|
||||||
return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
|
return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -361,7 +364,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
|
|||||||
|
|
||||||
if !closeOpen {
|
if !closeOpen {
|
||||||
if do, ok := r.(fs.RangeSeeker); ok {
|
if do, ok := r.(fs.RangeSeeker); ok {
|
||||||
_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
|
_, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset)
|
||||||
return r, err
|
return r, err
|
||||||
} else if do, ok := r.(io.Seeker); ok {
|
} else if do, ok := r.(io.Seeker); ok {
|
||||||
_, err = do.Seek(offset, io.SeekStart)
|
_, err = do.Seek(offset, io.SeekStart)
|
||||||
@@ -371,7 +374,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
|
|||||||
|
|
||||||
_ = w.rc.Close()
|
_ = w.rc.Close()
|
||||||
return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
|
return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
|
||||||
r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
|
r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -449,7 +452,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
|
|||||||
// we seem to be getting only errors so we abort
|
// we seem to be getting only errors so we abort
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
|
fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
|
||||||
err = w.r.cachedObject.refreshFromSource(true)
|
err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(w, "%v", err)
|
fs.Errorf(w, "%v", err)
|
||||||
}
|
}
|
||||||
@@ -462,7 +465,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
|
|||||||
sourceRead, err = io.ReadFull(w.rc, data)
|
sourceRead, err = io.ReadFull(w.rc, data)
|
||||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||||
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
|
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
|
||||||
err = w.r.cachedObject.refreshFromSource(true)
|
err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(w, "%v", err)
|
fs.Errorf(w, "%v", err)
|
||||||
}
|
}
|
||||||
@@ -588,7 +591,7 @@ func (b *backgroundWriter) run() {
|
|||||||
remote := b.fs.cleanRootFromPath(absPath)
|
remote := b.fs.cleanRootFromPath(absPath)
|
||||||
b.notify(remote, BackgroundUploadStarted, nil)
|
b.notify(remote, BackgroundUploadStarted, nil)
|
||||||
fs.Infof(remote, "background upload: started upload")
|
fs.Infof(remote, "background upload: started upload")
|
||||||
err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
|
err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.notify(remote, BackgroundUploadError, err)
|
b.notify(remote, BackgroundUploadError, err)
|
||||||
_ = b.fs.cache.rollbackPendingUpload(absPath)
|
_ = b.fs.cache.rollbackPendingUpload(absPath)
|
||||||
@@ -598,14 +601,14 @@ func (b *backgroundWriter) run() {
|
|||||||
// clean empty dirs up to root
|
// clean empty dirs up to root
|
||||||
thisDir := cleanPath(path.Dir(remote))
|
thisDir := cleanPath(path.Dir(remote))
|
||||||
for thisDir != "" {
|
for thisDir != "" {
|
||||||
thisList, err := b.fs.tempFs.List(thisDir)
|
thisList, err := b.fs.tempFs.List(context.TODO(), thisDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if len(thisList) > 0 {
|
if len(thisList) > 0 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
err = b.fs.tempFs.Rmdir(thisDir)
|
err = b.fs.tempFs.Rmdir(context.TODO(), thisDir)
|
||||||
fs.Debugf(thisDir, "cleaned from temp path")
|
fs.Debugf(thisDir, "cleaned from temp path")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
break
|
break
|
||||||
|
|||||||
71
backend/cache/object.go
vendored
71
backend/cache/object.go
vendored
@@ -3,15 +3,16 @@
|
|||||||
package cache
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"path"
|
"path"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/ncw/rclone/lib/readers"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/readers"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -68,7 +69,7 @@ func NewObject(f *Fs, remote string) *Object {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ObjectFromOriginal builds one from a generic fs.Object
|
// ObjectFromOriginal builds one from a generic fs.Object
|
||||||
func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
|
func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
|
||||||
var co *Object
|
var co *Object
|
||||||
fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
|
fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
|
||||||
dir, name := path.Split(fullRemote)
|
dir, name := path.Split(fullRemote)
|
||||||
@@ -92,13 +93,13 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
|
|||||||
CacheType: cacheType,
|
CacheType: cacheType,
|
||||||
CacheTs: time.Now(),
|
CacheTs: time.Now(),
|
||||||
}
|
}
|
||||||
co.updateData(o)
|
co.updateData(ctx, o)
|
||||||
return co
|
return co
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *Object) updateData(source fs.Object) {
|
func (o *Object) updateData(ctx context.Context, source fs.Object) {
|
||||||
o.Object = source
|
o.Object = source
|
||||||
o.CacheModTime = source.ModTime().UnixNano()
|
o.CacheModTime = source.ModTime(ctx).UnixNano()
|
||||||
o.CacheSize = source.Size()
|
o.CacheSize = source.Size()
|
||||||
o.CacheStorable = source.Storable()
|
o.CacheStorable = source.Storable()
|
||||||
o.CacheTs = time.Now()
|
o.CacheTs = time.Now()
|
||||||
@@ -130,20 +131,20 @@ func (o *Object) abs() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ModTime returns the cached ModTime
|
// ModTime returns the cached ModTime
|
||||||
func (o *Object) ModTime() time.Time {
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
_ = o.refresh()
|
_ = o.refresh(ctx)
|
||||||
return time.Unix(0, o.CacheModTime)
|
return time.Unix(0, o.CacheModTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Size returns the cached Size
|
// Size returns the cached Size
|
||||||
func (o *Object) Size() int64 {
|
func (o *Object) Size() int64 {
|
||||||
_ = o.refresh()
|
_ = o.refresh(context.TODO())
|
||||||
return o.CacheSize
|
return o.CacheSize
|
||||||
}
|
}
|
||||||
|
|
||||||
// Storable returns the cached Storable
|
// Storable returns the cached Storable
|
||||||
func (o *Object) Storable() bool {
|
func (o *Object) Storable() bool {
|
||||||
_ = o.refresh()
|
_ = o.refresh(context.TODO())
|
||||||
return o.CacheStorable
|
return o.CacheStorable
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -151,18 +152,18 @@ func (o *Object) Storable() bool {
|
|||||||
// all these conditions must be true to ignore a refresh
|
// all these conditions must be true to ignore a refresh
|
||||||
// 1. cache ts didn't expire yet
|
// 1. cache ts didn't expire yet
|
||||||
// 2. is not pending a notification from the wrapped fs
|
// 2. is not pending a notification from the wrapped fs
|
||||||
func (o *Object) refresh() error {
|
func (o *Object) refresh(ctx context.Context) error {
|
||||||
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
|
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
|
||||||
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
|
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
|
||||||
if !isExpired && !isNotified {
|
if !isExpired && !isNotified {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return o.refreshFromSource(true)
|
return o.refreshFromSource(ctx, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
// refreshFromSource requests the original FS for the object in case it comes from a cached entry
|
// refreshFromSource requests the original FS for the object in case it comes from a cached entry
|
||||||
func (o *Object) refreshFromSource(force bool) error {
|
func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
|
||||||
o.refreshMutex.Lock()
|
o.refreshMutex.Lock()
|
||||||
defer o.refreshMutex.Unlock()
|
defer o.refreshMutex.Unlock()
|
||||||
var err error
|
var err error
|
||||||
@@ -172,29 +173,29 @@ func (o *Object) refreshFromSource(force bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if o.isTempFile() {
|
if o.isTempFile() {
|
||||||
liveObject, err = o.ParentFs.NewObject(o.Remote())
|
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
|
||||||
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
|
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
|
||||||
} else {
|
} else {
|
||||||
liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
|
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
|
||||||
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
|
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o, "error refreshing object in : %v", err)
|
fs.Errorf(o, "error refreshing object in : %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
o.updateData(liveObject)
|
o.updateData(ctx, liveObject)
|
||||||
o.persist()
|
o.persist()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetModTime sets the ModTime of this object
|
// SetModTime sets the ModTime of this object
|
||||||
func (o *Object) SetModTime(t time.Time) error {
|
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
|
||||||
if err := o.refreshFromSource(false); err != nil {
|
if err := o.refreshFromSource(ctx, false); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err := o.Object.SetModTime(t)
|
err := o.Object.SetModTime(ctx, t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -207,19 +208,19 @@ func (o *Object) SetModTime(t time.Time) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open is used to request a specific part of the file using fs.RangeOption
|
// Open is used to request a specific part of the file using fs.RangeOption
|
||||||
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if o.Object == nil {
|
if o.Object == nil {
|
||||||
err = o.refreshFromSource(true)
|
err = o.refreshFromSource(ctx, true)
|
||||||
} else {
|
} else {
|
||||||
err = o.refresh()
|
err = o.refresh(ctx)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
cacheReader := NewObjectHandle(o, o.CacheFs)
|
cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
|
||||||
var offset, limit int64 = 0, -1
|
var offset, limit int64 = 0, -1
|
||||||
for _, option := range options {
|
for _, option := range options {
|
||||||
switch x := option.(type) {
|
switch x := option.(type) {
|
||||||
@@ -238,8 +239,8 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Update will change the object data
|
// Update will change the object data
|
||||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||||
if err := o.refreshFromSource(false); err != nil {
|
if err := o.refreshFromSource(ctx, false); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// pause background uploads if active
|
// pause background uploads if active
|
||||||
@@ -254,7 +255,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
fs.Debugf(o, "updating object contents with size %v", src.Size())
|
fs.Debugf(o, "updating object contents with size %v", src.Size())
|
||||||
|
|
||||||
// FIXME use reliable upload
|
// FIXME use reliable upload
|
||||||
err := o.Object.Update(in, src, options...)
|
err := o.Object.Update(ctx, in, src, options...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o, "error updating source: %v", err)
|
fs.Errorf(o, "error updating source: %v", err)
|
||||||
return err
|
return err
|
||||||
@@ -265,7 +266,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
// advertise to ChangeNotify if wrapped doesn't do that
|
// advertise to ChangeNotify if wrapped doesn't do that
|
||||||
o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
|
o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
|
||||||
|
|
||||||
o.CacheModTime = src.ModTime().UnixNano()
|
o.CacheModTime = src.ModTime(ctx).UnixNano()
|
||||||
o.CacheSize = src.Size()
|
o.CacheSize = src.Size()
|
||||||
o.CacheHashes = make(map[hash.Type]string)
|
o.CacheHashes = make(map[hash.Type]string)
|
||||||
o.CacheTs = time.Now()
|
o.CacheTs = time.Now()
|
||||||
@@ -275,8 +276,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove deletes the object from both the cache and the source
|
// Remove deletes the object from both the cache and the source
|
||||||
func (o *Object) Remove() error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
if err := o.refreshFromSource(false); err != nil {
|
if err := o.refreshFromSource(ctx, false); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// pause background uploads if active
|
// pause background uploads if active
|
||||||
@@ -288,7 +289,7 @@ func (o *Object) Remove() error {
|
|||||||
return errors.Errorf("%v is currently uploading, can't delete", o)
|
return errors.Errorf("%v is currently uploading, can't delete", o)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
err := o.Object.Remove()
|
err := o.Object.Remove(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -306,8 +307,8 @@ func (o *Object) Remove() error {
|
|||||||
|
|
||||||
// Hash requests a hash of the object and stores in the cache
|
// Hash requests a hash of the object and stores in the cache
|
||||||
// since it might or might not be called, this is lazy loaded
|
// since it might or might not be called, this is lazy loaded
|
||||||
func (o *Object) Hash(ht hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||||
_ = o.refresh()
|
_ = o.refresh(ctx)
|
||||||
if o.CacheHashes == nil {
|
if o.CacheHashes == nil {
|
||||||
o.CacheHashes = make(map[hash.Type]string)
|
o.CacheHashes = make(map[hash.Type]string)
|
||||||
}
|
}
|
||||||
@@ -316,10 +317,10 @@ func (o *Object) Hash(ht hash.Type) (string, error) {
|
|||||||
if found {
|
if found {
|
||||||
return cachedHash, nil
|
return cachedHash, nil
|
||||||
}
|
}
|
||||||
if err := o.refreshFromSource(false); err != nil {
|
if err := o.refreshFromSource(ctx, false); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
liveHash, err := o.Object.Hash(ht)
|
liveHash, err := o.Object.Hash(ctx, ht)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|||||||
2
backend/cache/plex.go
vendored
2
backend/cache/plex.go
vendored
@@ -14,8 +14,8 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
cache "github.com/patrickmn/go-cache"
|
cache "github.com/patrickmn/go-cache"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
"golang.org/x/net/websocket"
|
"golang.org/x/net/websocket"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/storage_memory.go
vendored
2
backend/cache/storage_memory.go
vendored
@@ -7,9 +7,9 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
cache "github.com/patrickmn/go-cache"
|
cache "github.com/patrickmn/go-cache"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Memory is a wrapper of transient storage for a go-cache store
|
// Memory is a wrapper of transient storage for a go-cache store
|
||||||
|
|||||||
15
backend/cache/storage_persistent.go
vendored
15
backend/cache/storage_persistent.go
vendored
@@ -4,6 +4,7 @@ package cache
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -16,9 +17,9 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
bolt "github.com/coreos/bbolt"
|
bolt "github.com/coreos/bbolt"
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/walk"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/walk"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Constants
|
// Constants
|
||||||
@@ -398,7 +399,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
|
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
|
||||||
}
|
}
|
||||||
err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
|
err = bucket.Put([]byte(cachedObject.Name), encoded)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
|
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
|
||||||
}
|
}
|
||||||
@@ -809,7 +810,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
|
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
|
||||||
}
|
}
|
||||||
err = bucket.Put([]byte(destPath), []byte(encoded))
|
err = bucket.Put([]byte(destPath), encoded)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||||
}
|
}
|
||||||
@@ -1014,7 +1015,7 @@ func (b *Persistent) SetPendingUploadToStarted(remote string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
|
// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
|
||||||
func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
|
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
|
||||||
return b.db.Update(func(tx *bolt.Tx) error {
|
return b.db.Update(func(tx *bolt.Tx) error {
|
||||||
_ = tx.DeleteBucket([]byte(tempBucket))
|
_ = tx.DeleteBucket([]byte(tempBucket))
|
||||||
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
||||||
@@ -1023,7 +1024,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var queuedEntries []fs.Object
|
var queuedEntries []fs.Object
|
||||||
err = walk.Walk(cacheFs.tempFs, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
|
err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
|
||||||
for _, o := range entries {
|
for _, o := range entries {
|
||||||
if oo, ok := o.(fs.Object); ok {
|
if oo, ok := o.(fs.Object); ok {
|
||||||
queuedEntries = append(queuedEntries, oo)
|
queuedEntries = append(queuedEntries, oo)
|
||||||
@@ -1049,7 +1050,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
|
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
|
||||||
}
|
}
|
||||||
err = bucket.Put([]byte(destPath), []byte(encoded))
|
err = bucket.Put([]byte(destPath), encoded)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package crypt
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"crypto/aes"
|
"crypto/aes"
|
||||||
gocipher "crypto/cipher"
|
gocipher "crypto/cipher"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
@@ -13,10 +14,10 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/crypt/pkcs7"
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/accounting"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
"github.com/rfjakob/eme"
|
"github.com/rfjakob/eme"
|
||||||
"golang.org/x/crypto/nacl/secretbox"
|
"golang.org/x/crypto/nacl/secretbox"
|
||||||
"golang.org/x/crypto/scrypt"
|
"golang.org/x/crypto/scrypt"
|
||||||
@@ -68,7 +69,7 @@ type ReadSeekCloser interface {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// OpenRangeSeek opens the file handle at the offset with the limit given
|
// OpenRangeSeek opens the file handle at the offset with the limit given
|
||||||
type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
|
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
|
||||||
|
|
||||||
// Cipher is used to swap out the encryption implementations
|
// Cipher is used to swap out the encryption implementations
|
||||||
type Cipher interface {
|
type Cipher interface {
|
||||||
@@ -85,7 +86,7 @@ type Cipher interface {
|
|||||||
// DecryptData
|
// DecryptData
|
||||||
DecryptData(io.ReadCloser) (io.ReadCloser, error)
|
DecryptData(io.ReadCloser) (io.ReadCloser, error)
|
||||||
// DecryptDataSeek decrypt at a given position
|
// DecryptDataSeek decrypt at a given position
|
||||||
DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
|
DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
|
||||||
// EncryptedSize calculates the size of the data when encrypted
|
// EncryptedSize calculates the size of the data when encrypted
|
||||||
EncryptedSize(int64) int64
|
EncryptedSize(int64) int64
|
||||||
// DecryptedSize calculates the size of the data when decrypted
|
// DecryptedSize calculates the size of the data when decrypted
|
||||||
@@ -144,7 +145,6 @@ type cipher struct {
|
|||||||
buffers sync.Pool // encrypt/decrypt buffers
|
buffers sync.Pool // encrypt/decrypt buffers
|
||||||
cryptoRand io.Reader // read crypto random numbers from here
|
cryptoRand io.Reader // read crypto random numbers from here
|
||||||
dirNameEncrypt bool
|
dirNameEncrypt bool
|
||||||
passCorrupted bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
|
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
|
||||||
@@ -164,11 +164,6 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
|
|||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set to pass corrupted blocks
|
|
||||||
func (c *cipher) setPassCorrupted(passCorrupted bool) {
|
|
||||||
c.passCorrupted = passCorrupted
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key creates all the internal keys from the password passed in using
|
// Key creates all the internal keys from the password passed in using
|
||||||
// scrypt.
|
// scrypt.
|
||||||
//
|
//
|
||||||
@@ -469,7 +464,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
|
|||||||
if int(newRune) < base {
|
if int(newRune) < base {
|
||||||
newRune += 256
|
newRune += 256
|
||||||
}
|
}
|
||||||
_, _ = result.WriteRune(rune(newRune))
|
_, _ = result.WriteRune(newRune)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
_, _ = result.WriteRune(runeValue)
|
_, _ = result.WriteRune(runeValue)
|
||||||
@@ -754,29 +749,29 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
|
|||||||
if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
|
if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
|
||||||
return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
|
return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
|
||||||
}
|
}
|
||||||
// retreive the nonce
|
// retrieve the nonce
|
||||||
fh.nonce.fromBuf(readBuf[fileMagicSize:])
|
fh.nonce.fromBuf(readBuf[fileMagicSize:])
|
||||||
fh.initialNonce = fh.nonce
|
fh.initialNonce = fh.nonce
|
||||||
return fh, nil
|
return fh, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// newDecrypterSeek creates a new file handle decrypting on the fly
|
// newDecrypterSeek creates a new file handle decrypting on the fly
|
||||||
func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
|
func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
|
||||||
var rc io.ReadCloser
|
var rc io.ReadCloser
|
||||||
doRangeSeek := false
|
doRangeSeek := false
|
||||||
setLimit := false
|
setLimit := false
|
||||||
// Open initially with no seek
|
// Open initially with no seek
|
||||||
if offset == 0 && limit < 0 {
|
if offset == 0 && limit < 0 {
|
||||||
// If no offset or limit then open whole file
|
// If no offset or limit then open whole file
|
||||||
rc, err = open(0, -1)
|
rc, err = open(ctx, 0, -1)
|
||||||
} else if offset == 0 {
|
} else if offset == 0 {
|
||||||
// If no offset open the header + limit worth of the file
|
// If no offset open the header + limit worth of the file
|
||||||
_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
|
_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
|
||||||
rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
|
rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit)
|
||||||
setLimit = true
|
setLimit = true
|
||||||
} else {
|
} else {
|
||||||
// Otherwise just read the header to start with
|
// Otherwise just read the header to start with
|
||||||
rc, err = open(0, int64(fileHeaderSize))
|
rc, err = open(ctx, 0, int64(fileHeaderSize))
|
||||||
doRangeSeek = true
|
doRangeSeek = true
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -789,7 +784,7 @@ func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *
|
|||||||
}
|
}
|
||||||
fh.open = open // will be called by fh.RangeSeek
|
fh.open = open // will be called by fh.RangeSeek
|
||||||
if doRangeSeek {
|
if doRangeSeek {
|
||||||
_, err = fh.RangeSeek(offset, io.SeekStart, limit)
|
_, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = fh.Close()
|
_ = fh.Close()
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -828,10 +823,7 @@ func (fh *decrypter) fillBuffer() (err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err // return pending error as it is likely more accurate
|
return err // return pending error as it is likely more accurate
|
||||||
}
|
}
|
||||||
if !fh.c.passCorrupted {
|
return ErrorEncryptedBadBlock
|
||||||
return ErrorEncryptedBadBlock
|
|
||||||
}
|
|
||||||
fs.Errorf(nil, "passing corrupted block")
|
|
||||||
}
|
}
|
||||||
fh.bufIndex = 0
|
fh.bufIndex = 0
|
||||||
fh.bufSize = n - blockHeaderSize
|
fh.bufSize = n - blockHeaderSize
|
||||||
@@ -912,7 +904,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit
|
|||||||
// limiting the total length to limit.
|
// limiting the total length to limit.
|
||||||
//
|
//
|
||||||
// RangeSeek with a limit of < 0 is equivalent to a regular Seek.
|
// RangeSeek with a limit of < 0 is equivalent to a regular Seek.
|
||||||
func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
|
func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) {
|
||||||
fh.mu.Lock()
|
fh.mu.Lock()
|
||||||
defer fh.mu.Unlock()
|
defer fh.mu.Unlock()
|
||||||
|
|
||||||
@@ -939,7 +931,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
|
|||||||
// Can we seek underlying stream directly?
|
// Can we seek underlying stream directly?
|
||||||
if do, ok := fh.rc.(fs.RangeSeeker); ok {
|
if do, ok := fh.rc.(fs.RangeSeeker); ok {
|
||||||
// Seek underlying stream directly
|
// Seek underlying stream directly
|
||||||
_, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
|
_, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fh.finish(err)
|
return 0, fh.finish(err)
|
||||||
}
|
}
|
||||||
@@ -949,7 +941,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
|
|||||||
fh.rc = nil
|
fh.rc = nil
|
||||||
|
|
||||||
// Re-open the underlying object with the offset given
|
// Re-open the underlying object with the offset given
|
||||||
rc, err := fh.open(underlyingOffset, underlyingLimit)
|
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
|
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
|
||||||
}
|
}
|
||||||
@@ -978,7 +970,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
|
|||||||
|
|
||||||
// Seek implements the io.Seeker interface
|
// Seek implements the io.Seeker interface
|
||||||
func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
|
func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
|
||||||
return fh.RangeSeek(offset, whence, -1)
|
return fh.RangeSeek(context.TODO(), offset, whence, -1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// finish sets the final error and tidies up
|
// finish sets the final error and tidies up
|
||||||
@@ -1052,8 +1044,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
|
|||||||
// The open function must return a ReadCloser opened to the offset supplied
|
// The open function must return a ReadCloser opened to the offset supplied
|
||||||
//
|
//
|
||||||
// You must use this form of DecryptData if you might want to Seek the file handle
|
// You must use this form of DecryptData if you might want to Seek the file handle
|
||||||
func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
|
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
|
||||||
out, err := c.newDecrypterSeek(open, offset, limit)
|
out, err := c.newDecrypterSeek(ctx, open, offset, limit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package crypt
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"encoding/base32"
|
"encoding/base32"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@@ -9,8 +10,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/crypt/pkcs7"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
@@ -965,7 +966,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
|||||||
|
|
||||||
// Open stream with a seek of underlyingOffset
|
// Open stream with a seek of underlyingOffset
|
||||||
var reader io.ReadCloser
|
var reader io.ReadCloser
|
||||||
open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
||||||
end := len(ciphertext)
|
end := len(ciphertext)
|
||||||
if underlyingLimit >= 0 {
|
if underlyingLimit >= 0 {
|
||||||
end = int(underlyingOffset + underlyingLimit)
|
end = int(underlyingOffset + underlyingLimit)
|
||||||
@@ -1006,7 +1007,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
|||||||
if offset+limit > len(plaintext) {
|
if offset+limit > len(plaintext) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
|
rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit))
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
check(rc, offset, limit)
|
check(rc, offset, limit)
|
||||||
@@ -1014,14 +1015,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Try decoding it with a single open and lots of seeks
|
// Try decoding it with a single open and lots of seeks
|
||||||
fh, err := c.DecryptDataSeek(open, 0, -1)
|
fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
for _, offset := range trials {
|
for _, offset := range trials {
|
||||||
for _, limit := range limits {
|
for _, limit := range limits {
|
||||||
if offset+limit > len(plaintext) {
|
if offset+limit > len(plaintext) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
|
_, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit))
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
check(fh, offset, limit)
|
check(fh, offset, limit)
|
||||||
@@ -1072,7 +1073,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
|||||||
} {
|
} {
|
||||||
what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
|
what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
|
||||||
callCount := 0
|
callCount := 0
|
||||||
testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
||||||
switch callCount {
|
switch callCount {
|
||||||
case 0:
|
case 0:
|
||||||
assert.Equal(t, int64(0), underlyingOffset, what)
|
assert.Equal(t, int64(0), underlyingOffset, what)
|
||||||
@@ -1084,11 +1085,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
|||||||
t.Errorf("Too many calls %d for %s", callCount+1, what)
|
t.Errorf("Too many calls %d for %s", callCount+1, what)
|
||||||
}
|
}
|
||||||
callCount++
|
callCount++
|
||||||
return open(underlyingOffset, underlyingLimit)
|
return open(ctx, underlyingOffset, underlyingLimit)
|
||||||
}
|
}
|
||||||
fh, err := c.DecryptDataSeek(testOpen, 0, -1)
|
fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
|
gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, gotOffset, test.offset)
|
assert.Equal(t, gotOffset, test.offset)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,21 +2,23 @@
|
|||||||
package crypt
|
package crypt
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/accounting"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
|
||||||
"github.com/ncw/rclone/fs/config/obscure"
|
|
||||||
"github.com/ncw/rclone/fs/fspath"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/fspath"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Globals
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
func init() {
|
func init() {
|
||||||
fs.Register(&fs.RegInfo{
|
fs.Register(&fs.RegInfo{
|
||||||
@@ -79,15 +81,6 @@ names, or for debugging purposes.`,
|
|||||||
Default: false,
|
Default: false,
|
||||||
Hide: fs.OptionHideConfigurator,
|
Hide: fs.OptionHideConfigurator,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "pass_corrupted_blocks",
|
|
||||||
Help: `Pass through corrupted blocks to the output.
|
|
||||||
|
|
||||||
This is for debugging corruption problems in crypt - it shouldn't be needed normally.
|
|
||||||
`,
|
|
||||||
Default: false,
|
|
||||||
Hide: fs.OptionHideConfigurator,
|
|
||||||
Advanced: true,
|
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -116,7 +109,6 @@ func newCipherForConfig(opt *Options) (Cipher, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to make cipher")
|
return nil, errors.Wrap(err, "failed to make cipher")
|
||||||
}
|
}
|
||||||
cipher.setPassCorrupted(opt.PassCorruptedBlocks)
|
|
||||||
return cipher, nil
|
return cipher, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -131,7 +123,7 @@ func NewCipher(m configmap.Mapper) (Cipher, error) {
|
|||||||
return newCipherForConfig(opt)
|
return newCipherForConfig(opt)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFs contstructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
@@ -178,23 +170,10 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
WriteMimeType: false,
|
WriteMimeType: false,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
|
SetTier: true,
|
||||||
|
GetTier: true,
|
||||||
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
|
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
|
|
||||||
doChangeNotify := wrappedFs.Features().ChangeNotify
|
|
||||||
if doChangeNotify != nil {
|
|
||||||
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
|
|
||||||
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
|
||||||
decrypted, err := f.DecryptFileName(path)
|
|
||||||
if err != nil {
|
|
||||||
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
notifyFunc(decrypted, entryType)
|
|
||||||
}
|
|
||||||
doChangeNotify(wrappedNotifyFunc, pollInterval)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return f, err
|
return f, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -206,12 +185,12 @@ type Options struct {
|
|||||||
Password string `config:"password"`
|
Password string `config:"password"`
|
||||||
Password2 string `config:"password2"`
|
Password2 string `config:"password2"`
|
||||||
ShowMapping bool `config:"show_mapping"`
|
ShowMapping bool `config:"show_mapping"`
|
||||||
PassCorruptedBlocks bool `config:"pass_corrupted_blocks"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
// Fs represents a wrapped fs.Fs
|
||||||
type Fs struct {
|
type Fs struct {
|
||||||
fs.Fs
|
fs.Fs
|
||||||
|
wrapper fs.Fs
|
||||||
name string
|
name string
|
||||||
root string
|
root string
|
||||||
opt Options
|
opt Options
|
||||||
@@ -254,7 +233,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt an directory file name to entries.
|
// Encrypt an directory file name to entries.
|
||||||
func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
|
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
|
||||||
remote := dir.Remote()
|
remote := dir.Remote()
|
||||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -264,18 +243,18 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
|
|||||||
if f.opt.ShowMapping {
|
if f.opt.ShowMapping {
|
||||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||||
}
|
}
|
||||||
*entries = append(*entries, f.newDir(dir))
|
*entries = append(*entries, f.newDir(ctx, dir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt some directory entries. This alters entries returning it as newEntries.
|
// Encrypt some directory entries. This alters entries returning it as newEntries.
|
||||||
func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
|
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
|
||||||
newEntries = entries[:0] // in place filter
|
newEntries = entries[:0] // in place filter
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
switch x := entry.(type) {
|
switch x := entry.(type) {
|
||||||
case fs.Object:
|
case fs.Object:
|
||||||
f.add(&newEntries, x)
|
f.add(&newEntries, x)
|
||||||
case fs.Directory:
|
case fs.Directory:
|
||||||
f.addDir(&newEntries, x)
|
f.addDir(ctx, &newEntries, x)
|
||||||
default:
|
default:
|
||||||
return nil, errors.Errorf("Unknown object type %T", entry)
|
return nil, errors.Errorf("Unknown object type %T", entry)
|
||||||
}
|
}
|
||||||
@@ -292,12 +271,12 @@ func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
|
|||||||
//
|
//
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
|
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return f.encryptEntries(entries)
|
return f.encryptEntries(ctx, entries)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
@@ -316,9 +295,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
//
|
//
|
||||||
// Don't implement this unless you have a more efficient way
|
// Don't implement this unless you have a more efficient way
|
||||||
// of listing recursively that doing a directory traversal.
|
// of listing recursively that doing a directory traversal.
|
||||||
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||||
return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
|
return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
|
||||||
newEntries, err := f.encryptEntries(entries)
|
newEntries, err := f.encryptEntries(ctx, entries)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -327,18 +306,18 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewObject finds the Object at remote.
|
// NewObject finds the Object at remote.
|
||||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
|
o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return f.newObject(o), nil
|
return f.newObject(o), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
|
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
|
||||||
|
|
||||||
// put implements Put or PutStream
|
// put implements Put or PutStream
|
||||||
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||||
// Encrypt the data into wrappedIn
|
// Encrypt the data into wrappedIn
|
||||||
wrappedIn, err := f.cipher.EncryptData(in)
|
wrappedIn, err := f.cipher.EncryptData(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -364,7 +343,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Transfer the data
|
// Transfer the data
|
||||||
o, err := put(wrappedIn, f.newObjectInfo(src), options...)
|
o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -373,13 +352,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
|||||||
if ht != hash.None && hasher != nil {
|
if ht != hash.None && hasher != nil {
|
||||||
srcHash := hasher.Sums()[ht]
|
srcHash := hasher.Sums()[ht]
|
||||||
var dstHash string
|
var dstHash string
|
||||||
dstHash, err = o.Hash(ht)
|
dstHash, err = o.Hash(ctx, ht)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to read destination hash")
|
return nil, errors.Wrap(err, "failed to read destination hash")
|
||||||
}
|
}
|
||||||
if srcHash != "" && dstHash != "" && srcHash != dstHash {
|
if srcHash != "" && dstHash != "" && srcHash != dstHash {
|
||||||
// remove object
|
// remove object
|
||||||
err = o.Remove()
|
err = o.Remove(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||||
}
|
}
|
||||||
@@ -395,13 +374,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
|||||||
// May create the object even if it returns an error - if so
|
// May create the object even if it returns an error - if so
|
||||||
// will return the object and the error, otherwise will return
|
// will return the object and the error, otherwise will return
|
||||||
// nil and the error
|
// nil and the error
|
||||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
return f.put(in, src, options, f.Fs.Put)
|
return f.put(ctx, in, src, options, f.Fs.Put)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
return f.put(in, src, options, f.Fs.Features().PutStream)
|
return f.put(ctx, in, src, options, f.Fs.Features().PutStream)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Hashes returns the supported hash sets.
|
// Hashes returns the supported hash sets.
|
||||||
@@ -412,15 +391,15 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
// Mkdir makes the directory (container, bucket)
|
// Mkdir makes the directory (container, bucket)
|
||||||
//
|
//
|
||||||
// Shouldn't return an error if it already exists
|
// Shouldn't return an error if it already exists
|
||||||
func (f *Fs) Mkdir(dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
|
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
// Rmdir removes the directory (container, bucket) if empty
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist or isn't empty
|
// Return an error if it doesn't exist or isn't empty
|
||||||
func (f *Fs) Rmdir(dir string) error {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
|
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Purge all files in the root and the root directory
|
// Purge all files in the root and the root directory
|
||||||
@@ -429,12 +408,12 @@ func (f *Fs) Rmdir(dir string) error {
|
|||||||
// quicker than just running Remove() on the result of List()
|
// quicker than just running Remove() on the result of List()
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist
|
// Return an error if it doesn't exist
|
||||||
func (f *Fs) Purge() error {
|
func (f *Fs) Purge(ctx context.Context) error {
|
||||||
do := f.Fs.Features().Purge
|
do := f.Fs.Features().Purge
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return fs.ErrorCantPurge
|
return fs.ErrorCantPurge
|
||||||
}
|
}
|
||||||
return do()
|
return do(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server side copy operations.
|
||||||
@@ -446,7 +425,7 @@ func (f *Fs) Purge() error {
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantCopy
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
do := f.Fs.Features().Copy
|
do := f.Fs.Features().Copy
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, fs.ErrorCantCopy
|
return nil, fs.ErrorCantCopy
|
||||||
@@ -455,7 +434,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
if !ok {
|
if !ok {
|
||||||
return nil, fs.ErrorCantCopy
|
return nil, fs.ErrorCantCopy
|
||||||
}
|
}
|
||||||
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
|
oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -471,7 +450,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantMove
|
// If it isn't possible then return fs.ErrorCantMove
|
||||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
do := f.Fs.Features().Move
|
do := f.Fs.Features().Move
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, fs.ErrorCantMove
|
return nil, fs.ErrorCantMove
|
||||||
@@ -480,7 +459,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
if !ok {
|
if !ok {
|
||||||
return nil, fs.ErrorCantMove
|
return nil, fs.ErrorCantMove
|
||||||
}
|
}
|
||||||
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
|
oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -495,7 +474,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
// If it isn't possible then return fs.ErrorCantDirMove
|
// If it isn't possible then return fs.ErrorCantDirMove
|
||||||
//
|
//
|
||||||
// If destination exists then return fs.ErrorDirExists
|
// If destination exists then return fs.ErrorDirExists
|
||||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||||
do := f.Fs.Features().DirMove
|
do := f.Fs.Features().DirMove
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return fs.ErrorCantDirMove
|
return fs.ErrorCantDirMove
|
||||||
@@ -505,14 +484,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||||
return fs.ErrorCantDirMove
|
return fs.ErrorCantDirMove
|
||||||
}
|
}
|
||||||
return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
|
return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutUnchecked uploads the object
|
// PutUnchecked uploads the object
|
||||||
//
|
//
|
||||||
// This will create a duplicate if we upload a new file without
|
// This will create a duplicate if we upload a new file without
|
||||||
// checking to see if there is one already - use Put() for that.
|
// checking to see if there is one already - use Put() for that.
|
||||||
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
do := f.Fs.Features().PutUnchecked
|
do := f.Fs.Features().PutUnchecked
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, errors.New("can't PutUnchecked")
|
return nil, errors.New("can't PutUnchecked")
|
||||||
@@ -521,7 +500,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
o, err := do(wrappedIn, f.newObjectInfo(src))
|
o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -532,21 +511,21 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
|
|||||||
//
|
//
|
||||||
// Implement this if you have a way of emptying the trash or
|
// Implement this if you have a way of emptying the trash or
|
||||||
// otherwise cleaning up old versions of files.
|
// otherwise cleaning up old versions of files.
|
||||||
func (f *Fs) CleanUp() error {
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||||
do := f.Fs.Features().CleanUp
|
do := f.Fs.Features().CleanUp
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return errors.New("can't CleanUp")
|
return errors.New("can't CleanUp")
|
||||||
}
|
}
|
||||||
return do()
|
return do(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// About gets quota information from the Fs
|
// About gets quota information from the Fs
|
||||||
func (f *Fs) About() (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
do := f.Fs.Features().About
|
do := f.Fs.Features().About
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, errors.New("About not supported")
|
return nil, errors.New("About not supported")
|
||||||
}
|
}
|
||||||
return do()
|
return do(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnWrap returns the Fs that this Fs is wrapping
|
// UnWrap returns the Fs that this Fs is wrapping
|
||||||
@@ -554,6 +533,16 @@ func (f *Fs) UnWrap() fs.Fs {
|
|||||||
return f.Fs
|
return f.Fs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WrapFs returns the Fs that is wrapping this Fs
|
||||||
|
func (f *Fs) WrapFs() fs.Fs {
|
||||||
|
return f.wrapper
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWrapper sets the Fs that is wrapping this Fs
|
||||||
|
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
||||||
|
f.wrapper = wrapper
|
||||||
|
}
|
||||||
|
|
||||||
// EncryptFileName returns an encrypted file name
|
// EncryptFileName returns an encrypted file name
|
||||||
func (f *Fs) EncryptFileName(fileName string) string {
|
func (f *Fs) EncryptFileName(fileName string) string {
|
||||||
return f.cipher.EncryptFileName(fileName)
|
return f.cipher.EncryptFileName(fileName)
|
||||||
@@ -565,13 +554,13 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ComputeHash takes the nonce from o, and encrypts the contents of
|
// ComputeHash takes the nonce from o, and encrypts the contents of
|
||||||
// src with it, and calcuates the hash given by HashType on the fly
|
// src with it, and calculates the hash given by HashType on the fly
|
||||||
//
|
//
|
||||||
// Note that we break lots of encapsulation in this function.
|
// Note that we break lots of encapsulation in this function.
|
||||||
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
||||||
// Read the nonce - opening the file is sufficient to read the nonce in
|
// Read the nonce - opening the file is sufficient to read the nonce in
|
||||||
// use a limited read so we only read the header
|
// use a limited read so we only read the header
|
||||||
in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", errors.Wrap(err, "failed to open object to read nonce")
|
return "", errors.Wrap(err, "failed to open object to read nonce")
|
||||||
}
|
}
|
||||||
@@ -601,7 +590,7 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open the src for input
|
// Open the src for input
|
||||||
in, err = src.Open()
|
in, err = src.Open(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", errors.Wrap(err, "failed to open src")
|
return "", errors.Wrap(err, "failed to open src")
|
||||||
}
|
}
|
||||||
@@ -626,6 +615,75 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr
|
|||||||
return m.Sums()[hashType], nil
|
return m.Sums()[hashType], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MergeDirs merges the contents of all the directories passed
|
||||||
|
// in into the first one and rmdirs the other directories.
|
||||||
|
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||||
|
do := f.Fs.Features().MergeDirs
|
||||||
|
if do == nil {
|
||||||
|
return errors.New("MergeDirs not supported")
|
||||||
|
}
|
||||||
|
out := make([]fs.Directory, len(dirs))
|
||||||
|
for i, dir := range dirs {
|
||||||
|
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
|
||||||
|
}
|
||||||
|
return do(ctx, out)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DirCacheFlush resets the directory cache - used in testing
|
||||||
|
// as an optional interface
|
||||||
|
func (f *Fs) DirCacheFlush() {
|
||||||
|
do := f.Fs.Features().DirCacheFlush
|
||||||
|
if do != nil {
|
||||||
|
do()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||||
|
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
|
||||||
|
do := f.Fs.Features().PublicLink
|
||||||
|
if do == nil {
|
||||||
|
return "", errors.New("PublicLink not supported")
|
||||||
|
}
|
||||||
|
o, err := f.NewObject(ctx, remote)
|
||||||
|
if err != nil {
|
||||||
|
// assume it is a directory
|
||||||
|
return do(ctx, f.cipher.EncryptDirName(remote))
|
||||||
|
}
|
||||||
|
return do(ctx, o.(*Object).Object.Remote())
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChangeNotify calls the passed function with a path
|
||||||
|
// that has had changes. If the implementation
|
||||||
|
// uses polling, it should adhere to the given interval.
|
||||||
|
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||||
|
do := f.Fs.Features().ChangeNotify
|
||||||
|
if do == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||||
|
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
|
||||||
|
var (
|
||||||
|
err error
|
||||||
|
decrypted string
|
||||||
|
)
|
||||||
|
switch entryType {
|
||||||
|
case fs.EntryDirectory:
|
||||||
|
decrypted, err = f.cipher.DecryptDirName(path)
|
||||||
|
case fs.EntryObject:
|
||||||
|
decrypted, err = f.cipher.DecryptFileName(path)
|
||||||
|
default:
|
||||||
|
fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
notifyFunc(decrypted, entryType)
|
||||||
|
}
|
||||||
|
do(ctx, wrappedNotifyFunc, pollIntervalChan)
|
||||||
|
}
|
||||||
|
|
||||||
// Object describes a wrapped for being read from the Fs
|
// Object describes a wrapped for being read from the Fs
|
||||||
//
|
//
|
||||||
// This decrypts the remote name and decrypts the data
|
// This decrypts the remote name and decrypts the data
|
||||||
@@ -676,7 +734,7 @@ func (o *Object) Size() int64 {
|
|||||||
|
|
||||||
// Hash returns the selected checksum of the file
|
// Hash returns the selected checksum of the file
|
||||||
// If no checksum is available it returns ""
|
// If no checksum is available it returns ""
|
||||||
func (o *Object) Hash(ht hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||||
return "", hash.ErrUnsupported
|
return "", hash.ErrUnsupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -686,7 +744,7 @@ func (o *Object) UnWrap() fs.Object {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||||
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||||
var openOptions []fs.OpenOption
|
var openOptions []fs.OpenOption
|
||||||
var offset, limit int64 = 0, -1
|
var offset, limit int64 = 0, -1
|
||||||
for _, option := range options {
|
for _, option := range options {
|
||||||
@@ -700,10 +758,10 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
|||||||
openOptions = append(openOptions, option)
|
openOptions = append(openOptions, option)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
||||||
if underlyingOffset == 0 && underlyingLimit < 0 {
|
if underlyingOffset == 0 && underlyingLimit < 0 {
|
||||||
// Open with no seek
|
// Open with no seek
|
||||||
return o.Object.Open(openOptions...)
|
return o.Object.Open(ctx, openOptions...)
|
||||||
}
|
}
|
||||||
// Open stream with a range of underlyingOffset, underlyingLimit
|
// Open stream with a range of underlyingOffset, underlyingLimit
|
||||||
end := int64(-1)
|
end := int64(-1)
|
||||||
@@ -714,7 +772,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
|
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
|
||||||
return o.Object.Open(newOpenOptions...)
|
return o.Object.Open(ctx, newOpenOptions...)
|
||||||
}, offset, limit)
|
}, offset, limit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -723,17 +781,17 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Update in to the object with the modTime given of the given size
|
// Update in to the object with the modTime given of the given size
|
||||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||||
update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
return o.Object, o.Object.Update(in, src, options...)
|
return o.Object, o.Object.Update(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
_, err := o.f.put(in, src, options, update)
|
_, err := o.f.put(ctx, in, src, options, update)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// newDir returns a dir with the Name decrypted
|
// newDir returns a dir with the Name decrypted
|
||||||
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
|
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
|
||||||
newDir := fs.NewDirCopy(dir)
|
newDir := fs.NewDirCopy(ctx, dir)
|
||||||
remote := dir.Remote()
|
remote := dir.Remote()
|
||||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -744,6 +802,24 @@ func (f *Fs) newDir(dir fs.Directory) fs.Directory {
|
|||||||
return newDir
|
return newDir
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UserInfo returns info about the connected user
|
||||||
|
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
|
||||||
|
do := f.Fs.Features().UserInfo
|
||||||
|
if do == nil {
|
||||||
|
return nil, fs.ErrorNotImplemented
|
||||||
|
}
|
||||||
|
return do(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disconnect the current user
|
||||||
|
func (f *Fs) Disconnect(ctx context.Context) error {
|
||||||
|
do := f.Fs.Features().Disconnect
|
||||||
|
if do == nil {
|
||||||
|
return fs.ErrorNotImplemented
|
||||||
|
}
|
||||||
|
return do(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
||||||
//
|
//
|
||||||
// This encrypts the remote name and adjusts the size
|
// This encrypts the remote name and adjusts the size
|
||||||
@@ -780,10 +856,38 @@ func (o *ObjectInfo) Size() int64 {
|
|||||||
|
|
||||||
// Hash returns the selected checksum of the file
|
// Hash returns the selected checksum of the file
|
||||||
// If no checksum is available it returns ""
|
// If no checksum is available it returns ""
|
||||||
func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
|
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ID returns the ID of the Object if known, or "" if not
|
||||||
|
func (o *Object) ID() string {
|
||||||
|
do, ok := o.Object.(fs.IDer)
|
||||||
|
if !ok {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return do.ID()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTier performs changing storage tier of the Object if
|
||||||
|
// multiple storage classes supported
|
||||||
|
func (o *Object) SetTier(tier string) error {
|
||||||
|
do, ok := o.Object.(fs.SetTierer)
|
||||||
|
if !ok {
|
||||||
|
return errors.New("crypt: underlying remote does not support SetTier")
|
||||||
|
}
|
||||||
|
return do.SetTier(tier)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTier returns storage tier or class of the Object
|
||||||
|
func (o *Object) GetTier() string {
|
||||||
|
do, ok := o.Object.(fs.GetTierer)
|
||||||
|
if !ok {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return do.GetTier()
|
||||||
|
}
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
// Check the interfaces are satisfied
|
||||||
var (
|
var (
|
||||||
_ fs.Fs = (*Fs)(nil)
|
_ fs.Fs = (*Fs)(nil)
|
||||||
@@ -797,7 +901,17 @@ var (
|
|||||||
_ fs.UnWrapper = (*Fs)(nil)
|
_ fs.UnWrapper = (*Fs)(nil)
|
||||||
_ fs.ListRer = (*Fs)(nil)
|
_ fs.ListRer = (*Fs)(nil)
|
||||||
_ fs.Abouter = (*Fs)(nil)
|
_ fs.Abouter = (*Fs)(nil)
|
||||||
|
_ fs.Wrapper = (*Fs)(nil)
|
||||||
|
_ fs.MergeDirser = (*Fs)(nil)
|
||||||
|
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||||
|
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||||
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
|
_ fs.UserInfoer = (*Fs)(nil)
|
||||||
|
_ fs.Disconnecter = (*Fs)(nil)
|
||||||
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
||||||
_ fs.Object = (*Object)(nil)
|
_ fs.Object = (*Object)(nil)
|
||||||
_ fs.ObjectUnWrapper = (*Object)(nil)
|
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||||
|
_ fs.IDer = (*Object)(nil)
|
||||||
|
_ fs.SetTierer = (*Object)(nil)
|
||||||
|
_ fs.GetTierer = (*Object)(nil)
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -6,13 +6,13 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/crypt"
|
"github.com/rclone/rclone/backend/crypt"
|
||||||
_ "github.com/ncw/rclone/backend/drive" // for integration tests
|
_ "github.com/rclone/rclone/backend/drive" // for integration tests
|
||||||
_ "github.com/ncw/rclone/backend/local"
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
_ "github.com/ncw/rclone/backend/swift" // for integration tests
|
_ "github.com/rclone/rclone/backend/swift" // for integration tests
|
||||||
"github.com/ncw/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
"github.com/ncw/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
@@ -21,8 +21,10 @@ func TestIntegration(t *testing.T) {
|
|||||||
t.Skip("Skipping as -remote not set")
|
t.Skip("Skipping as -remote not set")
|
||||||
}
|
}
|
||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: *fstest.RemoteName,
|
RemoteName: *fstest.RemoteName,
|
||||||
NilObject: (*crypt.Object)(nil),
|
NilObject: (*crypt.Object)(nil),
|
||||||
|
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||||
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -42,6 +44,8 @@ func TestStandard(t *testing.T) {
|
|||||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
||||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||||
},
|
},
|
||||||
|
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||||
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -61,6 +65,8 @@ func TestOff(t *testing.T) {
|
|||||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||||
{Name: name, Key: "filename_encryption", Value: "off"},
|
{Name: name, Key: "filename_encryption", Value: "off"},
|
||||||
},
|
},
|
||||||
|
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||||
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -80,6 +86,8 @@ func TestObfuscate(t *testing.T) {
|
|||||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||||
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
|
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
|
||||||
},
|
},
|
||||||
SkipBadWindowsCharacters: true,
|
SkipBadWindowsCharacters: true,
|
||||||
|
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||||
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,9 +1,8 @@
|
|||||||
// +build go1.9
|
|
||||||
|
|
||||||
package drive
|
package drive
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
@@ -12,11 +11,11 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
_ "github.com/ncw/rclone/backend/local"
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/operations"
|
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/operations"
|
||||||
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"google.golang.org/api/drive/v3"
|
"google.golang.org/api/drive/v3"
|
||||||
@@ -197,7 +196,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
|
|||||||
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
|
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -211,7 +210,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
|
|||||||
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
|
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -222,10 +221,10 @@ func (f *Fs) InternalTestDocumentExport(t *testing.T) {
|
|||||||
f.exportExtensions, _, err = parseExtensions("txt")
|
f.exportExtensions, _, err = parseExtensions("txt")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
obj, err := f.NewObject("example2.txt")
|
obj, err := f.NewObject(context.Background(), "example2.txt")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
rc, err := obj.Open()
|
rc, err := obj.Open(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer func() { require.NoError(t, rc.Close()) }()
|
defer func() { require.NoError(t, rc.Close()) }()
|
||||||
|
|
||||||
@@ -248,10 +247,10 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
|
|||||||
f.exportExtensions, _, err = parseExtensions("link.html")
|
f.exportExtensions, _, err = parseExtensions("link.html")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
obj, err := f.NewObject("example2.link.html")
|
obj, err := f.NewObject(context.Background(), "example2.link.html")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
rc, err := obj.Open()
|
rc, err := obj.Open(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer func() { require.NoError(t, rc.Close()) }()
|
defer func() { require.NoError(t, rc.Close()) }()
|
||||||
|
|
||||||
|
|||||||
@@ -1,14 +1,12 @@
|
|||||||
// Test Drive filesystem interface
|
// Test Drive filesystem interface
|
||||||
|
|
||||||
// +build go1.9
|
|
||||||
|
|
||||||
package drive
|
package drive
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
|
|||||||
@@ -1,6 +0,0 @@
|
|||||||
// Build for unsupported platforms to stop go complaining
|
|
||||||
// about "no buildable Go source files "
|
|
||||||
|
|
||||||
// +build !go1.9
|
|
||||||
|
|
||||||
package drive
|
|
||||||
@@ -8,8 +8,6 @@
|
|||||||
//
|
//
|
||||||
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
|
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
|
||||||
|
|
||||||
// +build go1.9
|
|
||||||
|
|
||||||
package drive
|
package drive
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -21,10 +19,10 @@ import (
|
|||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/fserrors"
|
|
||||||
"github.com/ncw/rclone/lib/readers"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/lib/readers"
|
||||||
"google.golang.org/api/drive/v3"
|
"google.golang.org/api/drive/v3"
|
||||||
"google.golang.org/api/googleapi"
|
"google.golang.org/api/googleapi"
|
||||||
)
|
)
|
||||||
@@ -58,9 +56,7 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string
|
|||||||
"uploadType": {"resumable"},
|
"uploadType": {"resumable"},
|
||||||
"fields": {partialFields},
|
"fields": {partialFields},
|
||||||
}
|
}
|
||||||
if f.isTeamDrive {
|
params.Set("supportsAllDrives", "true")
|
||||||
params.Set("supportsTeamDrives", "true")
|
|
||||||
}
|
|
||||||
if f.opt.KeepRevisionForever {
|
if f.opt.KeepRevisionForever {
|
||||||
params.Set("keepRevisionForever", "true")
|
params.Set("keepRevisionForever", "true")
|
||||||
}
|
}
|
||||||
@@ -185,7 +181,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
|
|||||||
// been 200 OK.
|
// been 200 OK.
|
||||||
//
|
//
|
||||||
// So parse the response out of the body. We aren't expecting
|
// So parse the response out of the body. We aren't expecting
|
||||||
// any other 2xx codes, so we parse it unconditionaly on
|
// any other 2xx codes, so we parse it unconditionally on
|
||||||
// StatusCode
|
// StatusCode
|
||||||
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
|
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
|
||||||
return 598, err
|
return 598, err
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/dropbox/dbhash"
|
"github.com/rclone/rclone/backend/dropbox/dbhash"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -22,6 +22,7 @@ of path_display and all will be well.
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
@@ -37,17 +38,17 @@ import (
|
|||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/config"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
|
||||||
"github.com/ncw/rclone/fs/config/obscure"
|
|
||||||
"github.com/ncw/rclone/fs/fserrors"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/ncw/rclone/lib/oauthutil"
|
|
||||||
"github.com/ncw/rclone/lib/pacer"
|
|
||||||
"github.com/ncw/rclone/lib/readers"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/oauthutil"
|
||||||
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
|
"github.com/rclone/rclone/lib/readers"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -130,8 +131,8 @@ Any files larger than this will be uploaded in chunks of this size.
|
|||||||
Note that chunks are buffered in memory (one at a time) so rclone can
|
Note that chunks are buffered in memory (one at a time) so rclone can
|
||||||
deal with retries. Setting this larger will increase the speed
|
deal with retries. Setting this larger will increase the speed
|
||||||
slightly (at most 10%% for 128MB in tests) at the cost of using more
|
slightly (at most 10%% for 128MB in tests) at the cost of using more
|
||||||
memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
|
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
|
||||||
Default: fs.SizeSuffix(defaultChunkSize),
|
Default: defaultChunkSize,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "impersonate",
|
Name: "impersonate",
|
||||||
@@ -160,7 +161,7 @@ type Fs struct {
|
|||||||
team team.Client // for the Teams API
|
team team.Client // for the Teams API
|
||||||
slashRoot string // root with "/" prefix, lowercase
|
slashRoot string // root with "/" prefix, lowercase
|
||||||
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
||||||
pacer *pacer.Pacer // To pace the API calls
|
pacer *fs.Pacer // To pace the API calls
|
||||||
ns string // The namespace we are using or "" for none
|
ns string // The namespace we are using or "" for none
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -209,12 +210,12 @@ func shouldRetry(err error) (bool, error) {
|
|||||||
case auth.RateLimitAPIError:
|
case auth.RateLimitAPIError:
|
||||||
if e.RateLimitError.RetryAfter > 0 {
|
if e.RateLimitError.RetryAfter > 0 {
|
||||||
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
||||||
time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
|
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
|
||||||
}
|
}
|
||||||
return true, err
|
return true, err
|
||||||
}
|
}
|
||||||
// Keep old behaviour for backward compatibility
|
// Keep old behavior for backward compatibility
|
||||||
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
|
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
|
||||||
return true, err
|
return true, err
|
||||||
}
|
}
|
||||||
return fserrors.ShouldRetry(err), err
|
return fserrors.ShouldRetry(err), err
|
||||||
@@ -239,7 +240,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFs contstructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
@@ -273,7 +274,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
}
|
||||||
config := dropbox.Config{
|
config := dropbox.Config{
|
||||||
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
||||||
@@ -441,7 +442,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
|
|||||||
|
|
||||||
// NewObject finds the Object at remote. If it can't be found
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
// it returns the error fs.ErrorObjectNotFound.
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
return f.newObjectWithInfo(remote, nil)
|
return f.newObjectWithInfo(remote, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -454,7 +455,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
|||||||
//
|
//
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
root := f.slashRoot
|
root := f.slashRoot
|
||||||
if dir != "" {
|
if dir != "" {
|
||||||
root += "/" + dir
|
root += "/" + dir
|
||||||
@@ -541,22 +542,22 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
// Copy the reader in to the new object which is returned
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
// Temporary Object under construction
|
// Temporary Object under construction
|
||||||
o := &Object{
|
o := &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
remote: src.Remote(),
|
remote: src.Remote(),
|
||||||
}
|
}
|
||||||
return o, o.Update(in, src, options...)
|
return o, o.Update(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
return f.Put(in, src, options...)
|
return f.Put(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mkdir creates the container if it doesn't exist
|
// Mkdir creates the container if it doesn't exist
|
||||||
func (f *Fs) Mkdir(dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
root := path.Join(f.slashRoot, dir)
|
root := path.Join(f.slashRoot, dir)
|
||||||
|
|
||||||
// can't create or run metadata on root
|
// can't create or run metadata on root
|
||||||
@@ -586,7 +587,7 @@ func (f *Fs) Mkdir(dir string) error {
|
|||||||
// Rmdir deletes the container
|
// Rmdir deletes the container
|
||||||
//
|
//
|
||||||
// Returns an error if it isn't empty
|
// Returns an error if it isn't empty
|
||||||
func (f *Fs) Rmdir(dir string) error {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
root := path.Join(f.slashRoot, dir)
|
root := path.Join(f.slashRoot, dir)
|
||||||
|
|
||||||
// can't remove root
|
// can't remove root
|
||||||
@@ -642,7 +643,7 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantCopy
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
srcObj, ok := src.(*Object)
|
srcObj, ok := src.(*Object)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(src, "Can't copy - not same remote type")
|
fs.Debugf(src, "Can't copy - not same remote type")
|
||||||
@@ -687,7 +688,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
// Optional interface: Only implement this if you have a way of
|
// Optional interface: Only implement this if you have a way of
|
||||||
// deleting all the files quicker than just running Remove() on the
|
// deleting all the files quicker than just running Remove() on the
|
||||||
// result of List()
|
// result of List()
|
||||||
func (f *Fs) Purge() (err error) {
|
func (f *Fs) Purge(ctx context.Context) (err error) {
|
||||||
// Let dropbox delete the filesystem tree
|
// Let dropbox delete the filesystem tree
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
|
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
|
||||||
@@ -705,7 +706,7 @@ func (f *Fs) Purge() (err error) {
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantMove
|
// If it isn't possible then return fs.ErrorCantMove
|
||||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
srcObj, ok := src.(*Object)
|
srcObj, ok := src.(*Object)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(src, "Can't move - not same remote type")
|
fs.Debugf(src, "Can't move - not same remote type")
|
||||||
@@ -745,7 +746,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||||
func (f *Fs) PublicLink(remote string) (link string, err error) {
|
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
|
||||||
absPath := "/" + path.Join(f.Root(), remote)
|
absPath := "/" + path.Join(f.Root(), remote)
|
||||||
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
|
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
|
||||||
createArg := sharing.CreateSharedLinkWithSettingsArg{
|
createArg := sharing.CreateSharedLinkWithSettingsArg{
|
||||||
@@ -798,7 +799,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
|
|||||||
// If it isn't possible then return fs.ErrorCantDirMove
|
// If it isn't possible then return fs.ErrorCantDirMove
|
||||||
//
|
//
|
||||||
// If destination exists then return fs.ErrorDirExists
|
// If destination exists then return fs.ErrorDirExists
|
||||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||||
srcFs, ok := src.(*Fs)
|
srcFs, ok := src.(*Fs)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||||
@@ -834,7 +835,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// About gets quota information
|
// About gets quota information
|
||||||
func (f *Fs) About() (usage *fs.Usage, err error) {
|
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||||
var q *users.SpaceUsage
|
var q *users.SpaceUsage
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
q, err = f.users.GetSpaceUsage()
|
q, err = f.users.GetSpaceUsage()
|
||||||
@@ -886,7 +887,7 @@ func (o *Object) Remote() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Hash returns the dropbox special hash
|
// Hash returns the dropbox special hash
|
||||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||||
if t != hash.Dropbox {
|
if t != hash.Dropbox {
|
||||||
return "", hash.ErrUnsupported
|
return "", hash.ErrUnsupported
|
||||||
}
|
}
|
||||||
@@ -948,7 +949,7 @@ func (o *Object) readMetaData() (err error) {
|
|||||||
//
|
//
|
||||||
// It attempts to read the objects mtime and if that isn't present the
|
// It attempts to read the objects mtime and if that isn't present the
|
||||||
// LastModified returned in the http headers
|
// LastModified returned in the http headers
|
||||||
func (o *Object) ModTime() time.Time {
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
err := o.readMetaData()
|
err := o.readMetaData()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(o, "Failed to read metadata: %v", err)
|
fs.Debugf(o, "Failed to read metadata: %v", err)
|
||||||
@@ -960,7 +961,7 @@ func (o *Object) ModTime() time.Time {
|
|||||||
// SetModTime sets the modification time of the local fs object
|
// SetModTime sets the modification time of the local fs object
|
||||||
//
|
//
|
||||||
// Commits the datastore
|
// Commits the datastore
|
||||||
func (o *Object) SetModTime(modTime time.Time) error {
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||||
// Dropbox doesn't have a way of doing this so returning this
|
// Dropbox doesn't have a way of doing this so returning this
|
||||||
// error will cause the file to be deleted first then
|
// error will cause the file to be deleted first then
|
||||||
// re-uploaded to set the time.
|
// re-uploaded to set the time.
|
||||||
@@ -973,7 +974,8 @@ func (o *Object) Storable() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open an object for read
|
// Open an object for read
|
||||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
|
fs.FixRangeOption(options, o.bytes)
|
||||||
headers := fs.OpenOptionHeaders(options)
|
headers := fs.OpenOptionHeaders(options)
|
||||||
arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
|
arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
@@ -1099,7 +1101,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
|
|||||||
// Copy the reader into the object updating modTime and size
|
// Copy the reader into the object updating modTime and size
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||||
remote := o.remotePath()
|
remote := o.remotePath()
|
||||||
if ignoredFiles.MatchString(remote) {
|
if ignoredFiles.MatchString(remote) {
|
||||||
fs.Logf(o, "File name disallowed - not uploading")
|
fs.Logf(o, "File name disallowed - not uploading")
|
||||||
@@ -1108,7 +1110,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
commitInfo := files.NewCommitInfo(o.remotePath())
|
commitInfo := files.NewCommitInfo(o.remotePath())
|
||||||
commitInfo.Mode.Tag = "overwrite"
|
commitInfo.Mode.Tag = "overwrite"
|
||||||
// The Dropbox API only accepts timestamps in UTC with second precision.
|
// The Dropbox API only accepts timestamps in UTC with second precision.
|
||||||
commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second)
|
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
|
||||||
|
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
var err error
|
var err error
|
||||||
@@ -1128,7 +1130,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove() (err error) {
|
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
|
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
|
||||||
return shouldRetry(err)
|
return shouldRetry(err)
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ package dropbox
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
|
|||||||
389
backend/fichier/api.go
Normal file
389
backend/fichier/api.go
Normal file
@@ -0,0 +1,389 @@
|
|||||||
|
package fichier
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/lib/rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
// so it can be used as the return value in the pacer function.
func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
|
||||||
|
|
||||||
|
// isAlphaNumeric reports whether a string consists solely of ASCII
// letters and digits. Used to validate upload IDs before they are
// interpolated into request URLs.
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
|
||||||
|
|
||||||
|
func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
|
||||||
|
request := DownloadRequest{
|
||||||
|
URL: url,
|
||||||
|
Single: 1,
|
||||||
|
}
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/download/get_token.cgi",
|
||||||
|
}
|
||||||
|
|
||||||
|
var token GetTokenResponse
|
||||||
|
err := f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(&opts, &request, &token)
|
||||||
|
return shouldRetry(resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't list files")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &token, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func fileFromSharedFile(file *SharedFile) File {
|
||||||
|
return File{
|
||||||
|
URL: file.Link,
|
||||||
|
Filename: file.Filename,
|
||||||
|
Size: file.Size,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
RootURL: "https://1fichier.com/dir/",
|
||||||
|
Path: id,
|
||||||
|
Parameters: map[string][]string{"json": {"1"}},
|
||||||
|
}
|
||||||
|
|
||||||
|
var sharedFiles SharedFolderResponse
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(&opts, nil, &sharedFiles)
|
||||||
|
return shouldRetry(resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't list files")
|
||||||
|
}
|
||||||
|
|
||||||
|
entries = make([]fs.DirEntry, len(sharedFiles))
|
||||||
|
|
||||||
|
for i, sharedFile := range sharedFiles {
|
||||||
|
entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&sharedFile))
|
||||||
|
}
|
||||||
|
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
|
||||||
|
// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
|
||||||
|
request := ListFilesRequest{
|
||||||
|
FolderID: directoryID,
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/file/ls.cgi",
|
||||||
|
}
|
||||||
|
|
||||||
|
filesList = &FilesList{}
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(&opts, &request, filesList)
|
||||||
|
return shouldRetry(resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't list files")
|
||||||
|
}
|
||||||
|
|
||||||
|
return filesList, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error) {
|
||||||
|
// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
|
||||||
|
|
||||||
|
request := ListFolderRequest{
|
||||||
|
FolderID: directoryID,
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/folder/ls.cgi",
|
||||||
|
}
|
||||||
|
|
||||||
|
foldersList = &FoldersList{}
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(&opts, &request, foldersList)
|
||||||
|
return shouldRetry(resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't list folders")
|
||||||
|
}
|
||||||
|
|
||||||
|
// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
|
||||||
|
|
||||||
|
return foldersList, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// listDir lists the files and subfolders of dir as directory entries.
//
// As a side effect every subfolder found is primed into the dirCache,
// which later lookups (FindDir/FindLeaf) rely on.
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// Resolve the remote root first — dir is relative to it.
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return nil, err
	}

	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}

	// The dirCache stores IDs as strings but the API wants integers.
	folderID, err := strconv.Atoi(directoryID)
	if err != nil {
		return nil, err
	}

	files, err := f.listFiles(folderID)
	if err != nil {
		return nil, err
	}

	folders, err := f.listFolders(folderID)
	if err != nil {
		return nil, err
	}

	// Files occupy the first len(files.Items) slots, folders the rest.
	entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))

	for i, item := range files.Items {
		// Undo the filename substitutions done when names were uploaded.
		item.Filename = restoreReservedChars(item.Filename)
		entries[i] = f.newObjectFromFile(ctx, dir, item)
	}

	for i, folder := range folders.SubFolders {
		// Folder creation dates come as "YYYY-MM-DD hh:mm:ss" strings.
		createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate)
		if err != nil {
			return nil, err
		}

		folder.Name = restoreReservedChars(folder.Name)
		fullPath := getRemote(dir, folder.Name)
		folderID := strconv.Itoa(folder.ID)

		entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID)

		// fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID)
		f.dirCache.Put(fullPath, folderID)
	}

	return entries, nil
}
|
||||||
|
|
||||||
|
// newObjectFromFile wraps a File API record into an Object rooted at
// dir (dir == "" means the remote root).
func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object {
	return &Object{
		fs:     f,
		remote: getRemote(dir, item.Filename),
		file:   item,
	}
}
|
||||||
|
|
||||||
|
// getRemote joins dir and fileName into a remote path, omitting the
// separator when dir is empty so root-level entries carry no leading
// slash.
func getRemote(dir, fileName string) string {
	if dir != "" {
		return dir + "/" + fileName
	}
	return fileName
}
|
||||||
|
|
||||||
|
func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse, err error) {
|
||||||
|
name := replaceReservedChars(leaf)
|
||||||
|
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
|
||||||
|
|
||||||
|
request := MakeFolderRequest{
|
||||||
|
FolderID: folderID,
|
||||||
|
Name: name,
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/folder/mkdir.cgi",
|
||||||
|
}
|
||||||
|
|
||||||
|
response = &MakeFolderResponse{}
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(&opts, &request, response)
|
||||||
|
return shouldRetry(resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't create folder")
|
||||||
|
}
|
||||||
|
|
||||||
|
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
|
||||||
|
|
||||||
|
return response, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKResponse, err error) {
|
||||||
|
// fs.Debugf(f, "Removing folder with id `%s`", directoryID)
|
||||||
|
|
||||||
|
request := &RemoveFolderRequest{
|
||||||
|
FolderID: folderID,
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/folder/rm.cgi",
|
||||||
|
}
|
||||||
|
|
||||||
|
response = &GenericOKResponse{}
|
||||||
|
var resp *http.Response
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err = f.rest.CallJSON(&opts, request, response)
|
||||||
|
return shouldRetry(resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't remove folder")
|
||||||
|
}
|
||||||
|
if response.Status != "OK" {
|
||||||
|
return nil, errors.New("Can't remove non-empty dir")
|
||||||
|
}
|
||||||
|
|
||||||
|
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
|
||||||
|
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
|
||||||
|
request := &RemoveFileRequest{
|
||||||
|
Files: []RmFile{
|
||||||
|
{url},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/file/rm.cgi",
|
||||||
|
}
|
||||||
|
|
||||||
|
response = &GenericOKResponse{}
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(&opts, request, response)
|
||||||
|
return shouldRetry(resp, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't remove file")
|
||||||
|
}
|
||||||
|
|
||||||
|
// fs.Debugf(f, "Removed file with url `%s`", url)
|
||||||
|
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
|
||||||
|
// fs.Debugf(f, "Requesting Upload node")
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
ContentType: "application/json", // 1Fichier API is bad
|
||||||
|
Path: "/upload/get_upload_server.cgi",
|
||||||
|
}
|
||||||
|
|
||||||
|
response = &GetUploadNodeResponse{}
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(&opts, nil, response)
|
||||||
|
return shouldRetry(resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "didnt got an upload node")
|
||||||
|
}
|
||||||
|
|
||||||
|
// fs.Debugf(f, "Got Upload node")
|
||||||
|
|
||||||
|
return response, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
|
||||||
|
// fs.Debugf(f, "Uploading File `%s`", fileName)
|
||||||
|
|
||||||
|
fileName = replaceReservedChars(fileName)
|
||||||
|
|
||||||
|
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
|
||||||
|
return nil, errors.New("Invalid UploadID")
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/upload.cgi",
|
||||||
|
Parameters: map[string][]string{
|
||||||
|
"id": {uploadID},
|
||||||
|
},
|
||||||
|
NoResponse: true,
|
||||||
|
Body: in,
|
||||||
|
ContentLength: &size,
|
||||||
|
MultipartContentName: "file[]",
|
||||||
|
MultipartFileName: fileName,
|
||||||
|
MultipartParams: map[string][]string{
|
||||||
|
"did": {folderID},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if node != "" {
|
||||||
|
opts.RootURL = "https://" + node
|
||||||
|
}
|
||||||
|
|
||||||
|
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(&opts, nil, nil)
|
||||||
|
return shouldRetry(resp, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't upload file")
|
||||||
|
}
|
||||||
|
|
||||||
|
// fs.Debugf(f, "Uploaded File `%s`", fileName)
|
||||||
|
|
||||||
|
return response, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// endUpload finalises the upload session uploadID on the upload node
// nodeurl and returns the link(s) for the uploaded file(s).
//
// uploadID is validated (short, alphanumeric) because it is inserted
// into the request URL.
func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
	// fs.Debugf(f, "Ending File Upload `%s`", uploadID)

	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
		return nil, errors.New("Invalid UploadID")
	}

	opts := rest.Opts{
		Method:  "GET",
		Path:    "/end.pl",
		RootURL: "https://" + nodeurl,
		Parameters: map[string][]string{
			"xid": {uploadID},
		},
		// Ask the node to reply in JSON rather than HTML.
		ExtraHeaders: map[string]string{
			"JSON": "1",
		},
	}

	response = &EndFileUploadResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(&opts, nil, response)
		return shouldRetry(resp, err)
	})

	if err != nil {
		return nil, errors.Wrap(err, "couldn't finish file upload")
	}

	return response, err
}
|
||||||
411
backend/fichier/fichier.go
Normal file
411
backend/fichier/fichier.go
Normal file
@@ -0,0 +1,411 @@
|
|||||||
|
package fichier
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/dircache"
|
||||||
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
|
"github.com/rclone/rclone/lib/rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	rootID         = "0"                           // the API's folder ID for the account root
	apiBaseURL     = "https://api.1fichier.com/v1" // base endpoint for all JSON API calls
	minSleep       = 334 * time.Millisecond        // 3 API calls per second is recommended
	maxSleep       = 5 * time.Second
	decayConstant  = 2 // bigger for slower decay, exponential
)
|
||||||
|
|
||||||
|
// init registers the 1Fichier backend with rclone so "fichier" remotes
// can be configured and instantiated.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "fichier",
		Description: "1Fichier",
		// No interactive configuration beyond the options listed below.
		Config: func(name string, config configmap.Mapper) {
		},
		NewFs: NewFs,
		Options: []fs.Option{
			{
				Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
				Name: "api_key",
			},
			{
				Help:     "If you want to download a shared folder, add this parameter",
				Name:     "shared_folder",
				Required: false,
				Advanced: true,
			},
		},
	})
}
|
||||||
|
|
||||||
|
// Options defines the configuration for this backend
type Options struct {
	// APIKey is sent as a Bearer token on every API request.
	APIKey string `config:"api_key"`
	// SharedFolder, when set, makes the remote list that shared folder
	// instead of the account's own files.
	SharedFolder string `config:"shared_folder"`
}
|
||||||
|
|
||||||
|
// Fs is the interface a cloud storage system must provide
type Fs struct {
	root       string             // the path we are working on
	name       string             // name of this remote (as passed into NewFs)
	features   *fs.Features       // optional features
	dirCache   *dircache.DirCache // cache of directory path -> folder ID
	baseClient *http.Client       // plain HTTP client
	options    *Options           // parsed backend configuration
	pacer      *fs.Pacer          // rate limiter for API calls
	rest       *rest.Client       // REST client rooted at the API base URL
}
|
||||||
|
|
||||||
|
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||||
|
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||||
|
folderID, err := strconv.Atoi(pathID)
|
||||||
|
if err != nil {
|
||||||
|
return "", false, err
|
||||||
|
}
|
||||||
|
folders, err := f.listFolders(folderID)
|
||||||
|
if err != nil {
|
||||||
|
return "", false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, folder := range folders.SubFolders {
|
||||||
|
if folder.Name == leaf {
|
||||||
|
pathIDOut := strconv.Itoa(folder.ID)
|
||||||
|
return pathIDOut, true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDir makes a directory with pathID as parent and name leaf
|
||||||
|
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
|
||||||
|
folderID, err := strconv.Atoi(pathID)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
resp, err := f.makeFolder(leaf, folderID)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return strconv.Itoa(resp.FolderID), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("1Fichier root '%s'", f.root)
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
	// The API provides no way to set modification times.
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
	// 1Fichier publishes Whirlpool checksums only.
	return hash.Set(hash.Whirlpool)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
||||||
|
|
||||||
|
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
//
// If rootleaf turns out to point at a file rather than a directory,
// the Fs is rerooted at the parent directory and fs.ErrorIsFile is
// returned (rclone's convention for file-rooted remotes).
func NewFs(name string, rootleaf string, config configmap.Mapper) (fs.Fs, error) {
	root := replaceReservedChars(rootleaf)
	opt := new(Options)
	err := configstruct.Set(config, opt)
	if err != nil {
		return nil, err
	}

	// If using a Shared Folder override root
	if opt.SharedFolder != "" {
		root = ""
	}

	//workaround for wonky parser
	root = strings.Trim(root, "/")

	f := &Fs{
		name:       name,
		root:       root,
		options:    opt,
		pacer:      fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		baseClient: &http.Client{},
	}

	f.features = (&fs.Features{
		DuplicateFiles:          true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)

	client := fshttp.NewClient(fs.Config)

	f.rest = rest.NewClient(client).SetRoot(apiBaseURL)

	// Every API request carries the API key as a Bearer token.
	f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)

	f.dirCache = dircache.New(root, rootID, f)

	ctx := context.Background()

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.NewObject(ctx, remote)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		f.features.Fill(&tempF)
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
|
||||||
|
|
||||||
|
// List the objects and directories in dir into entries. The
|
||||||
|
// entries can be returned in any order but should be for a
|
||||||
|
// complete directory.
|
||||||
|
//
|
||||||
|
// dir should be "" to list the root, and should not have
|
||||||
|
// trailing slashes.
|
||||||
|
//
|
||||||
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
|
// found.
|
||||||
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
|
if f.options.SharedFolder != "" {
|
||||||
|
return f.listSharedFiles(ctx, f.options.SharedFolder)
|
||||||
|
}
|
||||||
|
|
||||||
|
dirContent, err := f.listDir(ctx, dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return dirContent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
|
// it returns the error ErrorObjectNotFound.
|
||||||
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
|
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
|
||||||
|
if err != nil {
|
||||||
|
if err == fs.ErrorDirNotFound {
|
||||||
|
return nil, fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
folderID, err := strconv.Atoi(directoryID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
files, err := f.listFiles(folderID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range files.Items {
|
||||||
|
if file.Filename == leaf {
|
||||||
|
path, ok := f.dirCache.GetInv(directoryID)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("Cannot find dir in dircache")
|
||||||
|
}
|
||||||
|
|
||||||
|
return f.newObjectFromFile(ctx, path, file), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put in to the remote path with the modTime given of the given size
|
||||||
|
//
|
||||||
|
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
|
||||||
|
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
|
||||||
|
// return an error or upload it properly (rather than e.g. calling panic).
|
||||||
|
//
|
||||||
|
// May create the object even if it returns an error - if so
|
||||||
|
// will return the object and the error, otherwise will return
|
||||||
|
// nil and the error
|
||||||
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
|
exisitingObj, err := f.NewObject(ctx, src.Remote())
|
||||||
|
switch err {
|
||||||
|
case nil:
|
||||||
|
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
|
||||||
|
case fs.ErrorObjectNotFound:
|
||||||
|
// Not found so create it
|
||||||
|
return f.PutUnchecked(ctx, in, src, options...)
|
||||||
|
default:
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// putUnchecked uploads the object with the given name and size
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
//
// The upload is a three-step protocol: ask the API for an upload node
// and session ID, stream the file to that node, then finalise the
// session to obtain the resulting link(s).
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
	// 1Fichier rejects files over 100 GB and empty files.
	if size > int64(100E9) {
		return nil, errors.New("File too big, cant upload")
	} else if size == 0 {
		return nil, fs.ErrorCantUploadEmptyFiles
	}

	nodeResponse, err := f.getUploadNode()
	if err != nil {
		return nil, err
	}

	// Create parent directories as needed and resolve the target folder.
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}

	_, err = f.uploadFile(in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
	if err != nil {
		return nil, err
	}

	fileUploadResponse, err := f.endUpload(nodeResponse.ID, nodeResponse.URL)
	if err != nil {
		return nil, err
	}

	// Exactly one file was sent, so exactly one link is expected back.
	if len(fileUploadResponse.Links) != 1 {
		return nil, errors.New("unexpected amount of files")
	}

	link := fileUploadResponse.Links[0]
	// The node reports the size as a decimal string.
	fileSize, err := strconv.ParseInt(link.Size, 10, 64)

	if err != nil {
		return nil, err
	}

	// Build the Object from the link data; Date is synthesised locally
	// since the upload reply does not include a timestamp.
	return &Object{
		fs:     f,
		remote: remote,
		file: File{
			ACL:         0,
			CDN:         0,
			Checksum:    link.Whirlpool,
			ContentType: "",
			Date:        time.Now().Format("2006-01-02 15:04:05"),
			Filename:    link.Filename,
			Pass:        0,
			Size:        int(fileSize),
			URL:         link.Download,
		},
	}, nil
}
|
||||||
|
|
||||||
|
// PutUnchecked uploads the object
|
||||||
|
//
|
||||||
|
// This will create a duplicate if we upload a new file without
|
||||||
|
// checking to see if there is one already - use Put() for that.
|
||||||
|
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
|
return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mkdir makes the directory (container, bucket)
|
||||||
|
//
|
||||||
|
// Shouldn't return an error if it already exists
|
||||||
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
|
err := f.dirCache.FindRoot(ctx, true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if dir != "" {
|
||||||
|
_, err = f.dirCache.FindDir(ctx, dir, true)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rmdir removes the directory (container, bucket) if empty
|
||||||
|
//
|
||||||
|
// Return an error if it doesn't exist or isn't empty
|
||||||
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
|
err := f.dirCache.FindRoot(ctx, false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
folderID, err := strconv.Atoi(directoryID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = f.removeFolder(dir, folderID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
f.dirCache.FlushDir(dir)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the interfaces are satisfied at compile time.
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ dircache.DirCacher = (*Fs)(nil)
)
|
||||||
17
backend/fichier/fichier_test.go
Normal file
17
backend/fichier/fichier_test.go
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
// Test 1Fichier filesystem interface
|
||||||
|
package fichier
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	// NOTE(review): this mutates the process-wide log level for the whole
	// test binary — debug output appears intentional for this backend.
	fs.Config.LogLevel = fs.LogLevelDebug
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFichier:",
	})
}
|
||||||
158
backend/fichier/object.go
Normal file
158
backend/fichier/object.go
Normal file
@@ -0,0 +1,158 @@
|
|||||||
|
package fichier
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Object is a filesystem like object provided by an Fs
type Object struct {
	fs     *Fs    // parent filesystem
	remote string // path of the object relative to the Fs root
	file   File   // raw metadata as returned by the API
}
|
||||||
|
|
||||||
|
// String returns a description of the Object
func (o *Object) String() string {
	return o.file.Filename
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
	// The API reports dates as "YYYY-MM-DD hh:mm:ss" strings with no
	// zone, so time.Parse yields UTC; fall back to "now" if unparseable.
	modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date)

	if err != nil {
		return time.Now()
	}

	return modTime
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return int64(o.file.Size)
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	// 1Fichier publishes only a Whirlpool checksum.
	if t != hash.Whirlpool {
		return "", hash.ErrUnsupported
	}

	return o.file.Checksum, nil
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(context.Context, time.Time) error {
	return fs.ErrorCantSetModTime
	//return errors.New("setting modtime is not supported for 1fichier remotes")
}
|
||||||
|
|
||||||
|
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||||
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||||
|
fs.FixRangeOption(options, int64(o.file.Size))
|
||||||
|
downloadToken, err := o.fs.getDownloadToken(o.file.URL)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp *http.Response
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
RootURL: downloadToken.URL,
|
||||||
|
Options: options,
|
||||||
|
}
|
||||||
|
|
||||||
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err = o.fs.rest.Call(&opts)
|
||||||
|
return shouldRetry(resp, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.Body, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	// The upload-then-delete dance below needs a known size up front.
	if src.Size() < 0 {
		return errors.New("refusing to update with unknown size")
	}

	// upload with new size but old name
	info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
	if err != nil {
		return err
	}

	// Delete duplicate after successful upload
	err = o.Remove(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to remove old version")
	}

	// Replace guts of old object with new one
	*o = *info.(*Object)

	return nil
}
|
||||||
|
|
||||||
|
// Remove removes this object
|
||||||
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
|
// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)
|
||||||
|
|
||||||
|
_, err := o.fs.deleteFile(o.file.URL)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MimeType of an Object if known, "" otherwise
|
||||||
|
func (o *Object) MimeType(ctx context.Context) string {
|
||||||
|
return o.file.ContentType
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID returns the ID of the Object if known, or "" if not
|
||||||
|
func (o *Object) ID() string {
|
||||||
|
return o.file.URL
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the interfaces are satisfied
|
||||||
|
var (
|
||||||
|
_ fs.Object = (*Object)(nil)
|
||||||
|
_ fs.MimeTyper = (*Object)(nil)
|
||||||
|
_ fs.IDer = (*Object)(nil)
|
||||||
|
)
|
||||||
71
backend/fichier/replace.go
Normal file
71
backend/fichier/replace.go
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
/*
|
||||||
|
Translate file names for 1fichier
|
||||||
|
|
||||||
|
1Fichier reserved characters
|
||||||
|
|
||||||
|
The following characters are 1Fichier reserved characters, and can't
|
||||||
|
be used in 1Fichier folder and file names.
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
package fichier
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// charMap holds replacements for characters
|
||||||
|
//
|
||||||
|
// 1Fichier has a restricted set of characters compared to other cloud
|
||||||
|
// storage systems, so we to map these to the FULLWIDTH unicode
|
||||||
|
// equivalents
|
||||||
|
//
|
||||||
|
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
|
||||||
|
var (
|
||||||
|
charMap = map[rune]rune{
|
||||||
|
'\\': '\', // FULLWIDTH REVERSE SOLIDUS
|
||||||
|
'<': '<', // FULLWIDTH LESS-THAN SIGN
|
||||||
|
'>': '>', // FULLWIDTH GREATER-THAN SIGN
|
||||||
|
'"': '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
|
||||||
|
'\'': ''', // FULLWIDTH APOSTROPHE
|
||||||
|
'$': '$', // FULLWIDTH DOLLAR SIGN
|
||||||
|
'`': '`', // FULLWIDTH GRAVE ACCENT
|
||||||
|
' ': '␠', // SYMBOL FOR SPACE
|
||||||
|
}
|
||||||
|
invCharMap map[rune]rune
|
||||||
|
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Create inverse charMap
|
||||||
|
invCharMap = make(map[rune]rune, len(charMap))
|
||||||
|
for k, v := range charMap {
|
||||||
|
invCharMap[v] = k
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// replaceReservedChars takes a path and substitutes any reserved
|
||||||
|
// characters in it
|
||||||
|
func replaceReservedChars(in string) string {
|
||||||
|
// file names can't start with space either
|
||||||
|
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
|
||||||
|
// Replace reserved characters
|
||||||
|
return strings.Map(func(c rune) rune {
|
||||||
|
if replacement, ok := charMap[c]; ok && c != ' ' {
|
||||||
|
return replacement
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}, in)
|
||||||
|
}
|
||||||
|
|
||||||
|
// restoreReservedChars takes a path and undoes any substitutions
|
||||||
|
// made by replaceReservedChars
|
||||||
|
func restoreReservedChars(in string) string {
|
||||||
|
return strings.Map(func(c rune) rune {
|
||||||
|
if replacement, ok := invCharMap[c]; ok {
|
||||||
|
return replacement
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}, in)
|
||||||
|
}
|
||||||
24
backend/fichier/replace_test.go
Normal file
24
backend/fichier/replace_test.go
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
package fichier
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestReplace(t *testing.T) {
|
||||||
|
for _, test := range []struct {
|
||||||
|
in string
|
||||||
|
out string
|
||||||
|
}{
|
||||||
|
{"", ""},
|
||||||
|
{"abc 123", "abc 123"},
|
||||||
|
{"\"'<>/\\$`", `"'<>/\$``},
|
||||||
|
{" leading space", "␠leading space"},
|
||||||
|
} {
|
||||||
|
got := replaceReservedChars(test.in)
|
||||||
|
if got != test.out {
|
||||||
|
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
|
||||||
|
}
|
||||||
|
got2 := restoreReservedChars(got)
|
||||||
|
if got2 != test.in {
|
||||||
|
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
120
backend/fichier/structs.go
Normal file
120
backend/fichier/structs.go
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
package fichier
|
||||||
|
|
||||||
|
// ListFolderRequest is the request structure of the corresponding request
|
||||||
|
type ListFolderRequest struct {
|
||||||
|
FolderID int `json:"folder_id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFilesRequest is the request structure of the corresponding request
|
||||||
|
type ListFilesRequest struct {
|
||||||
|
FolderID int `json:"folder_id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadRequest is the request structure of the corresponding request
|
||||||
|
type DownloadRequest struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
Single int `json:"single"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveFolderRequest is the request structure of the corresponding request
|
||||||
|
type RemoveFolderRequest struct {
|
||||||
|
FolderID int `json:"folder_id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveFileRequest is the request structure of the corresponding request
|
||||||
|
type RemoveFileRequest struct {
|
||||||
|
Files []RmFile `json:"files"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RmFile is the request structure of the corresponding request
|
||||||
|
type RmFile struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenericOKResponse is the response structure of the corresponding request
|
||||||
|
type GenericOKResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeFolderRequest is the request structure of the corresponding request
|
||||||
|
type MakeFolderRequest struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
FolderID int `json:"folder_id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeFolderResponse is the response structure of the corresponding request
|
||||||
|
type MakeFolderResponse struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
FolderID int `json:"folder_id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUploadNodeResponse is the response structure of the corresponding request
|
||||||
|
type GetUploadNodeResponse struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
URL string `json:"url"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTokenResponse is the response structure of the corresponding request
|
||||||
|
type GetTokenResponse struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
Status string `json:"Status"`
|
||||||
|
Message string `json:"Message"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SharedFolderResponse is the response structure of the corresponding request
|
||||||
|
type SharedFolderResponse []SharedFile
|
||||||
|
|
||||||
|
// SharedFile is the structure how 1Fichier returns a shared File
|
||||||
|
type SharedFile struct {
|
||||||
|
Filename string `json:"filename"`
|
||||||
|
Link string `json:"link"`
|
||||||
|
Size int `json:"size"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndFileUploadResponse is the response structure of the corresponding request
|
||||||
|
type EndFileUploadResponse struct {
|
||||||
|
Incoming int `json:"incoming"`
|
||||||
|
Links []struct {
|
||||||
|
Download string `json:"download"`
|
||||||
|
Filename string `json:"filename"`
|
||||||
|
Remove string `json:"remove"`
|
||||||
|
Size string `json:"size"`
|
||||||
|
Whirlpool string `json:"whirlpool"`
|
||||||
|
} `json:"links"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// File is the structure how 1Fichier returns a File
|
||||||
|
type File struct {
|
||||||
|
ACL int `json:"acl"`
|
||||||
|
CDN int `json:"cdn"`
|
||||||
|
Checksum string `json:"checksum"`
|
||||||
|
ContentType string `json:"content-type"`
|
||||||
|
Date string `json:"date"`
|
||||||
|
Filename string `json:"filename"`
|
||||||
|
Pass int `json:"pass"`
|
||||||
|
Size int `json:"size"`
|
||||||
|
URL string `json:"url"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilesList is the structure how 1Fichier returns a list of files
|
||||||
|
type FilesList struct {
|
||||||
|
Items []File `json:"items"`
|
||||||
|
Status string `json:"Status"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Folder is the structure how 1Fichier returns a Folder
|
||||||
|
type Folder struct {
|
||||||
|
CreateDate string `json:"create_date"`
|
||||||
|
ID int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Pass int `json:"pass"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FoldersList is the structure how 1Fichier returns a list of Folders
|
||||||
|
type FoldersList struct {
|
||||||
|
FolderID int `json:"folder_id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Status string `json:"Status"`
|
||||||
|
SubFolders []Folder `json:"sub_folders"`
|
||||||
|
}
|
||||||
@@ -2,6 +2,8 @@
|
|||||||
package ftp
|
package ftp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
"io"
|
"io"
|
||||||
"net/textproto"
|
"net/textproto"
|
||||||
"os"
|
"os"
|
||||||
@@ -10,13 +12,14 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/jlaffaye/ftp"
|
"github.com/jlaffaye/ftp"
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
|
||||||
"github.com/ncw/rclone/fs/config/obscure"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/ncw/rclone/lib/readers"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
|
"github.com/rclone/rclone/lib/readers"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
@@ -45,6 +48,20 @@ func init() {
|
|||||||
Help: "FTP password",
|
Help: "FTP password",
|
||||||
IsPassword: true,
|
IsPassword: true,
|
||||||
Required: true,
|
Required: true,
|
||||||
|
}, {
|
||||||
|
Name: "tls",
|
||||||
|
Help: "Use FTP over TLS (Implicit)",
|
||||||
|
Default: false,
|
||||||
|
}, {
|
||||||
|
Name: "concurrency",
|
||||||
|
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
|
||||||
|
Default: 0,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "no_check_certificate",
|
||||||
|
Help: "Do not verify the TLS certificate of the server",
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
@@ -52,10 +69,13 @@ func init() {
|
|||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
Host string `config:"host"`
|
Host string `config:"host"`
|
||||||
User string `config:"user"`
|
User string `config:"user"`
|
||||||
Pass string `config:"pass"`
|
Pass string `config:"pass"`
|
||||||
Port string `config:"port"`
|
Port string `config:"port"`
|
||||||
|
TLS bool `config:"tls"`
|
||||||
|
Concurrency int `config:"concurrency"`
|
||||||
|
SkipVerifyTLSCert bool `config:"no_check_certificate"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote FTP server
|
// Fs represents a remote FTP server
|
||||||
@@ -70,6 +90,7 @@ type Fs struct {
|
|||||||
dialAddr string
|
dialAddr string
|
||||||
poolMu sync.Mutex
|
poolMu sync.Mutex
|
||||||
pool []*ftp.ServerConn
|
pool []*ftp.ServerConn
|
||||||
|
tokens *pacer.TokenDispenser
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes an FTP file
|
// Object describes an FTP file
|
||||||
@@ -112,7 +133,15 @@ func (f *Fs) Features() *fs.Features {
|
|||||||
// Open a new connection to the FTP server.
|
// Open a new connection to the FTP server.
|
||||||
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
|
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
|
||||||
fs.Debugf(f, "Connecting to FTP server")
|
fs.Debugf(f, "Connecting to FTP server")
|
||||||
c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
|
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
|
||||||
|
if f.opt.TLS {
|
||||||
|
tlsConfig := &tls.Config{
|
||||||
|
ServerName: f.opt.Host,
|
||||||
|
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
|
||||||
|
}
|
||||||
|
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
|
||||||
|
}
|
||||||
|
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
|
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
|
||||||
return nil, errors.Wrap(err, "ftpConnection Dial")
|
return nil, errors.Wrap(err, "ftpConnection Dial")
|
||||||
@@ -128,6 +157,9 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
|
|||||||
|
|
||||||
// Get an FTP connection from the pool, or open a new one
|
// Get an FTP connection from the pool, or open a new one
|
||||||
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
|
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
|
||||||
|
if f.opt.Concurrency > 0 {
|
||||||
|
f.tokens.Get()
|
||||||
|
}
|
||||||
f.poolMu.Lock()
|
f.poolMu.Lock()
|
||||||
if len(f.pool) > 0 {
|
if len(f.pool) > 0 {
|
||||||
c = f.pool[0]
|
c = f.pool[0]
|
||||||
@@ -147,6 +179,9 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
|
|||||||
// if err is not nil then it checks the connection is alive using a
|
// if err is not nil then it checks the connection is alive using a
|
||||||
// NOOP request
|
// NOOP request
|
||||||
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
||||||
|
if f.opt.Concurrency > 0 {
|
||||||
|
defer f.tokens.Put()
|
||||||
|
}
|
||||||
c := *pc
|
c := *pc
|
||||||
*pc = nil
|
*pc = nil
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -166,8 +201,9 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
|||||||
f.poolMu.Unlock()
|
f.poolMu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFs contstructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||||
|
ctx := context.Background()
|
||||||
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
|
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
@@ -189,7 +225,11 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
dialAddr := opt.Host + ":" + port
|
dialAddr := opt.Host + ":" + port
|
||||||
u := "ftp://" + path.Join(dialAddr+"/", root)
|
protocol := "ftp://"
|
||||||
|
if opt.TLS {
|
||||||
|
protocol = "ftps://"
|
||||||
|
}
|
||||||
|
u := protocol + path.Join(dialAddr+"/", root)
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
root: root,
|
root: root,
|
||||||
@@ -198,6 +238,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
|||||||
user: user,
|
user: user,
|
||||||
pass: pass,
|
pass: pass,
|
||||||
dialAddr: dialAddr,
|
dialAddr: dialAddr,
|
||||||
|
tokens: pacer.NewTokenDispenser(opt.Concurrency),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
@@ -215,7 +256,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
|||||||
if f.root == "." {
|
if f.root == "." {
|
||||||
f.root = ""
|
f.root = ""
|
||||||
}
|
}
|
||||||
_, err := f.NewObject(remote)
|
_, err := f.NewObject(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
||||||
// File doesn't exist so return old f
|
// File doesn't exist so return old f
|
||||||
@@ -280,7 +321,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
|
|||||||
|
|
||||||
// NewObject finds the Object at remote. If it can't be found
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
// it returns the error fs.ErrorObjectNotFound.
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
||||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||||
entry, err := f.findItem(remote)
|
entry, err := f.findItem(remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -324,17 +365,42 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
|
|||||||
//
|
//
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
|
// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
|
||||||
c, err := f.getFtpConnection()
|
c, err := f.getFtpConnection()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "list")
|
return nil, errors.Wrap(err, "list")
|
||||||
}
|
}
|
||||||
files, err := c.List(path.Join(f.root, dir))
|
|
||||||
f.putFtpConnection(&c, err)
|
var listErr error
|
||||||
if err != nil {
|
var files []*ftp.Entry
|
||||||
return nil, translateErrorDir(err)
|
|
||||||
|
resultchan := make(chan []*ftp.Entry, 1)
|
||||||
|
errchan := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
result, err := c.List(path.Join(f.root, dir))
|
||||||
|
f.putFtpConnection(&c, err)
|
||||||
|
if err != nil {
|
||||||
|
errchan <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
resultchan <- result
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for List for up to Timeout seconds
|
||||||
|
timer := time.NewTimer(fs.Config.Timeout)
|
||||||
|
select {
|
||||||
|
case listErr = <-errchan:
|
||||||
|
timer.Stop()
|
||||||
|
return nil, translateErrorDir(listErr)
|
||||||
|
case files = <-resultchan:
|
||||||
|
timer.Stop()
|
||||||
|
case <-timer.C:
|
||||||
|
// if timer fired assume no error but connection dead
|
||||||
|
fs.Errorf(f, "Timeout when waiting for List")
|
||||||
|
return nil, errors.New("Timeout when waiting for List")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Annoyingly FTP returns success for a directory which
|
// Annoyingly FTP returns success for a directory which
|
||||||
// doesn't exist, so check it really doesn't exist if no
|
// doesn't exist, so check it really doesn't exist if no
|
||||||
// entries found.
|
// entries found.
|
||||||
@@ -389,7 +455,7 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
// May create the object even if it returns an error - if so
|
// May create the object even if it returns an error - if so
|
||||||
// will return the object and the error, otherwise will return
|
// will return the object and the error, otherwise will return
|
||||||
// nil and the error
|
// nil and the error
|
||||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
// fs.Debugf(f, "Trying to put file %s", src.Remote())
|
// fs.Debugf(f, "Trying to put file %s", src.Remote())
|
||||||
err := f.mkParentDir(src.Remote())
|
err := f.mkParentDir(src.Remote())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -399,13 +465,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
|
|||||||
fs: f,
|
fs: f,
|
||||||
remote: src.Remote(),
|
remote: src.Remote(),
|
||||||
}
|
}
|
||||||
err = o.Update(in, src, options...)
|
err = o.Update(ctx, in, src, options...)
|
||||||
return o, err
|
return o, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
return f.Put(in, src, options...)
|
return f.Put(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// getInfo reads the FileInfo for a path
|
// getInfo reads the FileInfo for a path
|
||||||
@@ -483,7 +549,7 @@ func (f *Fs) mkParentDir(remote string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Mkdir creates the directory if it doesn't exist
|
// Mkdir creates the directory if it doesn't exist
|
||||||
func (f *Fs) Mkdir(dir string) (err error) {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||||
// defer fs.Trace(dir, "")("err=%v", &err)
|
// defer fs.Trace(dir, "")("err=%v", &err)
|
||||||
root := path.Join(f.root, dir)
|
root := path.Join(f.root, dir)
|
||||||
return f.mkdir(root)
|
return f.mkdir(root)
|
||||||
@@ -492,7 +558,7 @@ func (f *Fs) Mkdir(dir string) (err error) {
|
|||||||
// Rmdir removes the directory (container, bucket) if empty
|
// Rmdir removes the directory (container, bucket) if empty
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist or isn't empty
|
// Return an error if it doesn't exist or isn't empty
|
||||||
func (f *Fs) Rmdir(dir string) error {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
c, err := f.getFtpConnection()
|
c, err := f.getFtpConnection()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(translateErrorFile(err), "Rmdir")
|
return errors.Wrap(translateErrorFile(err), "Rmdir")
|
||||||
@@ -503,7 +569,7 @@ func (f *Fs) Rmdir(dir string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Move renames a remote file object
|
// Move renames a remote file object
|
||||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
srcObj, ok := src.(*Object)
|
srcObj, ok := src.(*Object)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(src, "Can't move - not same remote type")
|
fs.Debugf(src, "Can't move - not same remote type")
|
||||||
@@ -525,7 +591,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "Move Rename failed")
|
return nil, errors.Wrap(err, "Move Rename failed")
|
||||||
}
|
}
|
||||||
dstObj, err := f.NewObject(remote)
|
dstObj, err := f.NewObject(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "Move NewObject failed")
|
return nil, errors.Wrap(err, "Move NewObject failed")
|
||||||
}
|
}
|
||||||
@@ -540,7 +606,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
// If it isn't possible then return fs.ErrorCantDirMove
|
// If it isn't possible then return fs.ErrorCantDirMove
|
||||||
//
|
//
|
||||||
// If destination exists then return fs.ErrorDirExists
|
// If destination exists then return fs.ErrorDirExists
|
||||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||||
srcFs, ok := src.(*Fs)
|
srcFs, ok := src.(*Fs)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||||
@@ -603,7 +669,7 @@ func (o *Object) Remote() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Hash returns the hash of an object returning a lowercase hex string
|
// Hash returns the hash of an object returning a lowercase hex string
|
||||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||||
return "", hash.ErrUnsupported
|
return "", hash.ErrUnsupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -613,12 +679,12 @@ func (o *Object) Size() int64 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ModTime returns the modification time of the object
|
// ModTime returns the modification time of the object
|
||||||
func (o *Object) ModTime() time.Time {
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
return o.info.ModTime
|
return o.info.ModTime
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetModTime sets the modification time of the object
|
// SetModTime sets the modification time of the object
|
||||||
func (o *Object) SetModTime(modTime time.Time) error {
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -679,7 +745,7 @@ func (f *ftpReadCloser) Close() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open an object for read
|
// Open an object for read
|
||||||
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||||
// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
|
// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
|
||||||
path := path.Join(o.fs.root, o.remote)
|
path := path.Join(o.fs.root, o.remote)
|
||||||
var offset, limit int64 = 0, -1
|
var offset, limit int64 = 0, -1
|
||||||
@@ -713,7 +779,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
|||||||
// Copy the reader into the object updating modTime and size
|
// Copy the reader into the object updating modTime and size
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||||
// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
|
// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
|
||||||
path := path.Join(o.fs.root, o.remote)
|
path := path.Join(o.fs.root, o.remote)
|
||||||
// remove the file if upload failed
|
// remove the file if upload failed
|
||||||
@@ -723,7 +789,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
|
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
|
||||||
// able to think of a better method to find out if the server has finished - ncw
|
// able to think of a better method to find out if the server has finished - ncw
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
removeErr := o.Remove()
|
removeErr := o.Remove(ctx)
|
||||||
if removeErr != nil {
|
if removeErr != nil {
|
||||||
fs.Debugf(o, "Failed to remove: %v", removeErr)
|
fs.Debugf(o, "Failed to remove: %v", removeErr)
|
||||||
} else {
|
} else {
|
||||||
@@ -749,7 +815,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove() (err error) {
|
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||||
// defer fs.Trace(o, "")("err=%v", &err)
|
// defer fs.Trace(o, "")("err=%v", &err)
|
||||||
path := path.Join(o.fs.root, o.remote)
|
path := path.Join(o.fs.root, o.remote)
|
||||||
// Check if it's a directory or a file
|
// Check if it's a directory or a file
|
||||||
@@ -758,7 +824,7 @@ func (o *Object) Remove() (err error) {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if info.IsDir {
|
if info.IsDir {
|
||||||
err = o.fs.Rmdir(o.remote)
|
err = o.fs.Rmdir(ctx, o.remote)
|
||||||
} else {
|
} else {
|
||||||
c, err := o.fs.getFtpConnection()
|
c, err := o.fs.getFtpConnection()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ package ftp_test
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/ftp"
|
"github.com/rclone/rclone/backend/ftp"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
|
|||||||
@@ -1,7 +1,4 @@
|
|||||||
// Package googlecloudstorage provides an interface to Google Cloud Storage
|
// Package googlecloudstorage provides an interface to Google Cloud Storage
|
||||||
|
|
||||||
// +build go1.9
|
|
||||||
|
|
||||||
package googlecloudstorage
|
package googlecloudstorage
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -16,6 +13,7 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -25,26 +23,27 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/config"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
|
||||||
"github.com/ncw/rclone/fs/config/obscure"
|
|
||||||
"github.com/ncw/rclone/fs/fserrors"
|
|
||||||
"github.com/ncw/rclone/fs/fshttp"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/ncw/rclone/fs/walk"
|
|
||||||
"github.com/ncw/rclone/lib/oauthutil"
|
|
||||||
"github.com/ncw/rclone/lib/pacer"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/fs/walk"
|
||||||
|
"github.com/rclone/rclone/lib/bucket"
|
||||||
|
"github.com/rclone/rclone/lib/oauthutil"
|
||||||
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
"golang.org/x/oauth2/google"
|
"golang.org/x/oauth2/google"
|
||||||
"google.golang.org/api/googleapi"
|
"google.golang.org/api/googleapi"
|
||||||
|
|
||||||
|
// NOTE: This API is deprecated
|
||||||
storage "google.golang.org/api/storage/v1"
|
storage "google.golang.org/api/storage/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -61,7 +60,7 @@ const (
|
|||||||
var (
|
var (
|
||||||
// Description of how to auth for this app
|
// Description of how to auth for this app
|
||||||
storageConfig = &oauth2.Config{
|
storageConfig = &oauth2.Config{
|
||||||
Scopes: []string{storage.DevstorageFullControlScope},
|
Scopes: []string{storage.DevstorageReadWriteScope},
|
||||||
Endpoint: google.Endpoint,
|
Endpoint: google.Endpoint,
|
||||||
ClientID: rcloneClientID,
|
ClientID: rcloneClientID,
|
||||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||||
@@ -144,6 +143,22 @@ func init() {
|
|||||||
Value: "publicReadWrite",
|
Value: "publicReadWrite",
|
||||||
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
|
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
|
||||||
}},
|
}},
|
||||||
|
}, {
|
||||||
|
Name: "bucket_policy_only",
|
||||||
|
Help: `Access checks should use bucket-level IAM policies.
|
||||||
|
|
||||||
|
If you want to upload objects to a bucket with Bucket Policy Only set
|
||||||
|
then you will need to set this.
|
||||||
|
|
||||||
|
When it is set, rclone:
|
||||||
|
|
||||||
|
- ignores ACLs set on buckets
|
||||||
|
- ignores ACLs set on objects
|
||||||
|
- creates buckets with Bucket Policy Only set
|
||||||
|
|
||||||
|
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||||
|
`,
|
||||||
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "location",
|
Name: "location",
|
||||||
Help: "Location for the newly created buckets.",
|
Help: "Location for the newly created buckets.",
|
||||||
@@ -162,21 +177,36 @@ func init() {
|
|||||||
}, {
|
}, {
|
||||||
Value: "asia-east1",
|
Value: "asia-east1",
|
||||||
Help: "Taiwan.",
|
Help: "Taiwan.",
|
||||||
|
}, {
|
||||||
|
Value: "asia-east2",
|
||||||
|
Help: "Hong Kong.",
|
||||||
}, {
|
}, {
|
||||||
Value: "asia-northeast1",
|
Value: "asia-northeast1",
|
||||||
Help: "Tokyo.",
|
Help: "Tokyo.",
|
||||||
|
}, {
|
||||||
|
Value: "asia-south1",
|
||||||
|
Help: "Mumbai.",
|
||||||
}, {
|
}, {
|
||||||
Value: "asia-southeast1",
|
Value: "asia-southeast1",
|
||||||
Help: "Singapore.",
|
Help: "Singapore.",
|
||||||
}, {
|
}, {
|
||||||
Value: "australia-southeast1",
|
Value: "australia-southeast1",
|
||||||
Help: "Sydney.",
|
Help: "Sydney.",
|
||||||
|
}, {
|
||||||
|
Value: "europe-north1",
|
||||||
|
Help: "Finland.",
|
||||||
}, {
|
}, {
|
||||||
Value: "europe-west1",
|
Value: "europe-west1",
|
||||||
Help: "Belgium.",
|
Help: "Belgium.",
|
||||||
}, {
|
}, {
|
||||||
Value: "europe-west2",
|
Value: "europe-west2",
|
||||||
Help: "London.",
|
Help: "London.",
|
||||||
|
}, {
|
||||||
|
Value: "europe-west3",
|
||||||
|
Help: "Frankfurt.",
|
||||||
|
}, {
|
||||||
|
Value: "europe-west4",
|
||||||
|
Help: "Netherlands.",
|
||||||
}, {
|
}, {
|
||||||
Value: "us-central1",
|
Value: "us-central1",
|
||||||
Help: "Iowa.",
|
Help: "Iowa.",
|
||||||
@@ -189,6 +219,9 @@ func init() {
|
|||||||
}, {
|
}, {
|
||||||
Value: "us-west1",
|
Value: "us-west1",
|
||||||
Help: "Oregon.",
|
Help: "Oregon.",
|
||||||
|
}, {
|
||||||
|
Value: "us-west2",
|
||||||
|
Help: "California.",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "storage_class",
|
Name: "storage_class",
|
||||||
@@ -223,22 +256,23 @@ type Options struct {
|
|||||||
ServiceAccountCredentials string `config:"service_account_credentials"`
|
ServiceAccountCredentials string `config:"service_account_credentials"`
|
||||||
ObjectACL string `config:"object_acl"`
|
ObjectACL string `config:"object_acl"`
|
||||||
BucketACL string `config:"bucket_acl"`
|
BucketACL string `config:"bucket_acl"`
|
||||||
|
BucketPolicyOnly bool `config:"bucket_policy_only"`
|
||||||
Location string `config:"location"`
|
Location string `config:"location"`
|
||||||
StorageClass string `config:"storage_class"`
|
StorageClass string `config:"storage_class"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote storage server
|
// Fs represents a remote storage server
|
||||||
type Fs struct {
|
type Fs struct {
|
||||||
name string // name of this remote
|
name string // name of this remote
|
||||||
root string // the path we are working on if any
|
root string // the path we are working on if any
|
||||||
opt Options // parsed options
|
opt Options // parsed options
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
svc *storage.Service // the connection to the storage server
|
svc *storage.Service // the connection to the storage server
|
||||||
client *http.Client // authorized client
|
client *http.Client // authorized client
|
||||||
bucket string // the bucket we are working on
|
rootBucket string // bucket part of root (if any)
|
||||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
rootDirectory string // directory part of root (if any)
|
||||||
bucketOK bool // true if we have created the bucket
|
cache *bucket.Cache // cache of bucket status
|
||||||
pacer *pacer.Pacer // To pace the API calls
|
pacer *fs.Pacer // To pace the API calls
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes a storage object
|
// Object describes a storage object
|
||||||
@@ -263,18 +297,18 @@ func (f *Fs) Name() string {
|
|||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
// Root of the remote (as passed into NewFs)
|
||||||
func (f *Fs) Root() string {
|
func (f *Fs) Root() string {
|
||||||
if f.root == "" {
|
return f.root
|
||||||
return f.bucket
|
|
||||||
}
|
|
||||||
return f.bucket + "/" + f.root
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// String converts this Fs to a string
|
// String converts this Fs to a string
|
||||||
func (f *Fs) String() string {
|
func (f *Fs) String() string {
|
||||||
if f.root == "" {
|
if f.rootBucket == "" {
|
||||||
return fmt.Sprintf("Storage bucket %s", f.bucket)
|
return fmt.Sprintf("GCS root")
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
|
if f.rootDirectory == "" {
|
||||||
|
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
// Features returns the optional features of this Fs
|
||||||
@@ -282,7 +316,7 @@ func (f *Fs) Features() *fs.Features {
|
|||||||
return f.features
|
return f.features
|
||||||
}
|
}
|
||||||
|
|
||||||
// shouldRetry determines whehter a given err rates being retried
|
// shouldRetry determines whether a given err rates being retried
|
||||||
func shouldRetry(err error) (again bool, errOut error) {
|
func shouldRetry(err error) (again bool, errOut error) {
|
||||||
again = false
|
again = false
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -306,21 +340,23 @@ func shouldRetry(err error) (again bool, errOut error) {
|
|||||||
return again, err
|
return again, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pattern to match a storage path
|
// parsePath parses a remote 'url'
|
||||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
func parsePath(path string) (root string) {
|
||||||
|
root = strings.Trim(path, "/")
|
||||||
// parseParse parses a storage 'url'
|
|
||||||
func parsePath(path string) (bucket, directory string, err error) {
|
|
||||||
parts := matcher.FindStringSubmatch(path)
|
|
||||||
if parts == nil {
|
|
||||||
err = errors.Errorf("couldn't find bucket in storage path %q", path)
|
|
||||||
} else {
|
|
||||||
bucket, directory = parts[1], parts[2]
|
|
||||||
directory = strings.Trim(directory, "/")
|
|
||||||
}
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// split returns bucket and bucketPath from the rootRelativePath
|
||||||
|
// relative to f.root
|
||||||
|
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
|
||||||
|
return bucket.Split(path.Join(f.root, rootRelativePath))
|
||||||
|
}
|
||||||
|
|
||||||
|
// split returns bucket and bucketPath from the object
|
||||||
|
func (o *Object) split() (bucket, bucketPath string) {
|
||||||
|
return o.fs.split(o.remote)
|
||||||
|
}
|
||||||
|
|
||||||
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
||||||
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
|
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -330,7 +366,13 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
|||||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFs contstructs an Fs from the path, bucket:path
|
// setRoot changes the root of the Fs
|
||||||
|
func (f *Fs) setRoot(root string) {
|
||||||
|
f.root = parsePath(root)
|
||||||
|
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFs constructs an Fs from the path, bucket:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
var oAuthClient *http.Client
|
var oAuthClient *http.Client
|
||||||
|
|
||||||
@@ -363,26 +405,27 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
} else {
|
} else {
|
||||||
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
|
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
|
ctx := context.Background()
|
||||||
|
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bucket, directory, err := parsePath(root)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
bucket: bucket,
|
root: root,
|
||||||
root: directory,
|
opt: *opt,
|
||||||
opt: *opt,
|
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
|
||||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
|
cache: bucket.NewCache(),
|
||||||
}
|
}
|
||||||
|
f.setRoot(root)
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
WriteMimeType: true,
|
WriteMimeType: true,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
|
BucketBasedRootOK: true,
|
||||||
}).Fill(f)
|
}).Fill(f)
|
||||||
|
|
||||||
// Create a new authorized Drive client.
|
// Create a new authorized Drive client.
|
||||||
@@ -392,20 +435,18 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
|
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.root != "" {
|
if f.rootBucket != "" && f.rootDirectory != "" {
|
||||||
f.root += "/"
|
|
||||||
// Check to see if the object exists
|
// Check to see if the object exists
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
_, err = f.svc.Objects.Get(bucket, directory).Do()
|
_, err = f.svc.Objects.Get(f.rootBucket, f.rootDirectory).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(err)
|
||||||
})
|
})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
f.root = path.Dir(directory)
|
newRoot := path.Dir(f.root)
|
||||||
if f.root == "." {
|
if newRoot == "." {
|
||||||
f.root = ""
|
newRoot = ""
|
||||||
} else {
|
|
||||||
f.root += "/"
|
|
||||||
}
|
}
|
||||||
|
f.setRoot(newRoot)
|
||||||
// return an error with an fs which points to the parent
|
// return an error with an fs which points to the parent
|
||||||
return f, fs.ErrorIsFile
|
return f, fs.ErrorIsFile
|
||||||
}
|
}
|
||||||
@@ -434,7 +475,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,
|
|||||||
|
|
||||||
// NewObject finds the Object at remote. If it can't be found
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
// it returns the error fs.ErrorObjectNotFound.
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
return f.newObjectWithInfo(remote, nil)
|
return f.newObjectWithInfo(remote, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -446,13 +487,17 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
|
|||||||
// dir is the starting directory, "" for root
|
// dir is the starting directory, "" for root
|
||||||
//
|
//
|
||||||
// Set recurse to read sub directories
|
// Set recurse to read sub directories
|
||||||
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
|
//
|
||||||
root := f.root
|
// The remote has prefix removed from it and if addBucket is set
|
||||||
rootLength := len(root)
|
// then it adds the bucket to the start.
|
||||||
if dir != "" {
|
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
|
||||||
root += dir + "/"
|
if prefix != "" {
|
||||||
|
prefix += "/"
|
||||||
}
|
}
|
||||||
list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
|
if directory != "" {
|
||||||
|
directory += "/"
|
||||||
|
}
|
||||||
|
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
|
||||||
if !recurse {
|
if !recurse {
|
||||||
list = list.Delimiter("/")
|
list = list.Delimiter("/")
|
||||||
}
|
}
|
||||||
@@ -472,31 +517,36 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
|
|||||||
}
|
}
|
||||||
if !recurse {
|
if !recurse {
|
||||||
var object storage.Object
|
var object storage.Object
|
||||||
for _, prefix := range objects.Prefixes {
|
for _, remote := range objects.Prefixes {
|
||||||
if !strings.HasSuffix(prefix, "/") {
|
if !strings.HasSuffix(remote, "/") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
err = fn(prefix[rootLength:len(prefix)-1], &object, true)
|
if !strings.HasPrefix(remote, prefix) {
|
||||||
|
fs.Logf(f, "Odd name received %q", remote)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
remote = remote[len(prefix) : len(remote)-1]
|
||||||
|
if addBucket {
|
||||||
|
remote = path.Join(bucket, remote)
|
||||||
|
}
|
||||||
|
err = fn(remote, &object, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, object := range objects.Items {
|
for _, object := range objects.Items {
|
||||||
if !strings.HasPrefix(object.Name, root) {
|
if !strings.HasPrefix(object.Name, prefix) {
|
||||||
fs.Logf(f, "Odd name received %q", object.Name)
|
fs.Logf(f, "Odd name received %q", object.Name)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
remote := object.Name[rootLength:]
|
remote := object.Name[len(prefix):]
|
||||||
|
isDirectory := strings.HasSuffix(remote, "/")
|
||||||
|
if addBucket {
|
||||||
|
remote = path.Join(bucket, remote)
|
||||||
|
}
|
||||||
// is this a directory marker?
|
// is this a directory marker?
|
||||||
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
|
if isDirectory && object.Size == 0 {
|
||||||
if recurse && remote != "" {
|
|
||||||
// add a directory in if --fast-list since will have no prefixes
|
|
||||||
err = fn(remote[:len(remote)-1], object, true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue // skip directory marker
|
continue // skip directory marker
|
||||||
}
|
}
|
||||||
err = fn(remote, object, false)
|
err = fn(remote, object, false)
|
||||||
@@ -525,19 +575,10 @@ func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory b
|
|||||||
return o, nil
|
return o, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// mark the bucket as being OK
|
|
||||||
func (f *Fs) markBucketOK() {
|
|
||||||
if f.bucket != "" {
|
|
||||||
f.bucketOKMu.Lock()
|
|
||||||
f.bucketOK = true
|
|
||||||
f.bucketOKMu.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// listDir lists a single directory
|
// listDir lists a single directory
|
||||||
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
|
||||||
// List the objects
|
// List the objects
|
||||||
err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
|
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
|
||||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -551,15 +592,12 @@ func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// bucket must be present if listing succeeded
|
// bucket must be present if listing succeeded
|
||||||
f.markBucketOK()
|
f.cache.MarkOK(bucket)
|
||||||
return entries, err
|
return entries, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// listBuckets lists the buckets
|
// listBuckets lists the buckets
|
||||||
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||||
if dir != "" {
|
|
||||||
return nil, fs.ErrorListBucketRequired
|
|
||||||
}
|
|
||||||
if f.opt.ProjectNumber == "" {
|
if f.opt.ProjectNumber == "" {
|
||||||
return nil, errors.New("can't list buckets without project number")
|
return nil, errors.New("can't list buckets without project number")
|
||||||
}
|
}
|
||||||
@@ -594,11 +632,15 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
|
|||||||
//
|
//
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
if f.bucket == "" {
|
bucket, directory := f.split(dir)
|
||||||
return f.listBuckets(dir)
|
if bucket == "" {
|
||||||
|
if directory != "" {
|
||||||
|
return nil, fs.ErrorListBucketRequired
|
||||||
|
}
|
||||||
|
return f.listBuckets(ctx)
|
||||||
}
|
}
|
||||||
return f.listDir(dir)
|
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
@@ -617,23 +659,44 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
//
|
//
|
||||||
// Don't implement this unless you have a more efficient way
|
// Don't implement this unless you have a more efficient way
|
||||||
// of listing recursively that doing a directory traversal.
|
// of listing recursively that doing a directory traversal.
|
||||||
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||||
if f.bucket == "" {
|
bucket, directory := f.split(dir)
|
||||||
return fs.ErrorListBucketRequired
|
|
||||||
}
|
|
||||||
list := walk.NewListRHelper(callback)
|
list := walk.NewListRHelper(callback)
|
||||||
err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
|
listR := func(bucket, directory, prefix string, addBucket bool) error {
|
||||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
|
||||||
|
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return list.Add(entry)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if bucket == "" {
|
||||||
|
entries, err := f.listBuckets(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return list.Add(entry)
|
for _, entry := range entries {
|
||||||
})
|
err = list.Add(entry)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
}
|
||||||
|
bucket := entry.Remote()
|
||||||
|
err = listR(bucket, "", f.rootDirectory, true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// bucket must be present if listing succeeded
|
||||||
|
f.cache.MarkOK(bucket)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// bucket must be present if listing succeeded
|
||||||
|
f.cache.MarkOK(bucket)
|
||||||
}
|
}
|
||||||
// bucket must be present if listing succeeded
|
|
||||||
f.markBucketOK()
|
|
||||||
return list.Flush()
|
return list.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -642,83 +705,88 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
|||||||
// Copy the reader in to the new object which is returned
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
// Temporary Object under construction
|
// Temporary Object under construction
|
||||||
o := &Object{
|
o := &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
remote: src.Remote(),
|
remote: src.Remote(),
|
||||||
}
|
}
|
||||||
return o, o.Update(in, src, options...)
|
return o, o.Update(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
return f.Put(in, src, options...)
|
return f.Put(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mkdir creates the bucket if it doesn't exist
|
// Mkdir creates the bucket if it doesn't exist
|
||||||
func (f *Fs) Mkdir(dir string) (err error) {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||||
f.bucketOKMu.Lock()
|
bucket, _ := f.split(dir)
|
||||||
defer f.bucketOKMu.Unlock()
|
return f.makeBucket(ctx, bucket)
|
||||||
if f.bucketOK {
|
}
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// List something from the bucket to see if it exists. Doing it like this enables the use of a
|
|
||||||
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
|
||||||
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
// makeBucket creates the bucket if it doesn't exist
|
||||||
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
|
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||||
return shouldRetry(err)
|
return f.cache.Create(bucket, func() error {
|
||||||
})
|
// List something from the bucket to see if it exists. Doing it like this enables the use of a
|
||||||
if err == nil {
|
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
||||||
// Bucket already exists
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
f.bucketOK = true
|
_, err = f.svc.Objects.List(bucket).MaxResults(1).Do()
|
||||||
return nil
|
return shouldRetry(err)
|
||||||
} else if gErr, ok := err.(*googleapi.Error); ok {
|
})
|
||||||
if gErr.Code != http.StatusNotFound {
|
if err == nil {
|
||||||
|
// Bucket already exists
|
||||||
|
return nil
|
||||||
|
} else if gErr, ok := err.(*googleapi.Error); ok {
|
||||||
|
if gErr.Code != http.StatusNotFound {
|
||||||
|
return errors.Wrap(err, "failed to get bucket")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
return errors.Wrap(err, "failed to get bucket")
|
return errors.Wrap(err, "failed to get bucket")
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
return errors.Wrap(err, "failed to get bucket")
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.opt.ProjectNumber == "" {
|
if f.opt.ProjectNumber == "" {
|
||||||
return errors.New("can't make bucket without project number")
|
return errors.New("can't make bucket without project number")
|
||||||
}
|
}
|
||||||
|
|
||||||
bucket := storage.Bucket{
|
bucket := storage.Bucket{
|
||||||
Name: f.bucket,
|
Name: bucket,
|
||||||
Location: f.opt.Location,
|
Location: f.opt.Location,
|
||||||
StorageClass: f.opt.StorageClass,
|
StorageClass: f.opt.StorageClass,
|
||||||
}
|
}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
if f.opt.BucketPolicyOnly {
|
||||||
_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
|
bucket.IamConfiguration = &storage.BucketIamConfiguration{
|
||||||
return shouldRetry(err)
|
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
|
||||||
})
|
Enabled: true,
|
||||||
if err == nil {
|
},
|
||||||
f.bucketOK = true
|
}
|
||||||
}
|
}
|
||||||
return err
|
return f.pacer.Call(func() (bool, error) {
|
||||||
|
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
|
||||||
|
if !f.opt.BucketPolicyOnly {
|
||||||
|
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
||||||
|
}
|
||||||
|
_, err = insertBucket.Do()
|
||||||
|
return shouldRetry(err)
|
||||||
|
})
|
||||||
|
}, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rmdir deletes the bucket if the fs is at the root
|
// Rmdir deletes the bucket if the fs is at the root
|
||||||
//
|
//
|
||||||
// Returns an error if it isn't empty: Error 409: The bucket you tried
|
// Returns an error if it isn't empty: Error 409: The bucket you tried
|
||||||
// to delete was not empty.
|
// to delete was not empty.
|
||||||
func (f *Fs) Rmdir(dir string) (err error) {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
||||||
f.bucketOKMu.Lock()
|
bucket, directory := f.split(dir)
|
||||||
defer f.bucketOKMu.Unlock()
|
if bucket == "" || directory != "" {
|
||||||
if f.root != "" || dir != "" {
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
return f.cache.Remove(bucket, func() error {
|
||||||
err = f.svc.Buckets.Delete(f.bucket).Do()
|
return f.pacer.Call(func() (bool, error) {
|
||||||
return shouldRetry(err)
|
err = f.svc.Buckets.Delete(bucket).Do()
|
||||||
|
return shouldRetry(err)
|
||||||
|
})
|
||||||
})
|
})
|
||||||
if err == nil {
|
|
||||||
f.bucketOK = false
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Precision returns the precision
|
// Precision returns the precision
|
||||||
@@ -735,8 +803,9 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantCopy
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
err := f.Mkdir("")
|
dstBucket, dstPath := f.split(remote)
|
||||||
|
err := f.makeBucket(ctx, dstBucket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -745,6 +814,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
fs.Debugf(src, "Can't copy - not same remote type")
|
fs.Debugf(src, "Can't copy - not same remote type")
|
||||||
return nil, fs.ErrorCantCopy
|
return nil, fs.ErrorCantCopy
|
||||||
}
|
}
|
||||||
|
srcBucket, srcPath := srcObj.split()
|
||||||
|
|
||||||
// Temporary Object under construction
|
// Temporary Object under construction
|
||||||
dstObj := &Object{
|
dstObj := &Object{
|
||||||
@@ -752,13 +822,9 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
remote: remote,
|
remote: remote,
|
||||||
}
|
}
|
||||||
|
|
||||||
srcBucket := srcObj.fs.bucket
|
|
||||||
srcObject := srcObj.fs.root + srcObj.remote
|
|
||||||
dstBucket := f.bucket
|
|
||||||
dstObject := f.root + remote
|
|
||||||
var newObject *storage.Object
|
var newObject *storage.Object
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
|
newObject, err = f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -795,7 +861,7 @@ func (o *Object) Remote() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Hash returns the Md5sum of an object returning a lowercase hex string
|
// Hash returns the Md5sum of an object returning a lowercase hex string
|
||||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||||
if t != hash.MD5 {
|
if t != hash.MD5 {
|
||||||
return "", hash.ErrUnsupported
|
return "", hash.ErrUnsupported
|
||||||
}
|
}
|
||||||
@@ -848,9 +914,10 @@ func (o *Object) readMetaData() (err error) {
|
|||||||
if !o.modTime.IsZero() {
|
if !o.modTime.IsZero() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
bucket, bucketPath := o.split()
|
||||||
var object *storage.Object
|
var object *storage.Object
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
|
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -869,7 +936,7 @@ func (o *Object) readMetaData() (err error) {
|
|||||||
//
|
//
|
||||||
// It attempts to read the objects mtime and if that isn't present the
|
// It attempts to read the objects mtime and if that isn't present the
|
||||||
// LastModified returned in the http headers
|
// LastModified returned in the http headers
|
||||||
func (o *Object) ModTime() time.Time {
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
err := o.readMetaData()
|
err := o.readMetaData()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// fs.Logf(o, "Failed to read metadata: %v", err)
|
// fs.Logf(o, "Failed to read metadata: %v", err)
|
||||||
@@ -886,16 +953,17 @@ func metadataFromModTime(modTime time.Time) map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
// SetModTime sets the modification time of the local fs object
|
||||||
func (o *Object) SetModTime(modTime time.Time) (err error) {
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
|
||||||
// This only adds metadata so will perserve other metadata
|
// This only adds metadata so will perserve other metadata
|
||||||
|
bucket, bucketPath := o.split()
|
||||||
object := storage.Object{
|
object := storage.Object{
|
||||||
Bucket: o.fs.bucket,
|
Bucket: bucket,
|
||||||
Name: o.fs.root + o.remote,
|
Name: bucketPath,
|
||||||
Metadata: metadataFromModTime(modTime),
|
Metadata: metadataFromModTime(modTime),
|
||||||
}
|
}
|
||||||
var newObject *storage.Object
|
var newObject *storage.Object
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
|
newObject, err = o.fs.svc.Objects.Patch(bucket, bucketPath, &object).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -911,11 +979,12 @@ func (o *Object) Storable() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open an object for read
|
// Open an object for read
|
||||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
req, err := http.NewRequest("GET", o.url, nil)
|
req, err := http.NewRequest("GET", o.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
fs.FixRangeOption(options, o.bytes)
|
||||||
fs.OpenOptionAddHTTPHeaders(req.Header, options)
|
fs.OpenOptionAddHTTPHeaders(req.Header, options)
|
||||||
var res *http.Response
|
var res *http.Response
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
@@ -942,23 +1011,27 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
|||||||
// Update the object with the contents of the io.Reader, modTime and size
|
// Update the object with the contents of the io.Reader, modTime and size
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||||
err := o.fs.Mkdir("")
|
bucket, bucketPath := o.split()
|
||||||
|
err := o.fs.makeBucket(ctx, bucket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
modTime := src.ModTime()
|
modTime := src.ModTime(ctx)
|
||||||
|
|
||||||
object := storage.Object{
|
object := storage.Object{
|
||||||
Bucket: o.fs.bucket,
|
Bucket: bucket,
|
||||||
Name: o.fs.root + o.remote,
|
Name: bucketPath,
|
||||||
ContentType: fs.MimeType(src),
|
ContentType: fs.MimeType(ctx, src),
|
||||||
Updated: modTime.Format(timeFormatOut), // Doesn't get set
|
|
||||||
Metadata: metadataFromModTime(modTime),
|
Metadata: metadataFromModTime(modTime),
|
||||||
}
|
}
|
||||||
var newObject *storage.Object
|
var newObject *storage.Object
|
||||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||||
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
|
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
|
||||||
|
if !o.fs.opt.BucketPolicyOnly {
|
||||||
|
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
|
||||||
|
}
|
||||||
|
newObject, err = insertObject.Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -970,16 +1043,17 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove() (err error) {
|
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||||
|
bucket, bucketPath := o.split()
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
|
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(err)
|
||||||
})
|
})
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// MimeType of an Object if known, "" otherwise
|
// MimeType of an Object if known, "" otherwise
|
||||||
func (o *Object) MimeType() string {
|
func (o *Object) MimeType(ctx context.Context) string {
|
||||||
return o.mimeType
|
return o.mimeType
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,14 +1,12 @@
|
|||||||
// Test GoogleCloudStorage filesystem interface
|
// Test GoogleCloudStorage filesystem interface
|
||||||
|
|
||||||
// +build go1.9
|
|
||||||
|
|
||||||
package googlecloudstorage_test
|
package googlecloudstorage_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/googlecloudstorage"
|
"github.com/rclone/rclone/backend/googlecloudstorage"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
|
|||||||
@@ -1,6 +0,0 @@
|
|||||||
// Build for unsupported platforms to stop go complaining
|
|
||||||
// about "no buildable Go source files "
|
|
||||||
|
|
||||||
// +build !go1.9
|
|
||||||
|
|
||||||
package googlecloudstorage
|
|
||||||
148
backend/googlephotos/albums.go
Normal file
148
backend/googlephotos/albums.go
Normal file
@@ -0,0 +1,148 @@
|
|||||||
|
// This file contains the albums abstraction
|
||||||
|
|
||||||
|
package googlephotos
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||||
|
)
|
||||||
|
|
||||||
|
// All the albums
|
||||||
|
type albums struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
dupes map[string][]*api.Album // duplicated names
|
||||||
|
byID map[string]*api.Album //..indexed by ID
|
||||||
|
byTitle map[string]*api.Album //..indexed by Title
|
||||||
|
path map[string][]string // partial album names to directory
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new album
|
||||||
|
func newAlbums() *albums {
|
||||||
|
return &albums{
|
||||||
|
dupes: map[string][]*api.Album{},
|
||||||
|
byID: map[string]*api.Album{},
|
||||||
|
byTitle: map[string]*api.Album{},
|
||||||
|
path: map[string][]string{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// add an album
|
||||||
|
func (as *albums) add(album *api.Album) {
|
||||||
|
// Munge the name of the album into a sensible path name
|
||||||
|
album.Title = path.Clean(album.Title)
|
||||||
|
if album.Title == "." || album.Title == "/" {
|
||||||
|
album.Title = addID("", album.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
as.mu.Lock()
|
||||||
|
as._add(album)
|
||||||
|
as.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// _add an album - call with lock held
|
||||||
|
func (as *albums) _add(album *api.Album) {
|
||||||
|
// update dupes by title
|
||||||
|
dupes := as.dupes[album.Title]
|
||||||
|
dupes = append(dupes, album)
|
||||||
|
as.dupes[album.Title] = dupes
|
||||||
|
|
||||||
|
// Dedupe the album name if necessary
|
||||||
|
if len(dupes) >= 2 {
|
||||||
|
// If this is the first dupe, then need to adjust the first one
|
||||||
|
if len(dupes) == 2 {
|
||||||
|
firstAlbum := dupes[0]
|
||||||
|
as._del(firstAlbum)
|
||||||
|
as._add(firstAlbum)
|
||||||
|
// undo add of firstAlbum to dupes
|
||||||
|
as.dupes[album.Title] = dupes
|
||||||
|
}
|
||||||
|
album.Title = addID(album.Title, album.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store the new album
|
||||||
|
as.byID[album.ID] = album
|
||||||
|
as.byTitle[album.Title] = album
|
||||||
|
|
||||||
|
// Store the partial paths
|
||||||
|
dir, leaf := album.Title, ""
|
||||||
|
for dir != "" {
|
||||||
|
i := strings.LastIndex(dir, "/")
|
||||||
|
if i >= 0 {
|
||||||
|
dir, leaf = dir[:i], dir[i+1:]
|
||||||
|
} else {
|
||||||
|
dir, leaf = "", dir
|
||||||
|
}
|
||||||
|
dirs := as.path[dir]
|
||||||
|
found := false
|
||||||
|
for _, dir := range dirs {
|
||||||
|
if dir == leaf {
|
||||||
|
found = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
as.path[dir] = append(as.path[dir], leaf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// del an album
|
||||||
|
func (as *albums) del(album *api.Album) {
|
||||||
|
as.mu.Lock()
|
||||||
|
as._del(album)
|
||||||
|
as.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// _del an album - call with lock held
|
||||||
|
func (as *albums) _del(album *api.Album) {
|
||||||
|
// We leave in dupes so it doesn't cause albums to get renamed
|
||||||
|
|
||||||
|
// Remove from byID and byTitle
|
||||||
|
delete(as.byID, album.ID)
|
||||||
|
delete(as.byTitle, album.Title)
|
||||||
|
|
||||||
|
// Remove from paths
|
||||||
|
dir, leaf := album.Title, ""
|
||||||
|
for dir != "" {
|
||||||
|
// Can't delete if this dir exists anywhere in the path structure
|
||||||
|
if _, found := as.path[dir]; found {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
i := strings.LastIndex(dir, "/")
|
||||||
|
if i >= 0 {
|
||||||
|
dir, leaf = dir[:i], dir[i+1:]
|
||||||
|
} else {
|
||||||
|
dir, leaf = "", dir
|
||||||
|
}
|
||||||
|
dirs := as.path[dir]
|
||||||
|
for i, dir := range dirs {
|
||||||
|
if dir == leaf {
|
||||||
|
dirs = append(dirs[:i], dirs[i+1:]...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(dirs) == 0 {
|
||||||
|
delete(as.path, dir)
|
||||||
|
} else {
|
||||||
|
as.path[dir] = dirs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// get an album by title
|
||||||
|
func (as *albums) get(title string) (album *api.Album, ok bool) {
|
||||||
|
as.mu.Lock()
|
||||||
|
defer as.mu.Unlock()
|
||||||
|
album, ok = as.byTitle[title]
|
||||||
|
return album, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDirs gets directories below an album path
|
||||||
|
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
|
||||||
|
as.mu.Lock()
|
||||||
|
defer as.mu.Unlock()
|
||||||
|
dirs, ok = as.path[albumPath]
|
||||||
|
return dirs, ok
|
||||||
|
}
|
||||||
311
backend/googlephotos/albums_test.go
Normal file
311
backend/googlephotos/albums_test.go
Normal file
@@ -0,0 +1,311 @@
|
|||||||
|
package googlephotos
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewAlbums(t *testing.T) {
|
||||||
|
albums := newAlbums()
|
||||||
|
assert.NotNil(t, albums.dupes)
|
||||||
|
assert.NotNil(t, albums.byID)
|
||||||
|
assert.NotNil(t, albums.byTitle)
|
||||||
|
assert.NotNil(t, albums.path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAlbumsAdd(t *testing.T) {
|
||||||
|
albums := newAlbums()
|
||||||
|
|
||||||
|
assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
|
||||||
|
assert.Equal(t, map[string]*api.Album{}, albums.byID)
|
||||||
|
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
|
||||||
|
assert.Equal(t, map[string][]string{}, albums.path)
|
||||||
|
|
||||||
|
a1 := &api.Album{
|
||||||
|
Title: "one",
|
||||||
|
ID: "1",
|
||||||
|
}
|
||||||
|
albums.add(a1)
|
||||||
|
|
||||||
|
assert.Equal(t, map[string][]*api.Album{
|
||||||
|
"one": []*api.Album{a1},
|
||||||
|
}, albums.dupes)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"1": a1,
|
||||||
|
}, albums.byID)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"one": a1,
|
||||||
|
}, albums.byTitle)
|
||||||
|
assert.Equal(t, map[string][]string{
|
||||||
|
"": []string{"one"},
|
||||||
|
}, albums.path)
|
||||||
|
|
||||||
|
a2 := &api.Album{
|
||||||
|
Title: "two",
|
||||||
|
ID: "2",
|
||||||
|
}
|
||||||
|
albums.add(a2)
|
||||||
|
|
||||||
|
assert.Equal(t, map[string][]*api.Album{
|
||||||
|
"one": []*api.Album{a1},
|
||||||
|
"two": []*api.Album{a2},
|
||||||
|
}, albums.dupes)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"1": a1,
|
||||||
|
"2": a2,
|
||||||
|
}, albums.byID)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"one": a1,
|
||||||
|
"two": a2,
|
||||||
|
}, albums.byTitle)
|
||||||
|
assert.Equal(t, map[string][]string{
|
||||||
|
"": []string{"one", "two"},
|
||||||
|
}, albums.path)
|
||||||
|
|
||||||
|
// Add a duplicate
|
||||||
|
a2a := &api.Album{
|
||||||
|
Title: "two",
|
||||||
|
ID: "2a",
|
||||||
|
}
|
||||||
|
albums.add(a2a)
|
||||||
|
|
||||||
|
assert.Equal(t, map[string][]*api.Album{
|
||||||
|
"one": []*api.Album{a1},
|
||||||
|
"two": []*api.Album{a2, a2a},
|
||||||
|
}, albums.dupes)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"1": a1,
|
||||||
|
"2": a2,
|
||||||
|
"2a": a2a,
|
||||||
|
}, albums.byID)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"one": a1,
|
||||||
|
"two {2}": a2,
|
||||||
|
"two {2a}": a2a,
|
||||||
|
}, albums.byTitle)
|
||||||
|
assert.Equal(t, map[string][]string{
|
||||||
|
"": []string{"one", "two {2}", "two {2a}"},
|
||||||
|
}, albums.path)
|
||||||
|
|
||||||
|
// Add a sub directory
|
||||||
|
a1sub := &api.Album{
|
||||||
|
Title: "one/sub",
|
||||||
|
ID: "1sub",
|
||||||
|
}
|
||||||
|
albums.add(a1sub)
|
||||||
|
|
||||||
|
assert.Equal(t, map[string][]*api.Album{
|
||||||
|
"one": []*api.Album{a1},
|
||||||
|
"two": []*api.Album{a2, a2a},
|
||||||
|
"one/sub": []*api.Album{a1sub},
|
||||||
|
}, albums.dupes)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"1": a1,
|
||||||
|
"2": a2,
|
||||||
|
"2a": a2a,
|
||||||
|
"1sub": a1sub,
|
||||||
|
}, albums.byID)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"one": a1,
|
||||||
|
"one/sub": a1sub,
|
||||||
|
"two {2}": a2,
|
||||||
|
"two {2a}": a2a,
|
||||||
|
}, albums.byTitle)
|
||||||
|
assert.Equal(t, map[string][]string{
|
||||||
|
"": []string{"one", "two {2}", "two {2a}"},
|
||||||
|
"one": []string{"sub"},
|
||||||
|
}, albums.path)
|
||||||
|
|
||||||
|
// Add a weird path
|
||||||
|
a0 := &api.Album{
|
||||||
|
Title: "/../././..////.",
|
||||||
|
ID: "0",
|
||||||
|
}
|
||||||
|
albums.add(a0)
|
||||||
|
|
||||||
|
assert.Equal(t, map[string][]*api.Album{
|
||||||
|
"{0}": []*api.Album{a0},
|
||||||
|
"one": []*api.Album{a1},
|
||||||
|
"two": []*api.Album{a2, a2a},
|
||||||
|
"one/sub": []*api.Album{a1sub},
|
||||||
|
}, albums.dupes)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"0": a0,
|
||||||
|
"1": a1,
|
||||||
|
"2": a2,
|
||||||
|
"2a": a2a,
|
||||||
|
"1sub": a1sub,
|
||||||
|
}, albums.byID)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"{0}": a0,
|
||||||
|
"one": a1,
|
||||||
|
"one/sub": a1sub,
|
||||||
|
"two {2}": a2,
|
||||||
|
"two {2a}": a2a,
|
||||||
|
}, albums.byTitle)
|
||||||
|
assert.Equal(t, map[string][]string{
|
||||||
|
"": []string{"one", "two {2}", "two {2a}", "{0}"},
|
||||||
|
"one": []string{"sub"},
|
||||||
|
}, albums.path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAlbumsDel(t *testing.T) {
|
||||||
|
albums := newAlbums()
|
||||||
|
|
||||||
|
a1 := &api.Album{
|
||||||
|
Title: "one",
|
||||||
|
ID: "1",
|
||||||
|
}
|
||||||
|
albums.add(a1)
|
||||||
|
|
||||||
|
a2 := &api.Album{
|
||||||
|
Title: "two",
|
||||||
|
ID: "2",
|
||||||
|
}
|
||||||
|
albums.add(a2)
|
||||||
|
|
||||||
|
// Add a duplicate
|
||||||
|
a2a := &api.Album{
|
||||||
|
Title: "two",
|
||||||
|
ID: "2a",
|
||||||
|
}
|
||||||
|
albums.add(a2a)
|
||||||
|
|
||||||
|
// Add a sub directory
|
||||||
|
a1sub := &api.Album{
|
||||||
|
Title: "one/sub",
|
||||||
|
ID: "1sub",
|
||||||
|
}
|
||||||
|
albums.add(a1sub)
|
||||||
|
|
||||||
|
assert.Equal(t, map[string][]*api.Album{
|
||||||
|
"one": []*api.Album{a1},
|
||||||
|
"two": []*api.Album{a2, a2a},
|
||||||
|
"one/sub": []*api.Album{a1sub},
|
||||||
|
}, albums.dupes)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"1": a1,
|
||||||
|
"2": a2,
|
||||||
|
"2a": a2a,
|
||||||
|
"1sub": a1sub,
|
||||||
|
}, albums.byID)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"one": a1,
|
||||||
|
"one/sub": a1sub,
|
||||||
|
"two {2}": a2,
|
||||||
|
"two {2a}": a2a,
|
||||||
|
}, albums.byTitle)
|
||||||
|
assert.Equal(t, map[string][]string{
|
||||||
|
"": []string{"one", "two {2}", "two {2a}"},
|
||||||
|
"one": []string{"sub"},
|
||||||
|
}, albums.path)
|
||||||
|
|
||||||
|
albums.del(a1)
|
||||||
|
|
||||||
|
assert.Equal(t, map[string][]*api.Album{
|
||||||
|
"one": []*api.Album{a1},
|
||||||
|
"two": []*api.Album{a2, a2a},
|
||||||
|
"one/sub": []*api.Album{a1sub},
|
||||||
|
}, albums.dupes)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"2": a2,
|
||||||
|
"2a": a2a,
|
||||||
|
"1sub": a1sub,
|
||||||
|
}, albums.byID)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"one/sub": a1sub,
|
||||||
|
"two {2}": a2,
|
||||||
|
"two {2a}": a2a,
|
||||||
|
}, albums.byTitle)
|
||||||
|
assert.Equal(t, map[string][]string{
|
||||||
|
"": []string{"one", "two {2}", "two {2a}"},
|
||||||
|
"one": []string{"sub"},
|
||||||
|
}, albums.path)
|
||||||
|
|
||||||
|
albums.del(a2)
|
||||||
|
|
||||||
|
assert.Equal(t, map[string][]*api.Album{
|
||||||
|
"one": []*api.Album{a1},
|
||||||
|
"two": []*api.Album{a2, a2a},
|
||||||
|
"one/sub": []*api.Album{a1sub},
|
||||||
|
}, albums.dupes)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"2a": a2a,
|
||||||
|
"1sub": a1sub,
|
||||||
|
}, albums.byID)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"one/sub": a1sub,
|
||||||
|
"two {2a}": a2a,
|
||||||
|
}, albums.byTitle)
|
||||||
|
assert.Equal(t, map[string][]string{
|
||||||
|
"": []string{"one", "two {2a}"},
|
||||||
|
"one": []string{"sub"},
|
||||||
|
}, albums.path)
|
||||||
|
|
||||||
|
albums.del(a2a)
|
||||||
|
|
||||||
|
assert.Equal(t, map[string][]*api.Album{
|
||||||
|
"one": []*api.Album{a1},
|
||||||
|
"two": []*api.Album{a2, a2a},
|
||||||
|
"one/sub": []*api.Album{a1sub},
|
||||||
|
}, albums.dupes)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"1sub": a1sub,
|
||||||
|
}, albums.byID)
|
||||||
|
assert.Equal(t, map[string]*api.Album{
|
||||||
|
"one/sub": a1sub,
|
||||||
|
}, albums.byTitle)
|
||||||
|
assert.Equal(t, map[string][]string{
|
||||||
|
"": []string{"one"},
|
||||||
|
"one": []string{"sub"},
|
||||||
|
}, albums.path)
|
||||||
|
|
||||||
|
albums.del(a1sub)
|
||||||
|
|
||||||
|
assert.Equal(t, map[string][]*api.Album{
|
||||||
|
"one": []*api.Album{a1},
|
||||||
|
"two": []*api.Album{a2, a2a},
|
||||||
|
"one/sub": []*api.Album{a1sub},
|
||||||
|
}, albums.dupes)
|
||||||
|
assert.Equal(t, map[string]*api.Album{}, albums.byID)
|
||||||
|
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
|
||||||
|
assert.Equal(t, map[string][]string{}, albums.path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAlbumsGet(t *testing.T) {
|
||||||
|
albums := newAlbums()
|
||||||
|
|
||||||
|
a1 := &api.Album{
|
||||||
|
Title: "one",
|
||||||
|
ID: "1",
|
||||||
|
}
|
||||||
|
albums.add(a1)
|
||||||
|
|
||||||
|
album, ok := albums.get("one")
|
||||||
|
assert.Equal(t, true, ok)
|
||||||
|
assert.Equal(t, a1, album)
|
||||||
|
|
||||||
|
album, ok = albums.get("notfound")
|
||||||
|
assert.Equal(t, false, ok)
|
||||||
|
assert.Nil(t, album)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAlbumsGetDirs(t *testing.T) {
|
||||||
|
albums := newAlbums()
|
||||||
|
|
||||||
|
a1 := &api.Album{
|
||||||
|
Title: "one",
|
||||||
|
ID: "1",
|
||||||
|
}
|
||||||
|
albums.add(a1)
|
||||||
|
|
||||||
|
dirs, ok := albums.getDirs("")
|
||||||
|
assert.Equal(t, true, ok)
|
||||||
|
assert.Equal(t, []string{"one"}, dirs)
|
||||||
|
|
||||||
|
dirs, ok = albums.getDirs("notfound")
|
||||||
|
assert.Equal(t, false, ok)
|
||||||
|
assert.Nil(t, dirs)
|
||||||
|
}
|
||||||
190
backend/googlephotos/api/types.go
Normal file
190
backend/googlephotos/api/types.go
Normal file
@@ -0,0 +1,190 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrorDetails in the internals of the Error type
|
||||||
|
type ErrorDetails struct {
|
||||||
|
Code int `json:"code"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error is returned on errors
|
||||||
|
type Error struct {
|
||||||
|
Details ErrorDetails `json:"error"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error statisfies error interface
|
||||||
|
func (e *Error) Error() string {
|
||||||
|
return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Album of photos
|
||||||
|
type Album struct {
|
||||||
|
ID string `json:"id,omitempty"`
|
||||||
|
Title string `json:"title"`
|
||||||
|
ProductURL string `json:"productUrl,omitempty"`
|
||||||
|
MediaItemsCount string `json:"mediaItemsCount,omitempty"`
|
||||||
|
CoverPhotoBaseURL string `json:"coverPhotoBaseUrl,omitempty"`
|
||||||
|
CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
|
||||||
|
IsWriteable bool `json:"isWriteable,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListAlbums is returned from albums.list and sharedAlbums.list
|
||||||
|
type ListAlbums struct {
|
||||||
|
Albums []Album `json:"albums"`
|
||||||
|
SharedAlbums []Album `json:"sharedAlbums"`
|
||||||
|
NextPageToken string `json:"nextPageToken"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateAlbum creates an Album
|
||||||
|
type CreateAlbum struct {
|
||||||
|
Album *Album `json:"album"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaItem is a photo or video
|
||||||
|
type MediaItem struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
ProductURL string `json:"productUrl"`
|
||||||
|
BaseURL string `json:"baseUrl"`
|
||||||
|
MimeType string `json:"mimeType"`
|
||||||
|
MediaMetadata struct {
|
||||||
|
CreationTime time.Time `json:"creationTime"`
|
||||||
|
Width string `json:"width"`
|
||||||
|
Height string `json:"height"`
|
||||||
|
Photo struct {
|
||||||
|
} `json:"photo"`
|
||||||
|
} `json:"mediaMetadata"`
|
||||||
|
Filename string `json:"filename"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaItems is returned from mediaitems.list, mediaitems.search
|
||||||
|
type MediaItems struct {
|
||||||
|
MediaItems []MediaItem `json:"mediaItems"`
|
||||||
|
NextPageToken string `json:"nextPageToken"`
|
||||||
|
}
|
||||||
|
|
||||||
|
//Content categories
|
||||||
|
// NONE Default content category. This category is ignored when any other category is used in the filter.
|
||||||
|
// LANDSCAPES Media items containing landscapes.
|
||||||
|
// RECEIPTS Media items containing receipts.
|
||||||
|
// CITYSCAPES Media items containing cityscapes.
|
||||||
|
// LANDMARKS Media items containing landmarks.
|
||||||
|
// SELFIES Media items that are selfies.
|
||||||
|
// PEOPLE Media items containing people.
|
||||||
|
// PETS Media items containing pets.
|
||||||
|
// WEDDINGS Media items from weddings.
|
||||||
|
// BIRTHDAYS Media items from birthdays.
|
||||||
|
// DOCUMENTS Media items containing documents.
|
||||||
|
// TRAVEL Media items taken during travel.
|
||||||
|
// ANIMALS Media items containing animals.
|
||||||
|
// FOOD Media items containing food.
|
||||||
|
// SPORT Media items from sporting events.
|
||||||
|
// NIGHT Media items taken at night.
|
||||||
|
// PERFORMANCES Media items from performances.
|
||||||
|
// WHITEBOARDS Media items containing whiteboards.
|
||||||
|
// SCREENSHOTS Media items that are screenshots.
|
||||||
|
// UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc.
|
||||||
|
// ARTS Media items containing art.
|
||||||
|
// CRAFTS Media items containing crafts.
|
||||||
|
// FASHION Media items related to fashion.
|
||||||
|
// HOUSES Media items containing houses.
|
||||||
|
// GARDENS Media items containing gardens.
|
||||||
|
// FLOWERS Media items containing flowers.
|
||||||
|
// HOLIDAYS Media items taken of holidays.
|
||||||
|
|
||||||
|
// MediaTypes
|
||||||
|
// ALL_MEDIA Treated as if no filters are applied. All media types are included.
|
||||||
|
// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
|
||||||
|
// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.
|
||||||
|
|
||||||
|
// Features
|
||||||
|
// NONE Treated as if no filters are applied. All features are included.
|
||||||
|
// FAVORITES Media items that the user has marked as favorites in the Google Photos app.
|
||||||
|
|
||||||
|
// Date is used as part of SearchFilter
|
||||||
|
type Date struct {
|
||||||
|
Year int `json:"year,omitempty"`
|
||||||
|
Month int `json:"month,omitempty"`
|
||||||
|
Day int `json:"day,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DateFilter is uses to add date ranges to media item queries
|
||||||
|
type DateFilter struct {
|
||||||
|
Dates []Date `json:"dates,omitempty"`
|
||||||
|
Ranges []struct {
|
||||||
|
StartDate Date `json:"startDate,omitempty"`
|
||||||
|
EndDate Date `json:"endDate,omitempty"`
|
||||||
|
} `json:"ranges,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentFilter is uses to add content categories to media item queries
|
||||||
|
type ContentFilter struct {
|
||||||
|
IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
|
||||||
|
ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// MediaTypeFilter is uses to add media types to media item queries
|
||||||
|
type MediaTypeFilter struct {
|
||||||
|
MediaTypes []string `json:"mediaTypes,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FeatureFilter is uses to add features to media item queries
|
||||||
|
type FeatureFilter struct {
|
||||||
|
IncludedFeatures []string `json:"includedFeatures,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filters combines all the filter types for media item queries
|
||||||
|
type Filters struct {
|
||||||
|
DateFilter *DateFilter `json:"dateFilter,omitempty"`
|
||||||
|
ContentFilter *ContentFilter `json:"contentFilter,omitempty"`
|
||||||
|
MediaTypeFilter *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
|
||||||
|
FeatureFilter *FeatureFilter `json:"featureFilter,omitempty"`
|
||||||
|
IncludeArchivedMedia *bool `json:"includeArchivedMedia,omitempty"`
|
||||||
|
ExcludeNonAppCreatedData *bool `json:"excludeNonAppCreatedData,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SearchFilter is uses with mediaItems.search
|
||||||
|
type SearchFilter struct {
|
||||||
|
AlbumID string `json:"albumId,omitempty"`
|
||||||
|
PageSize int `json:"pageSize"`
|
||||||
|
PageToken string `json:"pageToken,omitempty"`
|
||||||
|
Filters *Filters `json:"filters,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SimpleMediaItem is part of NewMediaItem
|
||||||
|
type SimpleMediaItem struct {
|
||||||
|
UploadToken string `json:"uploadToken"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMediaItem is a single media item for upload
|
||||||
|
type NewMediaItem struct {
|
||||||
|
Description string `json:"description"`
|
||||||
|
SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchCreateRequest creates media items from upload tokens
|
||||||
|
type BatchCreateRequest struct {
|
||||||
|
AlbumID string `json:"albumId,omitempty"`
|
||||||
|
NewMediaItems []NewMediaItem `json:"newMediaItems"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchCreateResponse is returned from BatchCreateRequest
|
||||||
|
type BatchCreateResponse struct {
|
||||||
|
NewMediaItemResults []struct {
|
||||||
|
UploadToken string `json:"uploadToken"`
|
||||||
|
Status struct {
|
||||||
|
Message string `json:"message"`
|
||||||
|
Code int `json:"code"`
|
||||||
|
} `json:"status"`
|
||||||
|
MediaItem MediaItem `json:"mediaItem"`
|
||||||
|
} `json:"newMediaItemResults"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchRemoveItems is for removing items from an album
|
||||||
|
type BatchRemoveItems struct {
|
||||||
|
MediaItemIds []string `json:"mediaItemIds"`
|
||||||
|
}
|
||||||
1060
backend/googlephotos/googlephotos.go
Normal file
1060
backend/googlephotos/googlephotos.go
Normal file
File diff suppressed because it is too large
Load Diff
307
backend/googlephotos/googlephotos_test.go
Normal file
307
backend/googlephotos/googlephotos_test.go
Normal file
@@ -0,0 +1,307 @@
|
|||||||
|
package googlephotos
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"path"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/fstest"
|
||||||
|
"github.com/rclone/rclone/lib/random"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// We have two different files here as Google Photos will uniq
|
||||||
|
// them otherwise which confuses the tests as the filename is
|
||||||
|
// unexpected.
|
||||||
|
fileNameAlbum = "rclone-test-image1.jpg"
|
||||||
|
fileNameUpload = "rclone-test-image2.jpg"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Wrapper to override the remote for an object
|
||||||
|
type overrideRemoteObject struct {
|
||||||
|
fs.Object
|
||||||
|
remote string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remote returns the overridden remote name
|
||||||
|
func (o *overrideRemoteObject) Remote() string {
|
||||||
|
return o.remote
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIntegration(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
fstest.Initialise()
|
||||||
|
|
||||||
|
// Create Fs
|
||||||
|
if *fstest.RemoteName == "" {
|
||||||
|
*fstest.RemoteName = "TestGooglePhotos:"
|
||||||
|
}
|
||||||
|
f, err := fs.NewFs(*fstest.RemoteName)
|
||||||
|
if err == fs.ErrorNotFoundInConfigFile {
|
||||||
|
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create local Fs pointing at testfiles
|
||||||
|
localFs, err := fs.NewFs("testfiles")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
t.Run("CreateAlbum", func(t *testing.T) {
|
||||||
|
albumName := "album/rclone-test-" + random.String(24)
|
||||||
|
err = f.Mkdir(ctx, albumName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
remote := albumName + "/" + fileNameAlbum
|
||||||
|
|
||||||
|
t.Run("PutFile", func(t *testing.T) {
|
||||||
|
srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
|
||||||
|
require.NoError(t, err)
|
||||||
|
in, err := srcObj.Open(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, remote, dstObj.Remote())
|
||||||
|
_ = in.Close()
|
||||||
|
remoteWithID := addFileID(remote, dstObj.(*Object).id)
|
||||||
|
|
||||||
|
t.Run("ObjectFs", func(t *testing.T) {
|
||||||
|
assert.Equal(t, f, dstObj.Fs())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ObjectString", func(t *testing.T) {
|
||||||
|
assert.Equal(t, remote, dstObj.String())
|
||||||
|
assert.Equal(t, "<nil>", (*Object)(nil).String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ObjectHash", func(t *testing.T) {
|
||||||
|
h, err := dstObj.Hash(ctx, hash.MD5)
|
||||||
|
assert.Equal(t, "", h)
|
||||||
|
assert.Equal(t, hash.ErrUnsupported, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ObjectSize", func(t *testing.T) {
|
||||||
|
assert.Equal(t, int64(-1), dstObj.Size())
|
||||||
|
f.(*Fs).opt.ReadSize = true
|
||||||
|
defer func() {
|
||||||
|
f.(*Fs).opt.ReadSize = false
|
||||||
|
}()
|
||||||
|
size := dstObj.Size()
|
||||||
|
assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ObjectSetModTime", func(t *testing.T) {
|
||||||
|
err := dstObj.SetModTime(ctx, time.Now())
|
||||||
|
assert.Equal(t, fs.ErrorCantSetModTime, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ObjectStorable", func(t *testing.T) {
|
||||||
|
assert.True(t, dstObj.Storable())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ObjectOpen", func(t *testing.T) {
|
||||||
|
in, err := dstObj.Open(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
buf, err := ioutil.ReadAll(in)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, in.Close())
|
||||||
|
assert.True(t, len(buf) > 1000)
|
||||||
|
contentType := http.DetectContentType(buf[:512])
|
||||||
|
assert.Equal(t, "image/jpeg", contentType)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("CheckFileInAlbum", func(t *testing.T) {
|
||||||
|
entries, err := f.List(ctx, albumName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, 1, len(entries))
|
||||||
|
assert.Equal(t, remote, entries[0].Remote())
|
||||||
|
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
|
||||||
|
})
|
||||||
|
|
||||||
|
// Check it is there in the date/month/year heirachy
|
||||||
|
// 2013-07-13 is the creation date of the folder
|
||||||
|
checkPresent := func(t *testing.T, objPath string) {
|
||||||
|
entries, err := f.List(ctx, objPath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
found := false
|
||||||
|
for _, entry := range entries {
|
||||||
|
leaf := path.Base(entry.Remote())
|
||||||
|
if leaf == fileNameAlbum || leaf == remoteWithID {
|
||||||
|
found = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("CheckInByYear", func(t *testing.T) {
|
||||||
|
checkPresent(t, "media/by-year/2013")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("CheckInByMonth", func(t *testing.T) {
|
||||||
|
checkPresent(t, "media/by-month/2013/2013-07")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("CheckInByDay", func(t *testing.T) {
|
||||||
|
checkPresent(t, "media/by-day/2013/2013-07-26")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("NewObject", func(t *testing.T) {
|
||||||
|
o, err := f.NewObject(ctx, remote)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, remote, o.Remote())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("NewObjectWithID", func(t *testing.T) {
|
||||||
|
o, err := f.NewObject(ctx, remoteWithID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, remoteWithID, o.Remote())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("NewFsIsFile", func(t *testing.T) {
|
||||||
|
fNew, err := fs.NewFs(*fstest.RemoteName + remote)
|
||||||
|
assert.Equal(t, fs.ErrorIsFile, err)
|
||||||
|
leaf := path.Base(remote)
|
||||||
|
o, err := fNew.NewObject(ctx, leaf)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, leaf, o.Remote())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("RemoveFileFromAlbum", func(t *testing.T) {
|
||||||
|
err = dstObj.Remove(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
|
||||||
|
// Check album empty
|
||||||
|
entries, err := f.List(ctx, albumName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, 0, len(entries))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
// remove the album
|
||||||
|
err = f.Rmdir(ctx, albumName)
|
||||||
|
require.Error(t, err) // FIXME doesn't work yet
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("UploadMkdir", func(t *testing.T) {
|
||||||
|
assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
|
||||||
|
assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))
|
||||||
|
|
||||||
|
t.Run("List", func(t *testing.T) {
|
||||||
|
entries, err := f.List(ctx, "upload")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, 1, len(entries))
|
||||||
|
assert.Equal(t, "upload/dir", entries[0].Remote())
|
||||||
|
|
||||||
|
entries, err = f.List(ctx, "upload/dir")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, 1, len(entries))
|
||||||
|
assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Rmdir", func(t *testing.T) {
|
||||||
|
assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
|
||||||
|
assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
|
||||||
|
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ListEmpty", func(t *testing.T) {
|
||||||
|
entries, err := f.List(ctx, "upload")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, 0, len(entries))
|
||||||
|
|
||||||
|
_, err = f.List(ctx, "upload/dir")
|
||||||
|
assert.Equal(t, fs.ErrorDirNotFound, err)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Upload", func(t *testing.T) {
|
||||||
|
uploadDir := "upload/dir/subdir"
|
||||||
|
remote := path.Join(uploadDir, fileNameUpload)
|
||||||
|
|
||||||
|
srcObj, err := localFs.NewObject(ctx, fileNameUpload)
|
||||||
|
require.NoError(t, err)
|
||||||
|
in, err := srcObj.Open(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, remote, dstObj.Remote())
|
||||||
|
_ = in.Close()
|
||||||
|
remoteWithID := addFileID(remote, dstObj.(*Object).id)
|
||||||
|
|
||||||
|
t.Run("List", func(t *testing.T) {
|
||||||
|
entries, err := f.List(ctx, uploadDir)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(entries))
|
||||||
|
assert.Equal(t, remote, entries[0].Remote())
|
||||||
|
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("NewObject", func(t *testing.T) {
|
||||||
|
o, err := f.NewObject(ctx, remote)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, remote, o.Remote())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("NewObjectWithID", func(t *testing.T) {
|
||||||
|
o, err := f.NewObject(ctx, remoteWithID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, remoteWithID, o.Remote())
|
||||||
|
})
|
||||||
|
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Name", func(t *testing.T) {
|
||||||
|
assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Root", func(t *testing.T) {
|
||||||
|
assert.Equal(t, "", f.Root())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("String", func(t *testing.T) {
|
||||||
|
assert.Equal(t, `Google Photos path ""`, f.String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Features", func(t *testing.T) {
|
||||||
|
features := f.Features()
|
||||||
|
assert.False(t, features.CaseInsensitive)
|
||||||
|
assert.True(t, features.ReadMimeType)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Precision", func(t *testing.T) {
|
||||||
|
assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Hashes", func(t *testing.T) {
|
||||||
|
assert.Equal(t, hash.Set(hash.None), f.Hashes())
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddID(t *testing.T) {
|
||||||
|
assert.Equal(t, "potato {123}", addID("potato", "123"))
|
||||||
|
assert.Equal(t, "{123}", addID("", "123"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFileAddID(t *testing.T) {
|
||||||
|
assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123"))
|
||||||
|
assert.Equal(t, "potato {123}", addFileID("potato", "123"))
|
||||||
|
assert.Equal(t, "{123}", addFileID("", "123"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFindID(t *testing.T) {
|
||||||
|
assert.Equal(t, "", findID("potato"))
|
||||||
|
ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
|
||||||
|
assert.Equal(t, ID, findID("potato {"+ID+"}.txt"))
|
||||||
|
ID = ID[1:]
|
||||||
|
assert.Equal(t, "", findID("potato {"+ID+"}.txt"))
|
||||||
|
}
|
||||||
335
backend/googlephotos/pattern.go
Normal file
335
backend/googlephotos/pattern.go
Normal file
@@ -0,0 +1,335 @@
|
|||||||
|
// Store the parsing of file patterns
|
||||||
|
|
||||||
|
package googlephotos
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
)
|
||||||
|
|
||||||
|
// lister describes the subset of the interfaces on Fs needed for the
|
||||||
|
// file pattern parsing
|
||||||
|
type lister interface {
|
||||||
|
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
|
||||||
|
listAlbums(shared bool) (all *albums, err error)
|
||||||
|
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
|
||||||
|
dirTime() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// dirPattern describes a single directory pattern
|
||||||
|
type dirPattern struct {
|
||||||
|
re string // match for the path
|
||||||
|
match *regexp.Regexp // compiled match
|
||||||
|
canUpload bool // true if can upload here
|
||||||
|
canMkdir bool // true if can make a directory here
|
||||||
|
isFile bool // true if this is a file
|
||||||
|
isUpload bool // true if this is the upload directory
|
||||||
|
// function to turn a match into DirEntries
|
||||||
|
toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// dirPatters is a slice of all the directory patterns
|
||||||
|
type dirPatterns []dirPattern
|
||||||
|
|
||||||
|
// patterns describes the layout of the google photos backend file system.
|
||||||
|
//
|
||||||
|
// NB no trailing / on paths
|
||||||
|
var patterns = dirPatterns{
|
||||||
|
{
|
||||||
|
re: `^$`,
|
||||||
|
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
|
||||||
|
return fs.DirEntries{
|
||||||
|
fs.NewDir(prefix+"media", f.dirTime()),
|
||||||
|
fs.NewDir(prefix+"album", f.dirTime()),
|
||||||
|
fs.NewDir(prefix+"shared-album", f.dirTime()),
|
||||||
|
fs.NewDir(prefix+"upload", f.dirTime()),
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^upload(?:/(.*))?$`,
|
||||||
|
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
|
||||||
|
return f.listUploads(ctx, match[0])
|
||||||
|
},
|
||||||
|
canUpload: true,
|
||||||
|
canMkdir: true,
|
||||||
|
isUpload: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^upload/(.*)$`,
|
||||||
|
isFile: true,
|
||||||
|
canUpload: true,
|
||||||
|
isUpload: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media$`,
|
||||||
|
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
|
||||||
|
return fs.DirEntries{
|
||||||
|
fs.NewDir(prefix+"all", f.dirTime()),
|
||||||
|
fs.NewDir(prefix+"by-year", f.dirTime()),
|
||||||
|
fs.NewDir(prefix+"by-month", f.dirTime()),
|
||||||
|
fs.NewDir(prefix+"by-day", f.dirTime()),
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/all$`,
|
||||||
|
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
|
||||||
|
return f.listDir(ctx, prefix, api.SearchFilter{})
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/all/([^/]+)$`,
|
||||||
|
isFile: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/by-year$`,
|
||||||
|
toEntries: years,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/by-year/(\d{4})$`,
|
||||||
|
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
|
||||||
|
filter, err := yearMonthDayFilter(ctx, f, match)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f.listDir(ctx, prefix, filter)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/by-year/(\d{4})/([^/]+)$`,
|
||||||
|
isFile: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/by-month$`,
|
||||||
|
toEntries: years,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/by-month/(\d{4})$`,
|
||||||
|
toEntries: months,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
|
||||||
|
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
|
||||||
|
filter, err := yearMonthDayFilter(ctx, f, match)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f.listDir(ctx, prefix, filter)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
|
||||||
|
isFile: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/by-day$`,
|
||||||
|
toEntries: years,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/by-day/(\d{4})$`,
|
||||||
|
toEntries: days,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
|
||||||
|
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
|
||||||
|
filter, err := yearMonthDayFilter(ctx, f, match)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f.listDir(ctx, prefix, filter)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
|
||||||
|
isFile: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^album$`,
|
||||||
|
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
|
||||||
|
return albumsToEntries(ctx, f, false, prefix, "")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^album/(.+)$`,
|
||||||
|
canMkdir: true,
|
||||||
|
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
|
||||||
|
return albumsToEntries(ctx, f, false, prefix, match[1])
|
||||||
|
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^album/(.+?)/([^/]+)$`,
|
||||||
|
canUpload: true,
|
||||||
|
isFile: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^shared-album$`,
|
||||||
|
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
|
||||||
|
return albumsToEntries(ctx, f, true, prefix, "")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^shared-album/(.+)$`,
|
||||||
|
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
|
||||||
|
return albumsToEntries(ctx, f, true, prefix, match[1])
|
||||||
|
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
re: `^shared-album/(.+?)/([^/]+)$`,
|
||||||
|
isFile: true,
|
||||||
|
},
|
||||||
|
}.mustCompile()
|
||||||
|
|
||||||
|
// mustCompile compiles the regexps in the dirPatterns
|
||||||
|
func (ds dirPatterns) mustCompile() dirPatterns {
|
||||||
|
for i := range ds {
|
||||||
|
pattern := &ds[i]
|
||||||
|
pattern.match = regexp.MustCompile(pattern.re)
|
||||||
|
}
|
||||||
|
return ds
|
||||||
|
}
|
||||||
|
|
||||||
|
// match finds the path passed in in the matching structure and
|
||||||
|
// returns the parameters and a pointer to the match, or nil.
|
||||||
|
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
|
||||||
|
itemPath = strings.Trim(itemPath, "/")
|
||||||
|
absPath := path.Join(root, itemPath)
|
||||||
|
prefix = strings.Trim(absPath[len(root):], "/")
|
||||||
|
if prefix != "" {
|
||||||
|
prefix += "/"
|
||||||
|
}
|
||||||
|
for i := range ds {
|
||||||
|
pattern = &ds[i]
|
||||||
|
if pattern.isFile != isFile {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
match = pattern.match.FindStringSubmatch(absPath)
|
||||||
|
if match != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the years from 2000 to today
|
||||||
|
// FIXME make configurable?
|
||||||
|
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
|
||||||
|
currentYear := f.dirTime().Year()
|
||||||
|
for year := 2000; year <= currentYear; year++ {
|
||||||
|
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the months in a given year
|
||||||
|
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
|
||||||
|
year := match[1]
|
||||||
|
for month := 1; month <= 12; month++ {
|
||||||
|
entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime()))
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the days in a given year
|
||||||
|
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
|
||||||
|
year := match[1]
|
||||||
|
current, err := time.Parse("2006", year)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Errorf("bad year %q", match[1])
|
||||||
|
}
|
||||||
|
currentYear := current.Year()
|
||||||
|
for current.Year() == currentYear {
|
||||||
|
entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
|
||||||
|
current = current.AddDate(0, 0, 1)
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// This creates a search filter on year/month/day as provided
|
||||||
|
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
|
||||||
|
year, err := strconv.Atoi(match[1])
|
||||||
|
if err != nil || year < 1000 || year > 3000 {
|
||||||
|
return sf, errors.Errorf("bad year %q", match[1])
|
||||||
|
}
|
||||||
|
sf = api.SearchFilter{
|
||||||
|
Filters: &api.Filters{
|
||||||
|
DateFilter: &api.DateFilter{
|
||||||
|
Dates: []api.Date{
|
||||||
|
{
|
||||||
|
Year: year,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if len(match) >= 3 {
|
||||||
|
month, err := strconv.Atoi(match[2])
|
||||||
|
if err != nil || month < 1 || month > 12 {
|
||||||
|
return sf, errors.Errorf("bad month %q", match[2])
|
||||||
|
}
|
||||||
|
sf.Filters.DateFilter.Dates[0].Month = month
|
||||||
|
}
|
||||||
|
if len(match) >= 4 {
|
||||||
|
day, err := strconv.Atoi(match[3])
|
||||||
|
if err != nil || day < 1 || day > 31 {
|
||||||
|
return sf, errors.Errorf("bad day %q", match[3])
|
||||||
|
}
|
||||||
|
sf.Filters.DateFilter.Dates[0].Day = day
|
||||||
|
}
|
||||||
|
return sf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Turns an albumPath into entries
|
||||||
|
//
|
||||||
|
// These can either be synthetic directory entries if the album path
|
||||||
|
// is a prefix of another album, or actual files, or a combination of
|
||||||
|
// the two.
|
||||||
|
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
|
||||||
|
albums, err := f.listAlbums(shared)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Put in the directories
|
||||||
|
dirs, foundAlbumPath := albums.getDirs(albumPath)
|
||||||
|
if foundAlbumPath {
|
||||||
|
for _, dir := range dirs {
|
||||||
|
d := fs.NewDir(prefix+dir, f.dirTime())
|
||||||
|
dirPath := path.Join(albumPath, dir)
|
||||||
|
// if this dir is an album add more special stuff
|
||||||
|
album, ok := albums.get(dirPath)
|
||||||
|
if ok {
|
||||||
|
count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(f, "Error reading media count: %v", err)
|
||||||
|
}
|
||||||
|
d.SetID(album.ID).SetItems(count)
|
||||||
|
}
|
||||||
|
entries = append(entries, d)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// if this is an album then return a filter to list it
|
||||||
|
album, foundAlbum := albums.get(albumPath)
|
||||||
|
if foundAlbum {
|
||||||
|
filter := api.SearchFilter{AlbumID: album.ID}
|
||||||
|
newEntries, err := f.listDir(ctx, prefix, filter)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
entries = append(entries, newEntries...)
|
||||||
|
}
|
||||||
|
if !foundAlbumPath && !foundAlbum && albumPath != "" {
|
||||||
|
return nil, fs.ErrorDirNotFound
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
495
backend/googlephotos/pattern_test.go
Normal file
495
backend/googlephotos/pattern_test.go
Normal file
@@ -0,0 +1,495 @@
|
|||||||
|
package googlephotos
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/dirtree"
|
||||||
|
"github.com/rclone/rclone/fstest"
|
||||||
|
"github.com/rclone/rclone/fstest/mockobject"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// time for directories
|
||||||
|
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")
|
||||||
|
|
||||||
|
// mock Fs for testing patterns
|
||||||
|
type testLister struct {
|
||||||
|
t *testing.T
|
||||||
|
albums *albums
|
||||||
|
names []string
|
||||||
|
uploaded dirtree.DirTree
|
||||||
|
}
|
||||||
|
|
||||||
|
// newTestLister makes a mock for testing
|
||||||
|
func newTestLister(t *testing.T) *testLister {
|
||||||
|
return &testLister{
|
||||||
|
t: t,
|
||||||
|
albums: newAlbums(),
|
||||||
|
uploaded: dirtree.New(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// mock listDir for testing
|
||||||
|
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
|
||||||
|
for _, name := range f.names {
|
||||||
|
entries = append(entries, mockobject.New(prefix+name))
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mock listAlbums for testing
|
||||||
|
func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
|
||||||
|
return f.albums, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mock listUploads for testing
|
||||||
|
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
|
entries, _ = f.uploaded[dir]
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mock dirTime for testing
|
||||||
|
func (f *testLister) dirTime() time.Time {
|
||||||
|
return startTime
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPatternMatch(t *testing.T) {
|
||||||
|
for testNumber, test := range []struct {
|
||||||
|
// input
|
||||||
|
root string
|
||||||
|
itemPath string
|
||||||
|
isFile bool
|
||||||
|
// expected output
|
||||||
|
wantMatch []string
|
||||||
|
wantPrefix string
|
||||||
|
wantPattern *dirPattern
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
root: "",
|
||||||
|
itemPath: "",
|
||||||
|
isFile: false,
|
||||||
|
wantMatch: []string{""},
|
||||||
|
wantPrefix: "",
|
||||||
|
wantPattern: &patterns[0],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "",
|
||||||
|
itemPath: "",
|
||||||
|
isFile: true,
|
||||||
|
wantMatch: nil,
|
||||||
|
wantPrefix: "",
|
||||||
|
wantPattern: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "upload",
|
||||||
|
itemPath: "",
|
||||||
|
isFile: false,
|
||||||
|
wantMatch: []string{"upload", ""},
|
||||||
|
wantPrefix: "",
|
||||||
|
wantPattern: &patterns[1],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "upload/dir",
|
||||||
|
itemPath: "",
|
||||||
|
isFile: false,
|
||||||
|
wantMatch: []string{"upload/dir", "dir"},
|
||||||
|
wantPrefix: "",
|
||||||
|
wantPattern: &patterns[1],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "upload/file.jpg",
|
||||||
|
itemPath: "",
|
||||||
|
isFile: true,
|
||||||
|
wantMatch: []string{"upload/file.jpg", "file.jpg"},
|
||||||
|
wantPrefix: "",
|
||||||
|
wantPattern: &patterns[2],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media",
|
||||||
|
itemPath: "",
|
||||||
|
isFile: false,
|
||||||
|
wantMatch: []string{"media"},
|
||||||
|
wantPrefix: "",
|
||||||
|
wantPattern: &patterns[3],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "",
|
||||||
|
itemPath: "media",
|
||||||
|
isFile: false,
|
||||||
|
wantMatch: []string{"media"},
|
||||||
|
wantPrefix: "media/",
|
||||||
|
wantPattern: &patterns[3],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media/all",
|
||||||
|
itemPath: "",
|
||||||
|
isFile: false,
|
||||||
|
wantMatch: []string{"media/all"},
|
||||||
|
wantPrefix: "",
|
||||||
|
wantPattern: &patterns[4],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media",
|
||||||
|
itemPath: "all",
|
||||||
|
isFile: false,
|
||||||
|
wantMatch: []string{"media/all"},
|
||||||
|
wantPrefix: "all/",
|
||||||
|
wantPattern: &patterns[4],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media/all",
|
||||||
|
itemPath: "file.jpg",
|
||||||
|
isFile: true,
|
||||||
|
wantMatch: []string{"media/all/file.jpg", "file.jpg"},
|
||||||
|
wantPrefix: "file.jpg/",
|
||||||
|
wantPattern: &patterns[5],
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
|
||||||
|
gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
|
||||||
|
assert.Equal(t, test.wantMatch, gotMatch)
|
||||||
|
assert.Equal(t, test.wantPrefix, gotPrefix)
|
||||||
|
assert.Equal(t, test.wantPattern, gotPattern)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPatternMatchToEntries(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
f := newTestLister(t)
|
||||||
|
f.names = []string{"file.jpg"}
|
||||||
|
f.albums.add(&api.Album{
|
||||||
|
ID: "1",
|
||||||
|
Title: "sub/one",
|
||||||
|
})
|
||||||
|
f.albums.add(&api.Album{
|
||||||
|
ID: "2",
|
||||||
|
Title: "sub",
|
||||||
|
})
|
||||||
|
f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
|
||||||
|
f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))
|
||||||
|
|
||||||
|
for testNumber, test := range []struct {
|
||||||
|
// input
|
||||||
|
root string
|
||||||
|
itemPath string
|
||||||
|
// expected output
|
||||||
|
wantMatch []string
|
||||||
|
wantPrefix string
|
||||||
|
remotes []string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
root: "",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{""},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"media/", "album/", "shared-album/", "upload/"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "upload",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"upload", ""},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"upload/file1.jpg", "upload/dir/"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "upload",
|
||||||
|
itemPath: "dir",
|
||||||
|
wantMatch: []string{"upload/dir", "dir"},
|
||||||
|
wantPrefix: "dir/",
|
||||||
|
remotes: []string{"upload/dir/file2.jpg"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"media"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"all/", "by-year/", "by-month/", "by-day/"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media/all",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"media/all"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"file.jpg"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media",
|
||||||
|
itemPath: "all",
|
||||||
|
wantMatch: []string{"media/all"},
|
||||||
|
wantPrefix: "all/",
|
||||||
|
remotes: []string{"all/file.jpg"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media/by-year",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"media/by-year"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media/by-year/2000",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"media/by-year/2000", "2000"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"file.jpg"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media/by-month",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"media/by-month"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media/by-month/2001",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"media/by-month/2001", "2001"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media/by-month/2001/2001-01",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"media/by-month/2001/2001-01", "2001", "01"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"file.jpg"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media/by-day",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"media/by-day"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media/by-day/2001",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"media/by-day/2001", "2001"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "media/by-day/2001/2001-01-02",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"file.jpg"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "album",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"album"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"sub/"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "album/sub",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"album/sub", "sub"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"one/", "file.jpg"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "album/sub/one",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"album/sub/one", "sub/one"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"file.jpg"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "shared-album",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"shared-album"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"sub/"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "shared-album/sub",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"shared-album/sub", "sub"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"one/", "file.jpg"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: "shared-album/sub/one",
|
||||||
|
itemPath: "",
|
||||||
|
wantMatch: []string{"shared-album/sub/one", "sub/one"},
|
||||||
|
wantPrefix: "",
|
||||||
|
remotes: []string{"file.jpg"},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
|
||||||
|
match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
|
||||||
|
assert.Equal(t, test.wantMatch, match)
|
||||||
|
assert.Equal(t, test.wantPrefix, prefix)
|
||||||
|
assert.NotNil(t, pattern)
|
||||||
|
assert.NotNil(t, pattern.toEntries)
|
||||||
|
|
||||||
|
entries, err := pattern.toEntries(ctx, f, prefix, match)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
var remotes = []string{}
|
||||||
|
for _, entry := range entries {
|
||||||
|
remote := entry.Remote()
|
||||||
|
if _, isDir := entry.(fs.Directory); isDir {
|
||||||
|
remote += "/"
|
||||||
|
}
|
||||||
|
remotes = append(remotes, remote)
|
||||||
|
if len(remotes) >= 4 {
|
||||||
|
break // only test first 4 entries
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert.Equal(t, test.remotes, remotes)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPatternYears(t *testing.T) {
|
||||||
|
f := newTestLister(t)
|
||||||
|
entries, err := years(context.Background(), f, "potato/", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
year := 2000
|
||||||
|
for _, entry := range entries {
|
||||||
|
assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote())
|
||||||
|
year++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPatternMonths(t *testing.T) {
|
||||||
|
f := newTestLister(t)
|
||||||
|
entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Equal(t, 12, len(entries))
|
||||||
|
for i, entry := range entries {
|
||||||
|
assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPatternDays(t *testing.T) {
|
||||||
|
f := newTestLister(t)
|
||||||
|
entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Equal(t, 366, len(entries))
|
||||||
|
assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
|
||||||
|
assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPatternYearMonthDayFilter(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
f := newTestLister(t)
|
||||||
|
|
||||||
|
// Years
|
||||||
|
sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, api.SearchFilter{
|
||||||
|
Filters: &api.Filters{
|
||||||
|
DateFilter: &api.DateFilter{
|
||||||
|
Dates: []api.Date{
|
||||||
|
{
|
||||||
|
Year: 2000,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, sf)
|
||||||
|
|
||||||
|
_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
|
||||||
|
require.Error(t, err)
|
||||||
|
_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
|
||||||
|
require.Error(t, err)
|
||||||
|
_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
|
||||||
|
require.Error(t, err)
|
||||||
|
|
||||||
|
// Months
|
||||||
|
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, api.SearchFilter{
|
||||||
|
Filters: &api.Filters{
|
||||||
|
DateFilter: &api.DateFilter{
|
||||||
|
Dates: []api.Date{
|
||||||
|
{
|
||||||
|
Month: 1,
|
||||||
|
Year: 2000,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, sf)
|
||||||
|
|
||||||
|
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
|
||||||
|
require.Error(t, err)
|
||||||
|
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
|
||||||
|
require.Error(t, err)
|
||||||
|
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
|
||||||
|
require.Error(t, err)
|
||||||
|
|
||||||
|
// Days
|
||||||
|
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, api.SearchFilter{
|
||||||
|
Filters: &api.Filters{
|
||||||
|
DateFilter: &api.DateFilter{
|
||||||
|
Dates: []api.Date{
|
||||||
|
{
|
||||||
|
Day: 2,
|
||||||
|
Month: 1,
|
||||||
|
Year: 2000,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, sf)
|
||||||
|
|
||||||
|
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
|
||||||
|
require.Error(t, err)
|
||||||
|
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
|
||||||
|
require.Error(t, err)
|
||||||
|
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
|
||||||
|
require.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPatternAlbumsToEntries(t *testing.T) {
|
||||||
|
f := newTestLister(t)
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
|
||||||
|
assert.Equal(t, fs.ErrorDirNotFound, err)
|
||||||
|
|
||||||
|
f.albums.add(&api.Album{
|
||||||
|
ID: "1",
|
||||||
|
Title: "sub/one",
|
||||||
|
})
|
||||||
|
|
||||||
|
entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, 1, len(entries))
|
||||||
|
assert.Equal(t, "potato/one", entries[0].Remote())
|
||||||
|
_, ok := entries[0].(fs.Directory)
|
||||||
|
assert.Equal(t, true, ok)
|
||||||
|
|
||||||
|
f.albums.add(&api.Album{
|
||||||
|
ID: "1",
|
||||||
|
Title: "sub",
|
||||||
|
})
|
||||||
|
f.names = []string{"file.jpg"}
|
||||||
|
|
||||||
|
entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, 2, len(entries))
|
||||||
|
assert.Equal(t, "potato/one", entries[0].Remote())
|
||||||
|
_, ok = entries[0].(fs.Directory)
|
||||||
|
assert.Equal(t, true, ok)
|
||||||
|
assert.Equal(t, "potato/file.jpg", entries[1].Remote())
|
||||||
|
_, ok = entries[1].(fs.Object)
|
||||||
|
assert.Equal(t, true, ok)
|
||||||
|
|
||||||
|
}
|
||||||
BIN
backend/googlephotos/testfiles/rclone-test-image1.jpg
Normal file
BIN
backend/googlephotos/testfiles/rclone-test-image1.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 16 KiB |
BIN
backend/googlephotos/testfiles/rclone-test-image2.jpg
Normal file
BIN
backend/googlephotos/testfiles/rclone-test-image2.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 16 KiB |
@@ -5,7 +5,9 @@
|
|||||||
package http
|
package http
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"io"
|
"io"
|
||||||
|
"mime"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
@@ -13,13 +15,13 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
|
||||||
"github.com/ncw/rclone/fs/fshttp"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/ncw/rclone/lib/rest"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/rest"
|
||||||
"golang.org/x/net/html"
|
"golang.org/x/net/html"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -40,7 +42,41 @@ func init() {
|
|||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "https://example.com",
|
Value: "https://example.com",
|
||||||
Help: "Connect to example.com",
|
Help: "Connect to example.com",
|
||||||
|
}, {
|
||||||
|
Value: "https://user:pass@example.com",
|
||||||
|
Help: "Connect to example.com using a username and password",
|
||||||
}},
|
}},
|
||||||
|
}, {
|
||||||
|
Name: "headers",
|
||||||
|
Help: `Set HTTP headers for all transactions
|
||||||
|
|
||||||
|
Use this to set additional HTTP headers for all transactions
|
||||||
|
|
||||||
|
The input format is comma separated list of key,value pairs. Standard
|
||||||
|
[CSV encoding](https://godoc.org/encoding/csv) may be used.
|
||||||
|
|
||||||
|
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
|
||||||
|
|
||||||
|
You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
|
||||||
|
`,
|
||||||
|
Default: fs.CommaSepList{},
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "no_slash",
|
||||||
|
Help: `Set this if the site doesn't end directories with /
|
||||||
|
|
||||||
|
Use this if your target website does not use / on the end of
|
||||||
|
directories.
|
||||||
|
|
||||||
|
A / on the end of a path is how rclone normally tells the difference
|
||||||
|
between files and directories. If this flag is set, then rclone will
|
||||||
|
treat all files with Content-Type: text/html as directories and read
|
||||||
|
URLs from them rather than downloading them.
|
||||||
|
|
||||||
|
Note that this may cause rclone to confuse genuine HTML files with
|
||||||
|
directories.`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
}},
|
}},
|
||||||
}
|
}
|
||||||
fs.Register(fsi)
|
fs.Register(fsi)
|
||||||
@@ -48,7 +84,9 @@ func init() {
|
|||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
Endpoint string `config:"url"`
|
Endpoint string `config:"url"`
|
||||||
|
NoSlash bool `config:"no_slash"`
|
||||||
|
Headers fs.CommaSepList `config:"headers"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs stores the interface to the remote HTTP files
|
// Fs stores the interface to the remote HTTP files
|
||||||
@@ -93,6 +131,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(opt.Headers)%2 != 0 {
|
||||||
|
return nil, errors.New("odd number of headers supplied")
|
||||||
|
}
|
||||||
|
|
||||||
if !strings.HasSuffix(opt.Endpoint, "/") {
|
if !strings.HasSuffix(opt.Endpoint, "/") {
|
||||||
opt.Endpoint += "/"
|
opt.Endpoint += "/"
|
||||||
}
|
}
|
||||||
@@ -118,10 +160,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return http.ErrUseLastResponse
|
return http.ErrUseLastResponse
|
||||||
}
|
}
|
||||||
// check to see if points to a file
|
// check to see if points to a file
|
||||||
res, err := noRedir.Head(u.String())
|
req, err := http.NewRequest("HEAD", u.String(), nil)
|
||||||
err = statusError(res, err)
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
isFile = true
|
addHeaders(req, opt)
|
||||||
|
res, err := noRedir.Do(req)
|
||||||
|
err = statusError(res, err)
|
||||||
|
if err == nil {
|
||||||
|
isFile = true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -186,7 +232,7 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewObject creates a new remote http file object
|
// NewObject creates a new remote http file object
|
||||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
o := &Object{
|
o := &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
@@ -248,7 +294,7 @@ func parseName(base *url.URL, name string) (string, error) {
|
|||||||
}
|
}
|
||||||
// calculate the name relative to the base
|
// calculate the name relative to the base
|
||||||
name = u.Path[len(base.Path):]
|
name = u.Path[len(base.Path):]
|
||||||
// musn't be empty
|
// mustn't be empty
|
||||||
if name == "" {
|
if name == "" {
|
||||||
return "", errNameIsEmpty
|
return "", errNameIsEmpty
|
||||||
}
|
}
|
||||||
@@ -267,14 +313,20 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var walk func(*html.Node)
|
var (
|
||||||
|
walk func(*html.Node)
|
||||||
|
seen = make(map[string]struct{})
|
||||||
|
)
|
||||||
walk = func(n *html.Node) {
|
walk = func(n *html.Node) {
|
||||||
if n.Type == html.ElementNode && n.Data == "a" {
|
if n.Type == html.ElementNode && n.Data == "a" {
|
||||||
for _, a := range n.Attr {
|
for _, a := range n.Attr {
|
||||||
if a.Key == "href" {
|
if a.Key == "href" {
|
||||||
name, err := parseName(base, a.Val)
|
name, err := parseName(base, a.Val)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
names = append(names, name)
|
if _, found := seen[name]; !found {
|
||||||
|
names = append(names, name)
|
||||||
|
seen[name] = struct{}{}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -288,6 +340,20 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
|
|||||||
return names, nil
|
return names, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Adds the configured headers to the request if any
|
||||||
|
func addHeaders(req *http.Request, opt *Options) {
|
||||||
|
for i := 0; i < len(opt.Headers); i += 2 {
|
||||||
|
key := opt.Headers[i]
|
||||||
|
value := opt.Headers[i+1]
|
||||||
|
req.Header.Add(key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adds the configured headers to the request if any
|
||||||
|
func (f *Fs) addHeaders(req *http.Request) {
|
||||||
|
addHeaders(req, &f.opt)
|
||||||
|
}
|
||||||
|
|
||||||
// Read the directory passed in
|
// Read the directory passed in
|
||||||
func (f *Fs) readDir(dir string) (names []string, err error) {
|
func (f *Fs) readDir(dir string) (names []string, err error) {
|
||||||
URL := f.url(dir)
|
URL := f.url(dir)
|
||||||
@@ -298,15 +364,23 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
|
|||||||
if !strings.HasSuffix(URL, "/") {
|
if !strings.HasSuffix(URL, "/") {
|
||||||
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
|
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
|
||||||
}
|
}
|
||||||
res, err := f.httpClient.Get(URL)
|
// Do the request
|
||||||
if err == nil && res.StatusCode == http.StatusNotFound {
|
req, err := http.NewRequest("GET", URL, nil)
|
||||||
return nil, fs.ErrorDirNotFound
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "readDir failed")
|
||||||
|
}
|
||||||
|
f.addHeaders(req)
|
||||||
|
res, err := f.httpClient.Do(req)
|
||||||
|
if err == nil {
|
||||||
|
defer fs.CheckClose(res.Body, &err)
|
||||||
|
if res.StatusCode == http.StatusNotFound {
|
||||||
|
return nil, fs.ErrorDirNotFound
|
||||||
|
}
|
||||||
}
|
}
|
||||||
err = statusError(res, err)
|
err = statusError(res, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to readDir")
|
return nil, errors.Wrap(err, "failed to readDir")
|
||||||
}
|
}
|
||||||
defer fs.CheckClose(res.Body, &err)
|
|
||||||
|
|
||||||
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
|
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
|
||||||
switch contentType {
|
switch contentType {
|
||||||
@@ -330,7 +404,7 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
|
|||||||
//
|
//
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
if !strings.HasSuffix(dir, "/") && dir != "" {
|
if !strings.HasSuffix(dir, "/") && dir != "" {
|
||||||
dir += "/"
|
dir += "/"
|
||||||
}
|
}
|
||||||
@@ -350,11 +424,16 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
}
|
}
|
||||||
if err = file.stat(); err != nil {
|
switch err = file.stat(); err {
|
||||||
|
case nil:
|
||||||
|
entries = append(entries, file)
|
||||||
|
case fs.ErrorNotAFile:
|
||||||
|
// ...found a directory not a file
|
||||||
|
dir := fs.NewDir(remote, timeUnset)
|
||||||
|
entries = append(entries, dir)
|
||||||
|
default:
|
||||||
fs.Debugf(remote, "skipping because of error: %v", err)
|
fs.Debugf(remote, "skipping because of error: %v", err)
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
entries = append(entries, file)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return entries, nil
|
return entries, nil
|
||||||
@@ -365,12 +444,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
// May create the object even if it returns an error - if so
|
// May create the object even if it returns an error - if so
|
||||||
// will return the object and the error, otherwise will return
|
// will return the object and the error, otherwise will return
|
||||||
// nil and the error
|
// nil and the error
|
||||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
return nil, errorReadOnly
|
return nil, errorReadOnly
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
return nil, errorReadOnly
|
return nil, errorReadOnly
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -393,7 +472,7 @@ func (o *Object) Remote() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
|
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
|
||||||
func (o *Object) Hash(r hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||||
return "", hash.ErrUnsupported
|
return "", hash.ErrUnsupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -403,7 +482,7 @@ func (o *Object) Size() int64 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ModTime returns the modification time of the remote http file
|
// ModTime returns the modification time of the remote http file
|
||||||
func (o *Object) ModTime() time.Time {
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
return o.modTime
|
return o.modTime
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -415,7 +494,12 @@ func (o *Object) url() string {
|
|||||||
// stat updates the info field in the Object
|
// stat updates the info field in the Object
|
||||||
func (o *Object) stat() error {
|
func (o *Object) stat() error {
|
||||||
url := o.url()
|
url := o.url()
|
||||||
res, err := o.fs.httpClient.Head(url)
|
req, err := http.NewRequest("HEAD", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "stat failed")
|
||||||
|
}
|
||||||
|
o.fs.addHeaders(req)
|
||||||
|
res, err := o.fs.httpClient.Do(req)
|
||||||
if err == nil && res.StatusCode == http.StatusNotFound {
|
if err == nil && res.StatusCode == http.StatusNotFound {
|
||||||
return fs.ErrorObjectNotFound
|
return fs.ErrorObjectNotFound
|
||||||
}
|
}
|
||||||
@@ -430,13 +514,23 @@ func (o *Object) stat() error {
|
|||||||
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
|
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
|
||||||
o.modTime = t
|
o.modTime = t
|
||||||
o.contentType = res.Header.Get("Content-Type")
|
o.contentType = res.Header.Get("Content-Type")
|
||||||
|
// If NoSlash is set then check ContentType to see if it is a directory
|
||||||
|
if o.fs.opt.NoSlash {
|
||||||
|
mediaType, _, err := mime.ParseMediaType(o.contentType)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
|
||||||
|
}
|
||||||
|
if mediaType == "text/html" {
|
||||||
|
return fs.ErrorNotAFile
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetModTime sets the modification and access time to the specified time
|
// SetModTime sets the modification and access time to the specified time
|
||||||
//
|
//
|
||||||
// it also updates the info field
|
// it also updates the info field
|
||||||
func (o *Object) SetModTime(modTime time.Time) error {
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||||
return errorReadOnly
|
return errorReadOnly
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -446,7 +540,7 @@ func (o *Object) Storable() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open a remote http file object for reading. Seek is supported
|
// Open a remote http file object for reading. Seek is supported
|
||||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
url := o.url()
|
url := o.url()
|
||||||
req, err := http.NewRequest("GET", url, nil)
|
req, err := http.NewRequest("GET", url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -457,6 +551,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
|||||||
for k, v := range fs.OpenOptionHeaders(options) {
|
for k, v := range fs.OpenOptionHeaders(options) {
|
||||||
req.Header.Add(k, v)
|
req.Header.Add(k, v)
|
||||||
}
|
}
|
||||||
|
o.fs.addHeaders(req)
|
||||||
|
|
||||||
// Do the request
|
// Do the request
|
||||||
res, err := o.fs.httpClient.Do(req)
|
res, err := o.fs.httpClient.Do(req)
|
||||||
@@ -473,27 +568,27 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Mkdir makes the root directory of the Fs object
|
// Mkdir makes the root directory of the Fs object
|
||||||
func (f *Fs) Mkdir(dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
return errorReadOnly
|
return errorReadOnly
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove a remote http file object
|
// Remove a remote http file object
|
||||||
func (o *Object) Remove() error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
return errorReadOnly
|
return errorReadOnly
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rmdir removes the root directory of the Fs object
|
// Rmdir removes the root directory of the Fs object
|
||||||
func (f *Fs) Rmdir(dir string) error {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
return errorReadOnly
|
return errorReadOnly
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update in to the object with the modTime given of the given size
|
// Update in to the object with the modTime given of the given size
|
||||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||||
return errorReadOnly
|
return errorReadOnly
|
||||||
}
|
}
|
||||||
|
|
||||||
// MimeType of an Object if known, "" otherwise
|
// MimeType of an Object if known, "" otherwise
|
||||||
func (o *Object) MimeType() string {
|
func (o *Object) MimeType(ctx context.Context) string {
|
||||||
return o.contentType
|
return o.contentType
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,7 @@
|
|||||||
// +build go1.8
|
|
||||||
|
|
||||||
package http
|
package http
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
@@ -11,14 +10,15 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"sort"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/ncw/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/ncw/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/ncw/rclone/lib/rest"
|
"github.com/rclone/rclone/lib/rest"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
@@ -27,6 +27,7 @@ var (
|
|||||||
remoteName = "TestHTTP"
|
remoteName = "TestHTTP"
|
||||||
testPath = "test"
|
testPath = "test"
|
||||||
filesPath = filepath.Join(testPath, "files")
|
filesPath = filepath.Join(testPath, "files")
|
||||||
|
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
|
||||||
)
|
)
|
||||||
|
|
||||||
// prepareServer the test server and return a function to tidy it up afterwards
|
// prepareServer the test server and return a function to tidy it up afterwards
|
||||||
@@ -34,8 +35,16 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
|||||||
// file server for test/files
|
// file server for test/files
|
||||||
fileServer := http.FileServer(http.Dir(filesPath))
|
fileServer := http.FileServer(http.Dir(filesPath))
|
||||||
|
|
||||||
|
// test the headers are there then pass on to fileServer
|
||||||
|
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
|
||||||
|
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
|
||||||
|
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
|
||||||
|
fileServer.ServeHTTP(w, r)
|
||||||
|
})
|
||||||
|
|
||||||
// Make the test server
|
// Make the test server
|
||||||
ts := httptest.NewServer(fileServer)
|
ts := httptest.NewServer(handler)
|
||||||
|
|
||||||
// Configure the remote
|
// Configure the remote
|
||||||
config.LoadConfig()
|
config.LoadConfig()
|
||||||
@@ -46,8 +55,9 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
|||||||
// config.FileSet(remoteName, "url", ts.URL)
|
// config.FileSet(remoteName, "url", ts.URL)
|
||||||
|
|
||||||
m := configmap.Simple{
|
m := configmap.Simple{
|
||||||
"type": "http",
|
"type": "http",
|
||||||
"url": ts.URL,
|
"url": ts.URL,
|
||||||
|
"headers": strings.Join(headers, ","),
|
||||||
}
|
}
|
||||||
|
|
||||||
// return a function to tidy up
|
// return a function to tidy up
|
||||||
@@ -65,8 +75,8 @@ func prepare(t *testing.T) (fs.Fs, func()) {
|
|||||||
return f, tidy
|
return f, tidy
|
||||||
}
|
}
|
||||||
|
|
||||||
func testListRoot(t *testing.T, f fs.Fs) {
|
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
|
||||||
entries, err := f.List("")
|
entries, err := f.List(context.Background(), "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
sort.Sort(entries)
|
sort.Sort(entries)
|
||||||
@@ -93,22 +103,36 @@ func testListRoot(t *testing.T, f fs.Fs) {
|
|||||||
|
|
||||||
e = entries[3]
|
e = entries[3]
|
||||||
assert.Equal(t, "two.html", e.Remote())
|
assert.Equal(t, "two.html", e.Remote())
|
||||||
assert.Equal(t, int64(7), e.Size())
|
if noSlash {
|
||||||
_, ok = e.(*Object)
|
assert.Equal(t, int64(-1), e.Size())
|
||||||
assert.True(t, ok)
|
_, ok = e.(fs.Directory)
|
||||||
|
assert.True(t, ok)
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, int64(41), e.Size())
|
||||||
|
_, ok = e.(*Object)
|
||||||
|
assert.True(t, ok)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestListRoot(t *testing.T) {
|
func TestListRoot(t *testing.T) {
|
||||||
f, tidy := prepare(t)
|
f, tidy := prepare(t)
|
||||||
defer tidy()
|
defer tidy()
|
||||||
testListRoot(t, f)
|
testListRoot(t, f, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestListRootNoSlash(t *testing.T) {
|
||||||
|
f, tidy := prepare(t)
|
||||||
|
f.(*Fs).opt.NoSlash = true
|
||||||
|
defer tidy()
|
||||||
|
|
||||||
|
testListRoot(t, f, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestListSubDir(t *testing.T) {
|
func TestListSubDir(t *testing.T) {
|
||||||
f, tidy := prepare(t)
|
f, tidy := prepare(t)
|
||||||
defer tidy()
|
defer tidy()
|
||||||
|
|
||||||
entries, err := f.List("three")
|
entries, err := f.List(context.Background(), "three")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
sort.Sort(entries)
|
sort.Sort(entries)
|
||||||
@@ -126,7 +150,7 @@ func TestNewObject(t *testing.T) {
|
|||||||
f, tidy := prepare(t)
|
f, tidy := prepare(t)
|
||||||
defer tidy()
|
defer tidy()
|
||||||
|
|
||||||
o, err := f.NewObject("four/under four.txt")
|
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
assert.Equal(t, "four/under four.txt", o.Remote())
|
assert.Equal(t, "four/under four.txt", o.Remote())
|
||||||
@@ -136,7 +160,7 @@ func TestNewObject(t *testing.T) {
|
|||||||
|
|
||||||
// Test the time is correct on the object
|
// Test the time is correct on the object
|
||||||
|
|
||||||
tObj := o.ModTime()
|
tObj := o.ModTime(context.Background())
|
||||||
|
|
||||||
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
|
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -146,7 +170,7 @@ func TestNewObject(t *testing.T) {
|
|||||||
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
|
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
|
||||||
|
|
||||||
// check object not found
|
// check object not found
|
||||||
o, err = f.NewObject("not found.txt")
|
o, err = f.NewObject(context.Background(), "not found.txt")
|
||||||
assert.Nil(t, o)
|
assert.Nil(t, o)
|
||||||
assert.Equal(t, fs.ErrorObjectNotFound, err)
|
assert.Equal(t, fs.ErrorObjectNotFound, err)
|
||||||
}
|
}
|
||||||
@@ -155,11 +179,11 @@ func TestOpen(t *testing.T) {
|
|||||||
f, tidy := prepare(t)
|
f, tidy := prepare(t)
|
||||||
defer tidy()
|
defer tidy()
|
||||||
|
|
||||||
o, err := f.NewObject("four/under four.txt")
|
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Test normal read
|
// Test normal read
|
||||||
fd, err := o.Open()
|
fd, err := o.Open(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
data, err := ioutil.ReadAll(fd)
|
data, err := ioutil.ReadAll(fd)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -167,7 +191,7 @@ func TestOpen(t *testing.T) {
|
|||||||
assert.Equal(t, "beetroot\n", string(data))
|
assert.Equal(t, "beetroot\n", string(data))
|
||||||
|
|
||||||
// Test with range request
|
// Test with range request
|
||||||
fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
|
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
data, err = ioutil.ReadAll(fd)
|
data, err = ioutil.ReadAll(fd)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -179,12 +203,12 @@ func TestMimeType(t *testing.T) {
|
|||||||
f, tidy := prepare(t)
|
f, tidy := prepare(t)
|
||||||
defer tidy()
|
defer tidy()
|
||||||
|
|
||||||
o, err := f.NewObject("four/under four.txt")
|
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
do, ok := o.(fs.MimeTyper)
|
do, ok := o.(fs.MimeTyper)
|
||||||
require.True(t, ok)
|
require.True(t, ok)
|
||||||
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
|
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsAFileRoot(t *testing.T) {
|
func TestIsAFileRoot(t *testing.T) {
|
||||||
@@ -194,7 +218,7 @@ func TestIsAFileRoot(t *testing.T) {
|
|||||||
f, err := NewFs(remoteName, "one%.txt", m)
|
f, err := NewFs(remoteName, "one%.txt", m)
|
||||||
assert.Equal(t, err, fs.ErrorIsFile)
|
assert.Equal(t, err, fs.ErrorIsFile)
|
||||||
|
|
||||||
testListRoot(t, f)
|
testListRoot(t, f, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsAFileSubDir(t *testing.T) {
|
func TestIsAFileSubDir(t *testing.T) {
|
||||||
@@ -204,7 +228,7 @@ func TestIsAFileSubDir(t *testing.T) {
|
|||||||
f, err := NewFs(remoteName, "three/underthree.txt", m)
|
f, err := NewFs(remoteName, "three/underthree.txt", m)
|
||||||
assert.Equal(t, err, fs.ErrorIsFile)
|
assert.Equal(t, err, fs.ErrorIsFile)
|
||||||
|
|
||||||
entries, err := f.List("")
|
entries, err := f.List(context.Background(), "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
sort.Sort(entries)
|
sort.Sort(entries)
|
||||||
|
|||||||
@@ -1 +1 @@
|
|||||||
potato
|
<a href="two.html/file.txt">file.txt</a>
|
||||||
|
|||||||
@@ -24,7 +24,7 @@
|
|||||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td> </td></tr>
|
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td> </td></tr>
|
||||||
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td> </td></tr>
|
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td> </td></tr>
|
||||||
<tr><th colspan="5"><hr></th></tr>
|
<tr><th colspan="5"><hr></th></tr>
|
||||||
<!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
|
<!-- some extras from https://github.com/rclone/rclone/issues/1573 -->
|
||||||
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>
|
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>
|
||||||
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>
|
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>
|
||||||
|
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/swift"
|
"github.com/ncw/swift"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
)
|
)
|
||||||
|
|
||||||
// auth is an authenticator for swift
|
// auth is an authenticator for swift
|
||||||
|
|||||||
@@ -9,20 +9,22 @@ package hubic
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/swift"
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/config"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
|
||||||
"github.com/ncw/rclone/fs/config/obscure"
|
|
||||||
"github.com/ncw/rclone/fs/fshttp"
|
|
||||||
"github.com/ncw/rclone/lib/oauthutil"
|
|
||||||
swiftLib "github.com/ncw/swift"
|
swiftLib "github.com/ncw/swift"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/backend/swift"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
"github.com/rclone/rclone/lib/oauthutil"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -124,7 +126,9 @@ func (f *Fs) getCredentials() (err error) {
|
|||||||
}
|
}
|
||||||
defer fs.CheckClose(resp.Body, &err)
|
defer fs.CheckClose(resp.Body, &err)
|
||||||
if resp.StatusCode < 200 || resp.StatusCode > 299 {
|
if resp.StatusCode < 200 || resp.StatusCode > 299 {
|
||||||
return errors.Errorf("failed to get credentials: %s", resp.Status)
|
body, _ := ioutil.ReadAll(resp.Body)
|
||||||
|
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
|
||||||
|
return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
|
||||||
}
|
}
|
||||||
decoder := json.NewDecoder(resp.Body)
|
decoder := json.NewDecoder(resp.Body)
|
||||||
var result credentials
|
var result credentials
|
||||||
|
|||||||
@@ -4,14 +4,16 @@ package hubic_test
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/hubic"
|
"github.com/rclone/rclone/backend/hubic"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
func TestIntegration(t *testing.T) {
|
func TestIntegration(t *testing.T) {
|
||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: "TestHubic:",
|
RemoteName: "TestHubic:",
|
||||||
NilObject: (*hubic.Object)(nil),
|
NilObject: (*hubic.Object)(nil),
|
||||||
|
SkipFsCheckWrap: true,
|
||||||
|
SkipObjectCheckWrap: true,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -46,6 +46,82 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
|
|||||||
// APIString returns Time string in Jottacloud API format
|
// APIString returns Time string in Jottacloud API format
|
||||||
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
|
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
|
||||||
|
|
||||||
|
// TokenJSON is the struct representing the HTTP response from OAuth2
|
||||||
|
// providers returning a token in JSON form.
|
||||||
|
type TokenJSON struct {
|
||||||
|
AccessToken string `json:"access_token"`
|
||||||
|
TokenType string `json:"token_type"`
|
||||||
|
RefreshToken string `json:"refresh_token"`
|
||||||
|
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSON structures returned by new API
|
||||||
|
|
||||||
|
// AllocateFileRequest to prepare an upload to Jottacloud
|
||||||
|
type AllocateFileRequest struct {
|
||||||
|
Bytes int64 `json:"bytes"`
|
||||||
|
Created string `json:"created"`
|
||||||
|
Md5 string `json:"md5"`
|
||||||
|
Modified string `json:"modified"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllocateFileResponse for upload requests
|
||||||
|
type AllocateFileResponse struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
State string `json:"state"`
|
||||||
|
UploadID string `json:"upload_id"`
|
||||||
|
UploadURL string `json:"upload_url"`
|
||||||
|
Bytes int64 `json:"bytes"`
|
||||||
|
ResumePos int64 `json:"resume_pos"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadResponse after an upload
|
||||||
|
type UploadResponse struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
Kind string `json:"kind"`
|
||||||
|
ContentID string `json:"content_id"`
|
||||||
|
Bytes int64 `json:"bytes"`
|
||||||
|
Md5 string `json:"md5"`
|
||||||
|
Created int64 `json:"created"`
|
||||||
|
Modified int64 `json:"modified"`
|
||||||
|
Deleted interface{} `json:"deleted"`
|
||||||
|
Mime string `json:"mime"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeviceRegistrationResponse is the response to registering a device
|
||||||
|
type DeviceRegistrationResponse struct {
|
||||||
|
ClientID string `json:"client_id"`
|
||||||
|
ClientSecret string `json:"client_secret"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// CustomerInfo provides general information about the account. Required for finding the correct internal username.
|
||||||
|
type CustomerInfo struct {
|
||||||
|
Username string `json:"username"`
|
||||||
|
Email string `json:"email"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
CountryCode string `json:"country_code"`
|
||||||
|
LanguageCode string `json:"language_code"`
|
||||||
|
CustomerGroupCode string `json:"customer_group_code"`
|
||||||
|
BrandCode string `json:"brand_code"`
|
||||||
|
AccountType string `json:"account_type"`
|
||||||
|
SubscriptionType string `json:"subscription_type"`
|
||||||
|
Usage int64 `json:"usage"`
|
||||||
|
Qouta int64 `json:"quota"`
|
||||||
|
BusinessUsage int64 `json:"business_usage"`
|
||||||
|
BusinessQouta int64 `json:"business_quota"`
|
||||||
|
WriteLocked bool `json:"write_locked"`
|
||||||
|
ReadLocked bool `json:"read_locked"`
|
||||||
|
LockedCause interface{} `json:"locked_cause"`
|
||||||
|
WebHash string `json:"web_hash"`
|
||||||
|
AndroidHash string `json:"android_hash"`
|
||||||
|
IOSHash string `json:"ios_hash"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// XML structures returned by the old API
|
||||||
|
|
||||||
// Flag is a hacky type for checking if an attribute is present
|
// Flag is a hacky type for checking if an attribute is present
|
||||||
type Flag bool
|
type Flag bool
|
||||||
|
|
||||||
@@ -64,15 +140,6 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
|
|||||||
return attr, errors.New("unimplemented")
|
return attr, errors.New("unimplemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
// TokenJSON is the struct representing the HTTP response from OAuth2
|
|
||||||
// providers returning a token in JSON form.
|
|
||||||
type TokenJSON struct {
|
|
||||||
AccessToken string `json:"access_token"`
|
|
||||||
TokenType string `json:"token_type"`
|
|
||||||
RefreshToken string `json:"refresh_token"`
|
|
||||||
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
GET http://www.jottacloud.com/JFS/<account>
|
GET http://www.jottacloud.com/JFS/<account>
|
||||||
|
|
||||||
@@ -102,8 +169,8 @@ GET http://www.jottacloud.com/JFS/<account>
|
|||||||
</user>
|
</user>
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// AccountInfo represents a Jottacloud account
|
// DriveInfo represents a Jottacloud account
|
||||||
type AccountInfo struct {
|
type DriveInfo struct {
|
||||||
Username string `xml:"username"`
|
Username string `xml:"username"`
|
||||||
AccountType string `xml:"account-type"`
|
AccountType string `xml:"account-type"`
|
||||||
Locked bool `xml:"locked"`
|
Locked bool `xml:"locked"`
|
||||||
@@ -280,37 +347,3 @@ func (e *Error) Error() string {
|
|||||||
}
|
}
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// AllocateFileRequest to prepare an upload to Jottacloud
|
|
||||||
type AllocateFileRequest struct {
|
|
||||||
Bytes int64 `json:"bytes"`
|
|
||||||
Created string `json:"created"`
|
|
||||||
Md5 string `json:"md5"`
|
|
||||||
Modified string `json:"modified"`
|
|
||||||
Path string `json:"path"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllocateFileResponse for upload requests
|
|
||||||
type AllocateFileResponse struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Path string `json:"path"`
|
|
||||||
State string `json:"state"`
|
|
||||||
UploadID string `json:"upload_id"`
|
|
||||||
UploadURL string `json:"upload_url"`
|
|
||||||
Bytes int64 `json:"bytes"`
|
|
||||||
ResumePos int64 `json:"resume_pos"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UploadResponse after an upload
|
|
||||||
type UploadResponse struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Path string `json:"path"`
|
|
||||||
Kind string `json:"kind"`
|
|
||||||
ContentID string `json:"content_id"`
|
|
||||||
Bytes int64 `json:"bytes"`
|
|
||||||
Md5 string `json:"md5"`
|
|
||||||
Created int64 `json:"created"`
|
|
||||||
Modified int64 `json:"modified"`
|
|
||||||
Deleted interface{} `json:"deleted"`
|
|
||||||
Mime string `json:"mime"`
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -2,12 +2,14 @@ package jottacloud
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"crypto/md5"
|
"crypto/md5"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
|
"math/rand"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
@@ -16,21 +18,21 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/jottacloud/api"
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/ncw/rclone/fs/accounting"
|
|
||||||
"github.com/ncw/rclone/fs/config"
|
|
||||||
"github.com/ncw/rclone/fs/config/configmap"
|
|
||||||
"github.com/ncw/rclone/fs/config/configstruct"
|
|
||||||
"github.com/ncw/rclone/fs/config/obscure"
|
|
||||||
"github.com/ncw/rclone/fs/fserrors"
|
|
||||||
"github.com/ncw/rclone/fs/fshttp"
|
|
||||||
"github.com/ncw/rclone/fs/hash"
|
|
||||||
"github.com/ncw/rclone/fs/walk"
|
|
||||||
"github.com/ncw/rclone/lib/oauthutil"
|
|
||||||
"github.com/ncw/rclone/lib/pacer"
|
|
||||||
"github.com/ncw/rclone/lib/rest"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/backend/jottacloud/api"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/fs/walk"
|
||||||
|
"github.com/rclone/rclone/lib/oauthutil"
|
||||||
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
|
"github.com/rclone/rclone/lib/rest"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -40,15 +42,20 @@ const (
|
|||||||
maxSleep = 2 * time.Second
|
maxSleep = 2 * time.Second
|
||||||
decayConstant = 2 // bigger for slower decay, exponential
|
decayConstant = 2 // bigger for slower decay, exponential
|
||||||
defaultDevice = "Jotta"
|
defaultDevice = "Jotta"
|
||||||
defaultMountpoint = "Sync"
|
defaultMountpoint = "Archive"
|
||||||
rootURL = "https://www.jottacloud.com/jfs/"
|
rootURL = "https://www.jottacloud.com/jfs/"
|
||||||
apiURL = "https://api.jottacloud.com/files/v1/"
|
apiURL = "https://api.jottacloud.com/"
|
||||||
baseURL = "https://www.jottacloud.com/"
|
baseURL = "https://www.jottacloud.com/"
|
||||||
tokenURL = "https://api.jottacloud.com/auth/v1/token"
|
tokenURL = "https://api.jottacloud.com/auth/v1/token"
|
||||||
|
registerURL = "https://api.jottacloud.com/auth/v1/register"
|
||||||
cachePrefix = "rclone-jcmd5-"
|
cachePrefix = "rclone-jcmd5-"
|
||||||
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
|
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
|
||||||
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
||||||
configUsername = "user"
|
configClientID = "client_id"
|
||||||
|
configClientSecret = "client_secret"
|
||||||
|
configDevice = "device"
|
||||||
|
configMountpoint = "mountpoint"
|
||||||
|
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -58,14 +65,13 @@ var (
|
|||||||
AuthURL: tokenURL,
|
AuthURL: tokenURL,
|
||||||
TokenURL: tokenURL,
|
TokenURL: tokenURL,
|
||||||
},
|
},
|
||||||
ClientID: rcloneClientID,
|
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
|
||||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
func init() {
|
func init() {
|
||||||
|
// needs to be done early so we can use oauth during config
|
||||||
fs.Register(&fs.RegInfo{
|
fs.Register(&fs.RegInfo{
|
||||||
Name: "jottacloud",
|
Name: "jottacloud",
|
||||||
Description: "JottaCloud",
|
Description: "JottaCloud",
|
||||||
@@ -79,74 +85,62 @@ func init() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
username, ok := m.Get(configUsername)
|
|
||||||
if !ok {
|
|
||||||
log.Fatalf("No username defined")
|
|
||||||
}
|
|
||||||
password := config.GetPassword("Your Jottacloud password is only required during config and will not be stored.")
|
|
||||||
|
|
||||||
// prepare out token request with username and password
|
|
||||||
srv := rest.NewClient(fshttp.NewClient(fs.Config))
|
srv := rest.NewClient(fshttp.NewClient(fs.Config))
|
||||||
values := url.Values{}
|
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
|
||||||
values.Set("grant_type", "PASSWORD")
|
if config.Confirm() {
|
||||||
values.Set("password", password)
|
deviceRegistration, err := registerDevice(srv)
|
||||||
values.Set("username", username)
|
|
||||||
values.Set("client_id", oauthConfig.ClientID)
|
|
||||||
values.Set("client_secret", oauthConfig.ClientSecret)
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
RootURL: oauthConfig.Endpoint.AuthURL,
|
|
||||||
ContentType: "application/x-www-form-urlencoded",
|
|
||||||
Parameters: values,
|
|
||||||
}
|
|
||||||
|
|
||||||
var jsonToken api.TokenJSON
|
|
||||||
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
|
|
||||||
if err != nil {
|
|
||||||
// if 2fa is enabled the first request is expected to fail. we'lls do another request with the 2fa code as an additional http header
|
|
||||||
if resp != nil {
|
|
||||||
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
|
|
||||||
fmt.Printf("This account has 2 factor authentication enabled you will receive a verification code via SMS.\n")
|
|
||||||
fmt.Printf("Enter verification code> ")
|
|
||||||
authCode := config.ReadLine()
|
|
||||||
authCode = strings.Replace(authCode, "-", "", -1) // the sms received contains a pair of 3 digit numbers seperated by '-' but wants a single 6 digit number
|
|
||||||
opts.ExtraHeaders = make(map[string]string)
|
|
||||||
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
|
|
||||||
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to get resource token: %v", err)
|
log.Fatalf("Failed to register device: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
m.Set(configClientID, deviceRegistration.ClientID)
|
||||||
|
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
|
||||||
|
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
|
||||||
}
|
}
|
||||||
|
|
||||||
var token oauth2.Token
|
clientID, ok := m.Get(configClientID)
|
||||||
token.AccessToken = jsonToken.AccessToken
|
if !ok {
|
||||||
token.RefreshToken = jsonToken.RefreshToken
|
clientID = rcloneClientID
|
||||||
token.TokenType = jsonToken.TokenType
|
}
|
||||||
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
|
clientSecret, ok := m.Get(configClientSecret)
|
||||||
|
if !ok {
|
||||||
|
clientSecret = rcloneEncryptedClientSecret
|
||||||
|
}
|
||||||
|
oauthConfig.ClientID = clientID
|
||||||
|
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
|
||||||
|
|
||||||
// finally save them in the config
|
fmt.Printf("Username> ")
|
||||||
|
username := config.ReadLine()
|
||||||
|
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
|
||||||
|
|
||||||
|
token, err := doAuth(srv, username, password)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to get oauth token: %s", err)
|
||||||
|
}
|
||||||
err = oauthutil.PutToken(name, m, &token, true)
|
err = oauthutil.PutToken(name, m, &token, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Error while setting token: %s", err)
|
log.Fatalf("Error while saving token: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
|
||||||
|
if config.Confirm() {
|
||||||
|
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to load oAuthClient: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||||
|
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
||||||
|
|
||||||
|
device, mountpoint, err := setupMountpoint(srv, apiSrv)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to setup mountpoint: %s", err)
|
||||||
|
}
|
||||||
|
m.Set(configDevice, device)
|
||||||
|
m.Set(configMountpoint, mountpoint)
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: configUsername,
|
|
||||||
Help: "User Name:",
|
|
||||||
}, {
|
|
||||||
Name: "mountpoint",
|
|
||||||
Help: "The mountpoint to use.",
|
|
||||||
Required: true,
|
|
||||||
Examples: []fs.OptionExample{{
|
|
||||||
Value: "Sync",
|
|
||||||
Help: "Will be synced by the official client.",
|
|
||||||
}, {
|
|
||||||
Value: "Archive",
|
|
||||||
Help: "Archive",
|
|
||||||
}},
|
|
||||||
}, {
|
|
||||||
Name: "md5_memory_limit",
|
Name: "md5_memory_limit",
|
||||||
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
|
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
|
||||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||||
@@ -163,7 +157,7 @@ func init() {
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "upload_resume_limit",
|
Name: "upload_resume_limit",
|
||||||
Help: "Files bigger than this can be resumed if the upload failes.",
|
Help: "Files bigger than this can be resumed if the upload fail's.",
|
||||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}},
|
}},
|
||||||
@@ -172,7 +166,7 @@ func init() {
|
|||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
User string `config:"user"`
|
Device string `config:"device"`
|
||||||
Mountpoint string `config:"mountpoint"`
|
Mountpoint string `config:"mountpoint"`
|
||||||
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
|
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
|
||||||
HardDelete bool `config:"hard_delete"`
|
HardDelete bool `config:"hard_delete"`
|
||||||
@@ -190,7 +184,7 @@ type Fs struct {
|
|||||||
endpointURL string
|
endpointURL string
|
||||||
srv *rest.Client
|
srv *rest.Client
|
||||||
apiSrv *rest.Client
|
apiSrv *rest.Client
|
||||||
pacer *pacer.Pacer
|
pacer *fs.Pacer
|
||||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -251,6 +245,167 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
|
|||||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// registerDevice register a new device for use with the jottacloud API
|
||||||
|
func registerDevice(srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
|
||||||
|
// random generator to generate random device names
|
||||||
|
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||||
|
randonDeviceNamePartLength := 21
|
||||||
|
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
|
||||||
|
for i := range randomDeviceNamePart {
|
||||||
|
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
|
||||||
|
}
|
||||||
|
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
|
||||||
|
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
|
||||||
|
|
||||||
|
values := url.Values{}
|
||||||
|
values.Set("device_id", randomDeviceName)
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
RootURL: registerURL,
|
||||||
|
ContentType: "application/x-www-form-urlencoded",
|
||||||
|
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
|
||||||
|
Parameters: values,
|
||||||
|
}
|
||||||
|
|
||||||
|
var deviceRegistration *api.DeviceRegistrationResponse
|
||||||
|
_, err = srv.CallJSON(&opts, nil, &deviceRegistration)
|
||||||
|
return deviceRegistration, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// doAuth runs the actual token request
|
||||||
|
func doAuth(srv *rest.Client, username, password string) (token oauth2.Token, err error) {
|
||||||
|
// prepare out token request with username and password
|
||||||
|
values := url.Values{}
|
||||||
|
values.Set("grant_type", "PASSWORD")
|
||||||
|
values.Set("password", password)
|
||||||
|
values.Set("username", username)
|
||||||
|
values.Set("client_id", oauthConfig.ClientID)
|
||||||
|
values.Set("client_secret", oauthConfig.ClientSecret)
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
RootURL: oauthConfig.Endpoint.AuthURL,
|
||||||
|
ContentType: "application/x-www-form-urlencoded",
|
||||||
|
Parameters: values,
|
||||||
|
}
|
||||||
|
|
||||||
|
// do the first request
|
||||||
|
var jsonToken api.TokenJSON
|
||||||
|
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
|
||||||
|
if err != nil {
|
||||||
|
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
|
||||||
|
if resp != nil {
|
||||||
|
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
|
||||||
|
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
|
||||||
|
fmt.Printf("Enter verification code> ")
|
||||||
|
authCode := config.ReadLine()
|
||||||
|
|
||||||
|
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
|
||||||
|
opts.ExtraHeaders = make(map[string]string)
|
||||||
|
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
|
||||||
|
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
token.AccessToken = jsonToken.AccessToken
|
||||||
|
token.RefreshToken = jsonToken.RefreshToken
|
||||||
|
token.TokenType = jsonToken.TokenType
|
||||||
|
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
|
||||||
|
return token, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// setupMountpoint sets up a custom device and mountpoint if desired by the user
|
||||||
|
func setupMountpoint(srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
|
||||||
|
cust, err := getCustomerInfo(apiSrv)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
acc, err := getDriveInfo(srv, cust.Username)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
var deviceNames []string
|
||||||
|
for i := range acc.Devices {
|
||||||
|
deviceNames = append(deviceNames, acc.Devices[i].Name)
|
||||||
|
}
|
||||||
|
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
|
||||||
|
device = config.Choose("Devices", deviceNames, nil, false)
|
||||||
|
|
||||||
|
dev, err := getDeviceInfo(srv, path.Join(cust.Username, device))
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
if len(dev.MountPoints) == 0 {
|
||||||
|
return "", "", errors.New("no mountpoints for selected device")
|
||||||
|
}
|
||||||
|
var mountpointNames []string
|
||||||
|
for i := range dev.MountPoints {
|
||||||
|
mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
|
||||||
|
}
|
||||||
|
fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
|
||||||
|
mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)
|
||||||
|
|
||||||
|
return device, mountpoint, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// getCustomerInfo queries general information about the account
|
||||||
|
func getCustomerInfo(srv *rest.Client) (info *api.CustomerInfo, err error) {
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
Path: "account/v1/customer",
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = srv.CallJSON(&opts, nil, &info)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't get customer info")
|
||||||
|
}
|
||||||
|
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDriveInfo queries general information about the account and the available devices and mountpoints.
|
||||||
|
func getDriveInfo(srv *rest.Client, username string) (info *api.DriveInfo, err error) {
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
Path: username,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = srv.CallXML(&opts, nil, &info)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't get drive info")
|
||||||
|
}
|
||||||
|
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDeviceInfo queries Information about a jottacloud device
|
||||||
|
func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
Path: urlPathEscape(path),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = srv.CallXML(&opts, nil, &info)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't get device info")
|
||||||
|
}
|
||||||
|
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// setEndpointURL generates the API endpoint URL
|
||||||
|
func (f *Fs) setEndpointURL() {
|
||||||
|
if f.opt.Device == "" {
|
||||||
|
f.opt.Device = defaultDevice
|
||||||
|
}
|
||||||
|
if f.opt.Mountpoint == "" {
|
||||||
|
f.opt.Mountpoint = defaultMountpoint
|
||||||
|
}
|
||||||
|
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
|
||||||
|
}
|
||||||
|
|
||||||
// readMetaDataForPath reads the metadata from the path
|
// readMetaDataForPath reads the metadata from the path
|
||||||
func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
|
func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
@@ -280,35 +435,6 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
|
|||||||
return &result, nil
|
return &result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getAccountInfo retrieves account information
|
|
||||||
func (f *Fs) getAccountInfo() (info *api.AccountInfo, err error) {
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
Path: urlPathEscape(f.user),
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp *http.Response
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err = f.srv.CallXML(&opts, nil, &info)
|
|
||||||
return shouldRetry(resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return info, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// setEndpointUrl reads the account id and generates the API endpoint URL
|
|
||||||
func (f *Fs) setEndpointURL(mountpoint string) (err error) {
|
|
||||||
info, err := f.getAccountInfo()
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "failed to get endpoint url")
|
|
||||||
}
|
|
||||||
f.endpointURL = urlPathEscape(path.Join(info.Username, defaultDevice, mountpoint))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// errorHandler parses a non 2xx error response into an error
|
// errorHandler parses a non 2xx error response into an error
|
||||||
func errorHandler(resp *http.Response) error {
|
func errorHandler(resp *http.Response) error {
|
||||||
// Decode error response
|
// Decode error response
|
||||||
@@ -341,11 +467,6 @@ func (f *Fs) filePath(file string) string {
|
|||||||
return urlPathEscape(f.filePathRaw(file))
|
return urlPathEscape(f.filePathRaw(file))
|
||||||
}
|
}
|
||||||
|
|
||||||
// filePath returns a escaped file path (f.root, remote)
|
|
||||||
func (o *Object) filePath() string {
|
|
||||||
return o.fs.filePath(o.remote)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Jottacloud requires the grant_type 'refresh_token' string
|
// Jottacloud requires the grant_type 'refresh_token' string
|
||||||
// to be uppercase and throws a 400 Bad Request if we use the
|
// to be uppercase and throws a 400 Bad Request if we use the
|
||||||
// lower case used by the oauth2 module
|
// lower case used by the oauth2 module
|
||||||
@@ -361,7 +482,7 @@ func grantTypeFilter(req *http.Request) {
|
|||||||
}
|
}
|
||||||
_ = req.Body.Close()
|
_ = req.Body.Close()
|
||||||
|
|
||||||
// make the refesh token upper case
|
// make the refresh token upper case
|
||||||
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
|
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
|
||||||
|
|
||||||
// set the new ReadCloser (with a dummy Close())
|
// set the new ReadCloser (with a dummy Close())
|
||||||
@@ -381,6 +502,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
rootIsDir := strings.HasSuffix(root, "/")
|
rootIsDir := strings.HasSuffix(root, "/")
|
||||||
root = parsePath(root)
|
root = parsePath(root)
|
||||||
|
|
||||||
|
clientID, ok := m.Get(configClientID)
|
||||||
|
if !ok {
|
||||||
|
clientID = rcloneClientID
|
||||||
|
}
|
||||||
|
clientSecret, ok := m.Get(configClientSecret)
|
||||||
|
if !ok {
|
||||||
|
clientSecret = rcloneEncryptedClientSecret
|
||||||
|
}
|
||||||
|
oauthConfig.ClientID = clientID
|
||||||
|
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
|
||||||
|
|
||||||
// the oauth client for the api servers needs
|
// the oauth client for the api servers needs
|
||||||
// a filter to fix the grant_type issues (see above)
|
// a filter to fix the grant_type issues (see above)
|
||||||
baseClient := fshttp.NewClient(fs.Config)
|
baseClient := fshttp.NewClient(fs.Config)
|
||||||
@@ -399,11 +531,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
root: root,
|
root: root,
|
||||||
user: opt.User,
|
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||||
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
|
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
|
||||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: true,
|
CaseInsensitive: true,
|
||||||
@@ -419,10 +550,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
|
|
||||||
err = f.setEndpointURL(opt.Mountpoint)
|
cust, err := getCustomerInfo(f.apiSrv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't get account info")
|
return nil, err
|
||||||
}
|
}
|
||||||
|
f.user = cust.Username
|
||||||
|
f.setEndpointURL()
|
||||||
|
|
||||||
if root != "" && !rootIsDir {
|
if root != "" && !rootIsDir {
|
||||||
// Check to see if the root actually an existing file
|
// Check to see if the root actually an existing file
|
||||||
@@ -431,7 +564,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
if f.root == "." {
|
if f.root == "." {
|
||||||
f.root = ""
|
f.root = ""
|
||||||
}
|
}
|
||||||
_, err := f.NewObject(remote)
|
_, err := f.NewObject(context.TODO(), remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
||||||
// File doesn't exist so return old f
|
// File doesn't exist so return old f
|
||||||
@@ -469,7 +602,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
|
|||||||
|
|
||||||
// NewObject finds the Object at remote. If it can't be found
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
// it returns the error fs.ErrorObjectNotFound.
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
return f.newObjectWithInfo(remote, nil)
|
return f.newObjectWithInfo(remote, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -506,8 +639,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
|
|||||||
//
|
//
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
//fmt.Printf("List: %s\n", f.filePath(dir))
|
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
Path: f.filePath(dir),
|
Path: f.filePath(dir),
|
||||||
@@ -556,7 +688,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
}
|
}
|
||||||
entries = append(entries, o)
|
entries = append(entries, o)
|
||||||
}
|
}
|
||||||
//fmt.Printf("Entries: %+v\n", entries)
|
|
||||||
return entries, nil
|
return entries, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -612,18 +743,7 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
|
|||||||
//
|
//
|
||||||
// dir should be "" to start from the root, and should not
|
// dir should be "" to start from the root, and should not
|
||||||
// have trailing slashes.
|
// have trailing slashes.
|
||||||
//
|
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
//
|
|
||||||
// It should call callback for each tranche of entries read.
|
|
||||||
// These need not be returned in any particular order. If
|
|
||||||
// callback returns an error then the listing will stop
|
|
||||||
// immediately.
|
|
||||||
//
|
|
||||||
// Don't implement this unless you have a more efficient way
|
|
||||||
// of listing recursively that doing a directory traversal.
|
|
||||||
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
Path: f.filePath(dir),
|
Path: f.filePath(dir),
|
||||||
@@ -676,14 +796,17 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
|
|||||||
// Copy the reader in to the new object which is returned
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
o := f.createObject(src.Remote(), src.ModTime(), src.Size())
|
if f.opt.Device != "Jotta" {
|
||||||
return o, o.Update(in, src, options...)
|
return nil, errors.New("upload not supported for devices other than Jotta")
|
||||||
|
}
|
||||||
|
o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
|
||||||
|
return o, o.Update(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// mkParentDir makes the parent of the native path dirPath if
|
// mkParentDir makes the parent of the native path dirPath if
|
||||||
// necessary and any directories above that
|
// necessary and any directories above that
|
||||||
func (f *Fs) mkParentDir(dirPath string) error {
|
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
|
||||||
// defer log.Trace(dirPath, "")("")
|
// defer log.Trace(dirPath, "")("")
|
||||||
// chop off trailing / if it exists
|
// chop off trailing / if it exists
|
||||||
if strings.HasSuffix(dirPath, "/") {
|
if strings.HasSuffix(dirPath, "/") {
|
||||||
@@ -693,25 +816,25 @@ func (f *Fs) mkParentDir(dirPath string) error {
|
|||||||
if parent == "." {
|
if parent == "." {
|
||||||
parent = ""
|
parent = ""
|
||||||
}
|
}
|
||||||
return f.Mkdir(parent)
|
return f.Mkdir(ctx, parent)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mkdir creates the container if it doesn't exist
|
// Mkdir creates the container if it doesn't exist
|
||||||
func (f *Fs) Mkdir(dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
_, err := f.CreateDir(dir)
|
_, err := f.CreateDir(dir)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// purgeCheck removes the root directory, if check is set then it
|
// purgeCheck removes the root directory, if check is set then it
|
||||||
// refuses to do so if it has anything in
|
// refuses to do so if it has anything in
|
||||||
func (f *Fs) purgeCheck(dir string, check bool) (err error) {
|
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
|
||||||
root := path.Join(f.root, dir)
|
root := path.Join(f.root, dir)
|
||||||
if root == "" {
|
if root == "" {
|
||||||
return errors.New("can't purge root directory")
|
return errors.New("can't purge root directory")
|
||||||
}
|
}
|
||||||
|
|
||||||
// check that the directory exists
|
// check that the directory exists
|
||||||
entries, err := f.List(dir)
|
entries, err := f.List(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -744,15 +867,14 @@ func (f *Fs) purgeCheck(dir string, check bool) (err error) {
|
|||||||
return errors.Wrap(err, "couldn't purge directory")
|
return errors.Wrap(err, "couldn't purge directory")
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Parse response?
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rmdir deletes the root folder
|
// Rmdir deletes the root folder
|
||||||
//
|
//
|
||||||
// Returns an error if it isn't empty
|
// Returns an error if it isn't empty
|
||||||
func (f *Fs) Rmdir(dir string) error {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
return f.purgeCheck(dir, true)
|
return f.purgeCheck(ctx, dir, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Precision return the precision of this Fs
|
// Precision return the precision of this Fs
|
||||||
@@ -761,15 +883,11 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Purge deletes all the files and the container
|
// Purge deletes all the files and the container
|
||||||
//
|
func (f *Fs) Purge(ctx context.Context) error {
|
||||||
// Optional interface: Only implement this if you have a way of
|
return f.purgeCheck(ctx, "", false)
|
||||||
// deleting all the files quicker than just running Remove() on the
|
|
||||||
// result of List()
|
|
||||||
func (f *Fs) Purge() error {
|
|
||||||
return f.purgeCheck("", false)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// copyOrMoves copys or moves directories or files depending on the mthod parameter
|
// copyOrMoves copies or moves directories or files depending on the method parameter
|
||||||
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
|
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
@@ -799,14 +917,14 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantCopy
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
srcObj, ok := src.(*Object)
|
srcObj, ok := src.(*Object)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(src, "Can't copy - not same remote type")
|
fs.Debugf(src, "Can't copy - not same remote type")
|
||||||
return nil, fs.ErrorCantMove
|
return nil, fs.ErrorCantMove
|
||||||
}
|
}
|
||||||
|
|
||||||
err := f.mkParentDir(remote)
|
err := f.mkParentDir(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -829,14 +947,14 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantMove
|
// If it isn't possible then return fs.ErrorCantMove
|
||||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
srcObj, ok := src.(*Object)
|
srcObj, ok := src.(*Object)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(src, "Can't move - not same remote type")
|
fs.Debugf(src, "Can't move - not same remote type")
|
||||||
return nil, fs.ErrorCantMove
|
return nil, fs.ErrorCantMove
|
||||||
}
|
}
|
||||||
|
|
||||||
err := f.mkParentDir(remote)
|
err := f.mkParentDir(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -858,7 +976,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
|||||||
// If it isn't possible then return fs.ErrorCantDirMove
|
// If it isn't possible then return fs.ErrorCantDirMove
|
||||||
//
|
//
|
||||||
// If destination exists then return fs.ErrorDirExists
|
// If destination exists then return fs.ErrorDirExists
|
||||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||||
srcFs, ok := src.(*Fs)
|
srcFs, ok := src.(*Fs)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||||
@@ -875,7 +993,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
//fmt.Printf("Move src: %s (FullPath %s), dst: %s (FullPath: %s)\n", srcRemote, srcPath, dstRemote, dstPath)
|
//fmt.Printf("Move src: %s (FullPath %s), dst: %s (FullPath: %s)\n", srcRemote, srcPath, dstRemote, dstPath)
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
_, err = f.List(dstRemote)
|
_, err = f.List(ctx, dstRemote)
|
||||||
if err == fs.ErrorDirNotFound {
|
if err == fs.ErrorDirNotFound {
|
||||||
// OK
|
// OK
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
@@ -893,7 +1011,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||||
func (f *Fs) PublicLink(remote string) (link string, err error) {
|
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
Path: f.filePath(remote),
|
Path: f.filePath(remote),
|
||||||
@@ -939,8 +1057,8 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// About gets quota information
|
// About gets quota information
|
||||||
func (f *Fs) About() (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
info, err := f.getAccountInfo()
|
info, err := getDriveInfo(f.srv, f.user)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -980,8 +1098,13 @@ func (o *Object) Remote() string {
|
|||||||
return o.remote
|
return o.remote
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// filePath returns a escaped file path (f.root, remote)
|
||||||
|
func (o *Object) filePath() string {
|
||||||
|
return o.fs.filePath(o.remote)
|
||||||
|
}
|
||||||
|
|
||||||
// Hash returns the MD5 of an object returning a lowercase hex string
|
// Hash returns the MD5 of an object returning a lowercase hex string
|
||||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||||
if t != hash.MD5 {
|
if t != hash.MD5 {
|
||||||
return "", hash.ErrUnsupported
|
return "", hash.ErrUnsupported
|
||||||
}
|
}
|
||||||
@@ -999,20 +1122,21 @@ func (o *Object) Size() int64 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MimeType of an Object if known, "" otherwise
|
// MimeType of an Object if known, "" otherwise
|
||||||
func (o *Object) MimeType() string {
|
func (o *Object) MimeType(ctx context.Context) string {
|
||||||
return o.mimeType
|
return o.mimeType
|
||||||
}
|
}
|
||||||
|
|
||||||
// setMetaData sets the metadata from info
|
// setMetaData sets the metadata from info
|
||||||
func (o *Object) setMetaData(info *api.JottaFile) (err error) {
|
func (o *Object) setMetaData(info *api.JottaFile) (err error) {
|
||||||
o.hasMetaData = true
|
o.hasMetaData = true
|
||||||
o.size = int64(info.Size)
|
o.size = info.Size
|
||||||
o.md5 = info.MD5
|
o.md5 = info.MD5
|
||||||
o.mimeType = info.MimeType
|
o.mimeType = info.MimeType
|
||||||
o.modTime = time.Time(info.ModifiedAt)
|
o.modTime = time.Time(info.ModifiedAt)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// readMetaData reads and updates the metadata for an object
|
||||||
func (o *Object) readMetaData(force bool) (err error) {
|
func (o *Object) readMetaData(force bool) (err error) {
|
||||||
if o.hasMetaData && !force {
|
if o.hasMetaData && !force {
|
||||||
return nil
|
return nil
|
||||||
@@ -1031,7 +1155,7 @@ func (o *Object) readMetaData(force bool) (err error) {
|
|||||||
//
|
//
|
||||||
// It attempts to read the objects mtime and if that isn't present the
|
// It attempts to read the objects mtime and if that isn't present the
|
||||||
// LastModified returned in the http headers
|
// LastModified returned in the http headers
|
||||||
func (o *Object) ModTime() time.Time {
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
err := o.readMetaData(false)
|
err := o.readMetaData(false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||||
@@ -1041,7 +1165,7 @@ func (o *Object) ModTime() time.Time {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
// SetModTime sets the modification time of the local fs object
|
||||||
func (o *Object) SetModTime(modTime time.Time) error {
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||||
return fs.ErrorCantSetModTime
|
return fs.ErrorCantSetModTime
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1051,7 +1175,7 @@ func (o *Object) Storable() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open an object for read
|
// Open an object for read
|
||||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
fs.FixRangeOption(options, o.size)
|
fs.FixRangeOption(options, o.size)
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
@@ -1080,7 +1204,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
|||||||
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
|
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
|
||||||
// we need a MD5
|
// we need a MD5
|
||||||
md5Hasher := md5.New()
|
md5Hasher := md5.New()
|
||||||
// use the teeReader to write to the local file AND caclulate the MD5 while doing so
|
// use the teeReader to write to the local file AND calculate the MD5 while doing so
|
||||||
teeReader := io.TeeReader(in, md5Hasher)
|
teeReader := io.TeeReader(in, md5Hasher)
|
||||||
|
|
||||||
// nothing to clean up by default
|
// nothing to clean up by default
|
||||||
@@ -1135,9 +1259,9 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
|
|||||||
// If existing is set then it updates the object rather than creating a new one
|
// If existing is set then it updates the object rather than creating a new one
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
md5String, err := src.Hash(hash.MD5)
|
md5String, err := src.Hash(ctx, hash.MD5)
|
||||||
if err != nil || md5String == "" {
|
if err != nil || md5String == "" {
|
||||||
// unwrap the accounting from the input, we use wrap to put it
|
// unwrap the accounting from the input, we use wrap to put it
|
||||||
// back on after the buffering
|
// back on after the buffering
|
||||||
@@ -1157,10 +1281,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
Path: "allocate",
|
Path: "files/v1/allocate",
|
||||||
ExtraHeaders: make(map[string]string),
|
ExtraHeaders: make(map[string]string),
|
||||||
}
|
}
|
||||||
fileDate := api.Time(src.ModTime()).APIString()
|
fileDate := api.Time(src.ModTime(ctx)).APIString()
|
||||||
|
|
||||||
// the allocate request
|
// the allocate request
|
||||||
var request = api.AllocateFileRequest{
|
var request = api.AllocateFileRequest{
|
||||||
@@ -1212,11 +1336,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
|
|
||||||
// finally update the meta data
|
// finally update the meta data
|
||||||
o.hasMetaData = true
|
o.hasMetaData = true
|
||||||
o.size = int64(result.Bytes)
|
o.size = result.Bytes
|
||||||
o.md5 = result.Md5
|
o.md5 = result.Md5
|
||||||
o.modTime = time.Unix(result.Modified/1000, 0)
|
o.modTime = time.Unix(result.Modified/1000, 0)
|
||||||
} else {
|
} else {
|
||||||
// If the file state is COMPLETE we don't need to upload it because the file was allready found but we still ned to update our metadata
|
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
|
||||||
return o.readMetaData(true)
|
return o.readMetaData(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1224,7 +1348,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove() error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
Path: o.filePath(),
|
Path: o.filePath(),
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/lib/readers"
|
"github.com/rclone/rclone/lib/readers"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ package jottacloud_test
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ncw/rclone/backend/jottacloud"
|
"github.com/rclone/rclone/backend/jottacloud"
|
||||||
"github.com/ncw/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
Translate file names for JottaCloud adapted from OneDrive
|
Translate file names for JottaCloud adapted from OneDrive
|
||||||
|
|
||||||
|
|
||||||
The following characters are JottaClous reserved characters, and can't
|
The following characters are JottaCloud reserved characters, and can't
|
||||||
be used in JottaCloud folder and file names.
|
be used in JottaCloud folder and file names.
|
||||||
|
|
||||||
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
|
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
|
||||||
|
|||||||
599
backend/koofr/koofr.go
Normal file
599
backend/koofr/koofr.go
Normal file
@@ -0,0 +1,599 @@
|
|||||||
|
package koofr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
|
||||||
|
httpclient "github.com/koofr/go-httpclient"
|
||||||
|
koofrclient "github.com/koofr/go-koofrclient"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Register Fs with rclone
|
||||||
|
func init() {
|
||||||
|
fs.Register(&fs.RegInfo{
|
||||||
|
Name: "koofr",
|
||||||
|
Description: "Koofr",
|
||||||
|
NewFs: NewFs,
|
||||||
|
Options: []fs.Option{
|
||||||
|
{
|
||||||
|
Name: "endpoint",
|
||||||
|
Help: "The Koofr API endpoint to use",
|
||||||
|
Default: "https://app.koofr.net",
|
||||||
|
Required: true,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "mountid",
|
||||||
|
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
|
||||||
|
Required: false,
|
||||||
|
Default: "",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "setmtime",
|
||||||
|
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
|
||||||
|
Default: true,
|
||||||
|
Required: true,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "user",
|
||||||
|
Help: "Your Koofr user name",
|
||||||
|
Required: true,
|
||||||
|
}, {
|
||||||
|
Name: "password",
|
||||||
|
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
|
||||||
|
IsPassword: true,
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Options represent the configuration of the Koofr backend
|
||||||
|
type Options struct {
|
||||||
|
Endpoint string `config:"endpoint"`
|
||||||
|
MountID string `config:"mountid"`
|
||||||
|
User string `config:"user"`
|
||||||
|
Password string `config:"password"`
|
||||||
|
SetMTime bool `config:"setmtime"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Fs is a representation of a remote Koofr Fs
|
||||||
|
type Fs struct {
|
||||||
|
name string
|
||||||
|
mountID string
|
||||||
|
root string
|
||||||
|
opt Options
|
||||||
|
features *fs.Features
|
||||||
|
client *koofrclient.KoofrClient
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Object on the remote Koofr Fs
|
||||||
|
type Object struct {
|
||||||
|
fs *Fs
|
||||||
|
remote string
|
||||||
|
info koofrclient.FileInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
func base(pth string) string {
|
||||||
|
rv := path.Base(pth)
|
||||||
|
if rv == "" || rv == "." {
|
||||||
|
rv = "/"
|
||||||
|
}
|
||||||
|
return rv
|
||||||
|
}
|
||||||
|
|
||||||
|
func dir(pth string) string {
|
||||||
|
rv := path.Dir(pth)
|
||||||
|
if rv == "" || rv == "." {
|
||||||
|
rv = "/"
|
||||||
|
}
|
||||||
|
return rv
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a string representation of the remote Object
|
||||||
|
func (o *Object) String() string {
|
||||||
|
return o.remote
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remote returns the remote path of the Object, relative to Fs root
|
||||||
|
func (o *Object) Remote() string {
|
||||||
|
return o.remote
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModTime returns the modification time of the Object
|
||||||
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
|
return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size return the size of the Object in bytes
|
||||||
|
func (o *Object) Size() int64 {
|
||||||
|
return o.info.Size
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fs returns a reference to the Koofr Fs containing the Object
|
||||||
|
func (o *Object) Fs() fs.Info {
|
||||||
|
return o.fs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hash returns an MD5 hash of the Object
|
||||||
|
func (o *Object) Hash(ctx context.Context, typ hash.Type) (string, error) {
|
||||||
|
if typ == hash.MD5 {
|
||||||
|
return o.info.Hash, nil
|
||||||
|
}
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// fullPath returns the full path of the remote Object (including the
// Fs root).
func (o *Object) fullPath() string {
	return o.fs.fullPath(o.remote)
}
|
||||||
|
|
||||||
|
// Storable returns true if the Object is storable.
func (o *Object) Storable() bool {
	return true
}
|
||||||
|
|
||||||
|
// SetModTime is not supported; the modification time can only be set
// at upload time, so callers must delete and re-upload instead.
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
	return fs.ErrorCantSetModTimeWithoutDelete
}
|
||||||
|
|
||||||
|
// Open opens the Object for reading
|
||||||
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||||
|
var sOff, eOff int64 = 0, -1
|
||||||
|
|
||||||
|
fs.FixRangeOption(options, o.Size())
|
||||||
|
for _, option := range options {
|
||||||
|
switch x := option.(type) {
|
||||||
|
case *fs.SeekOption:
|
||||||
|
sOff = x.Offset
|
||||||
|
case *fs.RangeOption:
|
||||||
|
sOff = x.Start
|
||||||
|
eOff = x.End
|
||||||
|
default:
|
||||||
|
if option.Mandatory() {
|
||||||
|
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if sOff == 0 && eOff < 0 {
|
||||||
|
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
|
||||||
|
}
|
||||||
|
span := &koofrclient.FileSpan{
|
||||||
|
Start: sOff,
|
||||||
|
End: eOff,
|
||||||
|
}
|
||||||
|
return o.fs.client.FilesGetRange(o.fs.mountID, o.fullPath(), span)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update updates the Object contents
|
||||||
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||||
|
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
|
||||||
|
putopts := &koofrclient.PutOptions{
|
||||||
|
ForceOverwrite: true,
|
||||||
|
NoRename: true,
|
||||||
|
OverwriteIgnoreNonExisting: true,
|
||||||
|
SetModified: &mtime,
|
||||||
|
}
|
||||||
|
fullPath := o.fullPath()
|
||||||
|
dirPath := dir(fullPath)
|
||||||
|
name := base(fullPath)
|
||||||
|
err := o.fs.mkdir(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
info, err := o.fs.client.FilesPutWithOptions(o.fs.mountID, dirPath, name, in, putopts)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
o.info = *info
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove deletes the remote Object.
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
}
|
||||||
|
|
||||||
|
// Name returns the configured name of the Fs.
func (f *Fs) Name() string {
	return f.name
}
|
||||||
|
|
||||||
|
// Root returns the root path of the Fs.
func (f *Fs) Root() string {
	return f.root
}
|
||||||
|
|
||||||
|
// String returns a string representation of the Fs
|
||||||
|
func (f *Fs) String() string {
|
||||||
|
return "koofr:" + f.mountID + ":" + f.root
|
||||||
|
}
|
||||||
|
|
||||||
|
// Features returns the optional features supported by this Fs.
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
||||||
|
|
||||||
|
// Precision denotes that setting modification times is not supported
|
||||||
|
func (f *Fs) Precision() time.Duration {
|
||||||
|
if !f.opt.SetMTime {
|
||||||
|
return fs.ModTimeNotSupported
|
||||||
|
}
|
||||||
|
return time.Millisecond
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hashes returns the set of hashes provided by the Fs (MD5 only).
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}
|
||||||
|
|
||||||
|
// fullPath constructs a full, absolute path from a Fs root relative
// path. path.Join also cleans the result.
func (f *Fs) fullPath(part string) string {
	return path.Join("/", f.root, part)
}
|
||||||
|
|
||||||
|
// NewFs constructs a new filesystem given a root path and configuration options
|
||||||
|
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||||
|
opt := new(Options)
|
||||||
|
err = configstruct.Set(m, opt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
pass, err := obscure.Reveal(opt.Password)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
|
||||||
|
basicAuth := fmt.Sprintf("Basic %s",
|
||||||
|
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
|
||||||
|
client.HTTPClient.Headers.Set("Authorization", basicAuth)
|
||||||
|
mounts, err := client.Mounts()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
f := &Fs{
|
||||||
|
name: name,
|
||||||
|
root: root,
|
||||||
|
opt: *opt,
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
f.features = (&fs.Features{
|
||||||
|
CaseInsensitive: true,
|
||||||
|
DuplicateFiles: false,
|
||||||
|
BucketBased: false,
|
||||||
|
CanHaveEmptyDirectories: true,
|
||||||
|
}).Fill(f)
|
||||||
|
for _, m := range mounts {
|
||||||
|
if opt.MountID != "" {
|
||||||
|
if m.Id == opt.MountID {
|
||||||
|
f.mountID = m.Id
|
||||||
|
break
|
||||||
|
}
|
||||||
|
} else if m.IsPrimary {
|
||||||
|
f.mountID = m.Id
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if f.mountID == "" {
|
||||||
|
if opt.MountID == "" {
|
||||||
|
return nil, errors.New("Failed to find primary mount")
|
||||||
|
}
|
||||||
|
return nil, errors.New("Failed to find mount " + opt.MountID)
|
||||||
|
}
|
||||||
|
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
|
||||||
|
if err == nil && rootFile.Type != "dir" {
|
||||||
|
f.root = dir(f.root)
|
||||||
|
err = fs.ErrorIsFile
|
||||||
|
} else {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return f, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// List returns a list of items in a directory
|
||||||
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
|
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
|
||||||
|
if err != nil {
|
||||||
|
return nil, translateErrorsDir(err)
|
||||||
|
}
|
||||||
|
entries = make([]fs.DirEntry, len(files))
|
||||||
|
for i, file := range files {
|
||||||
|
if file.Type == "dir" {
|
||||||
|
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
|
||||||
|
} else {
|
||||||
|
entries[i] = &Object{
|
||||||
|
fs: f,
|
||||||
|
info: file,
|
||||||
|
remote: path.Join(dir, file.Name),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewObject creates a new remote Object for a given remote path
|
||||||
|
func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err error) {
|
||||||
|
info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
|
||||||
|
if err != nil {
|
||||||
|
return nil, translateErrorsObject(err)
|
||||||
|
}
|
||||||
|
if info.Type == "dir" {
|
||||||
|
return nil, fs.ErrorNotAFile
|
||||||
|
}
|
||||||
|
return &Object{
|
||||||
|
fs: f,
|
||||||
|
info: info,
|
||||||
|
remote: remote,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put updates a remote Object
|
||||||
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
|
||||||
|
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
|
||||||
|
putopts := &koofrclient.PutOptions{
|
||||||
|
ForceOverwrite: true,
|
||||||
|
NoRename: true,
|
||||||
|
OverwriteIgnoreNonExisting: true,
|
||||||
|
SetModified: &mtime,
|
||||||
|
}
|
||||||
|
fullPath := f.fullPath(src.Remote())
|
||||||
|
dirPath := dir(fullPath)
|
||||||
|
name := base(fullPath)
|
||||||
|
err = f.mkdir(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
info, err := f.client.FilesPutWithOptions(f.mountID, dirPath, name, in, putopts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, translateErrorsObject(err)
|
||||||
|
}
|
||||||
|
return &Object{
|
||||||
|
fs: f,
|
||||||
|
info: *info,
|
||||||
|
remote: src.Remote(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutStream uploads a remote Object with a stream of unknown size;
// it simply delegates to Put.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}
|
||||||
|
|
||||||
|
// isBadRequest is a predicate which holds true iff the error returned was
|
||||||
|
// HTTP status 400
|
||||||
|
func isBadRequest(err error) bool {
|
||||||
|
switch err := err.(type) {
|
||||||
|
case httpclient.InvalidStatusError:
|
||||||
|
if err.Got == http.StatusBadRequest {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// translateErrorsDir translates koofr errors to rclone errors (for a dir
|
||||||
|
// operation)
|
||||||
|
func translateErrorsDir(err error) error {
|
||||||
|
switch err := err.(type) {
|
||||||
|
case httpclient.InvalidStatusError:
|
||||||
|
if err.Got == http.StatusNotFound {
|
||||||
|
return fs.ErrorDirNotFound
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// translatesErrorsObject translates Koofr errors to rclone errors (for an object operation)
|
||||||
|
func translateErrorsObject(err error) error {
|
||||||
|
switch err := err.(type) {
|
||||||
|
case httpclient.InvalidStatusError:
|
||||||
|
if err.Got == http.StatusNotFound {
|
||||||
|
return fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// mkdir creates a directory at the given absolute path within the
// mount. Creates ancestors if necessary.
func (f *Fs) mkdir(fullPath string) error {
	if fullPath == "/" {
		return nil
	}
	// Fast path: the directory already exists.
	info, err := f.client.FilesInfo(f.mountID, fullPath)
	if err == nil && info.Type == "dir" {
		return nil
	}
	// A not-found error just means we have directories to create;
	// any other error is fatal.
	err = translateErrorsDir(err)
	if err != nil && err != fs.ErrorDirNotFound {
		return err
	}
	// Walk the path from the root, creating each missing component.
	dirs := strings.Split(fullPath, "/")
	parent := "/"
	for _, part := range dirs {
		if part == "" {
			continue
		}
		info, err = f.client.FilesInfo(f.mountID, path.Join(parent, part))
		if err != nil || info.Type != "dir" {
			err = translateErrorsDir(err)
			if err != nil && err != fs.ErrorDirNotFound {
				return err
			}
			// HTTP 400 from folder creation is tolerated —
			// presumably the folder already exists (e.g. created
			// concurrently); TODO confirm against the Koofr API.
			err = f.client.FilesNewFolder(f.mountID, parent, part)
			if err != nil && !isBadRequest(err) {
				return err
			}
		}
		parent = path.Join(parent, part)
	}
	return nil
}
|
||||||
|
|
||||||
|
// Mkdir creates a directory at the given remote path. Creates ancestors if
|
||||||
|
// necessary
|
||||||
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
|
fullPath := f.fullPath(dir)
|
||||||
|
return f.mkdir(fullPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rmdir removes an (empty) directory at the given remote path
|
||||||
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
|
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
|
||||||
|
if err != nil {
|
||||||
|
return translateErrorsDir(err)
|
||||||
|
}
|
||||||
|
if len(files) > 0 {
|
||||||
|
return fs.ErrorDirectoryNotEmpty
|
||||||
|
}
|
||||||
|
err = f.client.FilesDelete(f.mountID, f.fullPath(dir))
|
||||||
|
if err != nil {
|
||||||
|
return translateErrorsDir(err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy copies a remote Object to the given path
|
||||||
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
|
dstFullPath := f.fullPath(remote)
|
||||||
|
dstDir := dir(dstFullPath)
|
||||||
|
err := f.mkdir(dstDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fs.ErrorCantCopy
|
||||||
|
}
|
||||||
|
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
|
||||||
|
err = f.client.FilesCopy((src.(*Object)).fs.mountID,
|
||||||
|
(src.(*Object)).fs.fullPath((src.(*Object)).remote),
|
||||||
|
f.mountID, dstFullPath, koofrclient.CopyOptions{SetModified: &mtime})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fs.ErrorCantCopy
|
||||||
|
}
|
||||||
|
return f.NewObject(ctx, remote)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move moves a remote Object to the given path
|
||||||
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
|
srcObj := src.(*Object)
|
||||||
|
dstFullPath := f.fullPath(remote)
|
||||||
|
dstDir := dir(dstFullPath)
|
||||||
|
err := f.mkdir(dstDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fs.ErrorCantMove
|
||||||
|
}
|
||||||
|
err = f.client.FilesMove(srcObj.fs.mountID,
|
||||||
|
srcObj.fs.fullPath(srcObj.remote), f.mountID, dstFullPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fs.ErrorCantMove
|
||||||
|
}
|
||||||
|
return f.NewObject(ctx, remote)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DirMove moves a remote directory to the given path
|
||||||
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||||
|
srcFs := src.(*Fs)
|
||||||
|
srcFullPath := srcFs.fullPath(srcRemote)
|
||||||
|
dstFullPath := f.fullPath(dstRemote)
|
||||||
|
if srcFs.mountID == f.mountID && srcFullPath == dstFullPath {
|
||||||
|
return fs.ErrorDirExists
|
||||||
|
}
|
||||||
|
dstDir := dir(dstFullPath)
|
||||||
|
err := f.mkdir(dstDir)
|
||||||
|
if err != nil {
|
||||||
|
return fs.ErrorCantDirMove
|
||||||
|
}
|
||||||
|
err = f.client.FilesMove(srcFs.mountID, srcFullPath, f.mountID, dstFullPath)
|
||||||
|
if err != nil {
|
||||||
|
return fs.ErrorCantDirMove
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// About reports space usage (with a MB precision)
|
||||||
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
|
mount, err := f.client.MountsDetails(f.mountID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &fs.Usage{
|
||||||
|
Total: fs.NewUsageValue(mount.SpaceTotal * 1024 * 1024),
|
||||||
|
Used: fs.NewUsageValue(mount.SpaceUsed * 1024 * 1024),
|
||||||
|
Trashed: nil,
|
||||||
|
Other: nil,
|
||||||
|
Free: fs.NewUsageValue((mount.SpaceTotal - mount.SpaceUsed) * 1024 * 1024),
|
||||||
|
Objects: nil,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Purge purges the complete Fs
|
||||||
|
func (f *Fs) Purge(ctx context.Context) error {
|
||||||
|
err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// linkCreate is the Koofr API request body for creating a public link.
type linkCreate struct {
	Path string `json:"path"` // absolute path within the mount to share
}
|
||||||
|
|
||||||
|
// link is the Koofr API response body returned when creating a public
// link. Only ShortURL is consumed by this backend (see PublicLink); the
// remaining fields mirror the API response.
type link struct {
	ID               string `json:"id"`
	Name             string `json:"name"`
	Path             string `json:"path"`
	Counter          int64  `json:"counter"`
	URL              string `json:"url"`
	ShortURL         string `json:"shortUrl"`
	Hash             string `json:"hash"`
	Host             string `json:"host"`
	HasPassword      bool   `json:"hasPassword"`
	Password         string `json:"password"`
	ValidFrom        int64  `json:"validFrom"`
	ValidTo          int64  `json:"validTo"`
	PasswordRequired bool   `json:"passwordRequired"`
}
|
||||||
|
|
||||||
|
// createLink makes a Koofr API call to create a public link
|
||||||
|
func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, error) {
|
||||||
|
linkCreate := linkCreate{
|
||||||
|
Path: path,
|
||||||
|
}
|
||||||
|
linkData := link{}
|
||||||
|
|
||||||
|
request := httpclient.RequestData{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/api/v2/mounts/" + mountID + "/links",
|
||||||
|
ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
|
||||||
|
ReqEncoding: httpclient.EncodingJSON,
|
||||||
|
ReqValue: linkCreate,
|
||||||
|
RespEncoding: httpclient.EncodingJSON,
|
||||||
|
RespValue: &linkData,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := c.Request(&request)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &linkData, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PublicLink creates a public link to the remote path
|
||||||
|
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
|
||||||
|
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
|
||||||
|
if err != nil {
|
||||||
|
return "", translateErrorsDir(err)
|
||||||
|
}
|
||||||
|
return linkData.ShortURL, nil
|
||||||
|
}
|
||||||
14
backend/koofr/koofr_test.go
Normal file
14
backend/koofr/koofr_test.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
package koofr_test

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs the standard rclone integration test suite
// against a remote configured under the name "TestKoofr:".
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestKoofr:",
	})
}
|
||||||
0
backend/local/aaaa
Normal file
0
backend/local/aaaa
Normal file
@@ -3,20 +3,21 @@
|
|||||||
package local
|
package local
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
)
|
)
|
||||||
|
|
||||||
// About gets quota information
|
// About gets quota information
|
||||||
func (f *Fs) About() (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
var s syscall.Statfs_t
|
var s syscall.Statfs_t
|
||||||
err := syscall.Statfs(f.root, &s)
|
err := syscall.Statfs(f.root, &s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to read disk usage")
|
return nil, errors.Wrap(err, "failed to read disk usage")
|
||||||
}
|
}
|
||||||
bs := int64(s.Bsize)
|
bs := int64(s.Bsize) // nolint: unconvert
|
||||||
usage := &fs.Usage{
|
usage := &fs.Usage{
|
||||||
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
|
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
|
||||||
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
|
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
|
||||||
|
|||||||
@@ -3,17 +3,18 @@
|
|||||||
package local
|
package local
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"syscall"
|
"syscall"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
)
|
)
|
||||||
|
|
||||||
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
|
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
|
||||||
|
|
||||||
// About gets quota information
|
// About gets quota information
|
||||||
func (f *Fs) About() (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
var available, total, free int64
|
var available, total, free int64
|
||||||
_, _, e1 := getFreeDiskSpace.Call(
|
_, _, e1 := getFreeDiskSpace.Call(
|
||||||
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
|
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
|
||||||
|
|||||||
12
backend/local/fadvise_other.go
Normal file
12
backend/local/fadvise_other.go
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
//+build !linux

package local

import (
	"io"
	"os"
)

// newFadviseReadCloser is a no-op on non-linux platforms, where
// posix_fadvise is unavailable: the file is returned unchanged.
func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
	return f
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user