Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: v1.53.4...fix-connec (642 commits)
[Commit list omitted: the compare view tabulated 642 commits by SHA1 only (bede3a5d48 through 41ecb586c4), with empty Author and Date columns and no commit messages.]
.github/ISSUE_TEMPLATE/Bug.md (vendored, 39 lines changed)

@@ -5,19 +5,31 @@ about: Report a problem with rclone

 <!--

-Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
+We understand you are having a problem with rclone; we want to help you with that!

-If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
+**STOP and READ**
+**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
+Please show the effort you've put in to solving the problem and please be specific.
+People are volunteering their time to help! Low effort posts are not likely to get good answers!
+
+If you think you might have found a bug, try to replicate it with the latest beta (or stable).
+The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
+
+If you can still replicate it or just got a question then please use the rclone forum:

 https://forum.rclone.org/

-instead of filing an issue for a quick response.
+for a quick response instead of filing an issue on this repo.

-If you think you might have found a bug, please can you try to replicate it with the latest beta?
+If nothing else helps, then please fill in the info below which helps us help you.

-https://beta.rclone.org/
-
-If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
+**DO NOT REDACT** any information except passwords/keys/personal info.
+
+You should use 3 backticks to begin and end your paste to make it readable.
+
+Make sure to include a log obtained with '-vv'.
+
+You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/

 Thank you

@@ -25,6 +37,11 @@ The Rclone Developers

 -->

+#### The associated forum post URL from `https://forum.rclone.org`
+
+
 #### What is the problem you are having with rclone?

@@ -33,18 +50,18 @@ The Rclone Developers

-#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
+#### Which OS you are using and how many bits (e.g. Windows 7, 64 bit)

-#### Which cloud storage system are you using? (eg Google Drive)
+#### Which cloud storage system are you using? (e.g. Google Drive)

-#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
+#### The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)

-#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
+#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)

.github/ISSUE_TEMPLATE/Feature.md (vendored, 16 lines changed)

@@ -7,12 +7,16 @@ about: Suggest a new feature or enhancement for rclone

 Welcome :-)

-So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
+So you've got an idea to improve rclone? We love that!
+You'll be glad to hear we've incorporated hundreds of ideas from contributors already.

-Here is a checklist of things to do:
+Probably the latest beta (or stable) release has your feature, so try to update your rclone.
+The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
+
+If it still isn't there, here is a checklist of things to do:

-1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
-2. Discuss on the forum first: https://forum.rclone.org/
+1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
+2. Discuss on the forum: https://forum.rclone.org/
 3. Make a feature request issue (this is the right place!).
 4. Be prepared to get involved making the feature :-)

@@ -23,6 +27,10 @@ The Rclone Developers
 -->

+#### The associated forum post URL from `https://forum.rclone.org`
+
+
 #### What is your current rclone version (output from `rclone version`)?

.github/workflows/build.yml (vendored, 186 lines changed)

@@ -19,12 +19,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.11', 'go1.12', 'go1.13', 'go1.14']
+        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']

       include:
         - job_name: linux
           os: ubuntu-latest
-          go: '1.15.x'
+          go: '1.16.x'
           gotags: cmount
           build_flags: '-include "^linux/"'
           check: true

@@ -32,51 +32,50 @@ jobs:
           racequicktest: true
           deploy: true

-        - job_name: mac
+        - job_name: mac_amd64
           os: macOS-latest
-          go: '1.15.x'
+          go: '1.16.x'
           gotags: 'cmount'
           build_flags: '-include "^darwin/amd64" -cgo'
           quicktest: true
           racequicktest: true
           deploy: true

+        - job_name: mac_arm64
+          os: macOS-latest
+          go: '1.16.x'
+          gotags: 'cmount'
+          build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
+          deploy: true
+
         - job_name: windows_amd64
           os: windows-latest
-          go: '1.15.x'
+          go: '1.16.x'
           gotags: cmount
           build_flags: '-include "^windows/amd64" -cgo'
+          build_args: '-buildmode exe'
           quicktest: true
           racequicktest: true
           deploy: true

         - job_name: windows_386
           os: windows-latest
-          go: '1.15.x'
+          go: '1.16.x'
           gotags: cmount
           goarch: '386'
           cgo: '1'
           build_flags: '-include "^windows/386" -cgo'
+          build_args: '-buildmode exe'
           quicktest: true
           deploy: true

         - job_name: other_os
           os: ubuntu-latest
-          go: '1.15.x'
-          build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
+          go: '1.16.x'
+          build_flags: '-exclude "^(windows/|darwin/|linux/)"'
           compile_all: true
           deploy: true

-        - job_name: go1.11
-          os: ubuntu-latest
-          go: '1.11.x'
-          quicktest: true
-
-        - job_name: go1.12
-          os: ubuntu-latest
-          go: '1.12.x'
-          quicktest: true
-
         - job_name: go1.13
           os: ubuntu-latest
           go: '1.13.x'

@@ -88,6 +87,12 @@ jobs:
           quicktest: true
           racequicktest: true

+        - job_name: go1.15
+          os: ubuntu-latest
+          go: '1.15.x'
+          quicktest: true
+          racequicktest: true
+
     name: ${{ matrix.job_name }}

     runs-on: ${{ matrix.os }}

@@ -107,10 +112,11 @@ jobs:
       - name: Set environment variables
         shell: bash
         run: |
-          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
-          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
-          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
-          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
+          echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
+          echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
+          echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
+          if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
+          if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi

       - name: Install Libraries on Linux
         shell: bash

@@ -125,7 +131,7 @@ jobs:
         shell: bash
         run: |
           brew update
-          brew cask install osxfuse
+          brew install --cask macfuse
         if: matrix.os == 'macOS-latest'

       - name: Install Libraries on Windows

@@ -133,10 +139,10 @@ jobs:
         run: |
           $ProgressPreference = 'SilentlyContinue'
           choco install -y winfsp zip
-          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
+          echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
           if ($env:GOARCH -eq "386") {
             choco install -y mingw --forcex86 --force
-            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
+            echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
           }
           # Copy mingw32-make.exe to make.exe so the same command line
           # can be used on Windows as on macOS and Linux

@@ -207,50 +213,98 @@ jobs:
         # Deploy binaries if enabled in config && not a PR && not a fork
         if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

-  xgo:
-    timeout-minutes: 60
-    name: "xgo cross compile"
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v1
-        with:
-          # Checkout into a fixed path to avoid import path problems on go < 1.11
-          path: ./src/github.com/rclone/rclone
-
-      - name: Set environment variables
-        shell: bash
-        run: |
-          echo '::set-env name=GOPATH::${{ runner.workspace }}'
-          echo '::add-path::${{ runner.workspace }}/bin'
-
-      - name: Cross-compile rclone
-        run: |
-          docker pull billziss/xgo-cgofuse
-          GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
-          # xgo \
-          #     -image=billziss/xgo-cgofuse \
-          #     -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-          #     -tags cmount \
-          #     -dest build \
-          #     .
-          xgo \
-              -image=billziss/xgo-cgofuse \
-              -targets=android/*,ios/* \
-              -dest build \
-              .
-
-      - name: Build rclone
-        shell: bash
-        run: |
-          make
+  android:
+    timeout-minutes: 30
+    name: "android-all"
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      # Upgrade together with NDK version
+      - name: Set up Go 1.14
+        uses: actions/setup-go@v1
+        with:
+          go-version: 1.14
+
+      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
+      - name: Force NDK version
+        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
+
+      - name: Go module cache
+        uses: actions/cache@v2
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
+      - name: Set global environment variables
+        shell: bash
+        run: |
+          echo "VERSION=$(make version)" >> $GITHUB_ENV
+
+      - name: build native rclone
+        run: |
+          make
+
+      - name: arm-v7a Set environment variables
+        shell: bash
+        run: |
+          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
+          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
+          echo 'GOOS=android' >> $GITHUB_ENV
+          echo 'GOARCH=arm' >> $GITHUB_ENV
+          echo 'GOARM=7' >> $GITHUB_ENV
+          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
+
+      - name: arm-v7a build
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
+
+      - name: arm64-v8a Set environment variables
+        shell: bash
+        run: |
+          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
+          echo 'GOOS=android' >> $GITHUB_ENV
+          echo 'GOARCH=arm64' >> $GITHUB_ENV
+          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
+
+      - name: arm64-v8a build
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
+
+      - name: x86 Set environment variables
+        shell: bash
+        run: |
+          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
+          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
+          echo 'GOOS=android' >> $GITHUB_ENV
+          echo 'GOARCH=386' >> $GITHUB_ENV
+          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
+
+      - name: x86 build
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
+
+      - name: x64 Set environment variables
+        shell: bash
+        run: |
+          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
+          echo 'GOOS=android' >> $GITHUB_ENV
+          echo 'GOARCH=amd64' >> $GITHUB_ENV
+          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
+
+      - name: x64 build
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .

       - name: Upload artifacts
         run: |
           make ci_upload
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
         # Upload artifacts if not a PR && not a fork
         if: github.head_ref == '' && github.repository == 'rclone/rclone'

[file header missing in source; the next two hunks are from the Docker image publish workflows]

@@ -15,7 +15,7 @@ jobs:
         with:
           fetch-depth: 0
       - name: Build and publish image
-        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
+        uses: ilteoood/docker_buildx@1.1.0
         with:
           tag: beta
           imageName: rclone/rclone

@@ -23,7 +23,7 @@ jobs:
         id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Build and publish image
-        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
+        uses: ilteoood/docker_buildx@1.1.0
         with:
           tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
           imageName: rclone/rclone
.gitignore (vendored, 1 line changed)

@@ -1,6 +1,7 @@
 *~
 _junk/
 rclone
+rclone.exe
 build
 docs/public
 rclone.iml
||||
@@ -12,10 +12,10 @@ When filing an issue, please include the following information if
|
||||
possible as well as a description of the problem. Make sure you test
|
||||
with the [latest beta of rclone](https://beta.rclone.org/):
|
||||
|
||||
* Rclone version (eg output from `rclone -V`)
|
||||
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
|
||||
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
|
||||
* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
|
||||
* Rclone version (e.g. output from `rclone -V`)
|
||||
* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
|
||||
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
|
||||
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
||||
* if the log contains secrets then edit the file with a text editor first to obscure them
|
||||
|
||||
## Submitting a pull request ##
|
||||
@@ -33,10 +33,11 @@ page](https://github.com/rclone/rclone).
|
||||
|
||||
Now in your terminal
|
||||
|
||||
go get -u github.com/rclone/rclone
|
||||
cd $GOPATH/src/github.com/rclone/rclone
|
||||
git clone https://github.com/rclone/rclone.git
|
||||
cd rclone
|
||||
git remote rename origin upstream
|
||||
git remote add origin git@github.com:YOURUSER/rclone.git
|
||||
go build
|
||||
|
||||
Make a branch to add your new feature
|
||||
|
||||
@@ -48,7 +49,7 @@ When ready - run the unit tests for the code you changed
|
||||
|
||||
go test -v
|
||||
|
||||
Note that you may need to make a test remote, eg `TestSwift` for some
|
||||
Note that you may need to make a test remote, e.g. `TestSwift` for some
|
||||
of the unit tests.
|
||||
|
||||
Note the top level Makefile targets
|
||||
@@ -72,7 +73,7 @@ Make sure you
|
||||
|
||||
When you are done with that
|
||||
|
||||
git push origin my-new-feature
|
||||
git push -u origin my-new-feature
|
||||
|
||||
Go to the GitHub website and click [Create pull
|
||||
request](https://help.github.com/articles/creating-a-pull-request/).
|
||||
@@ -86,7 +87,7 @@ git reset --soft HEAD~2 # This squashes the 2 latest commits together.
|
||||
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
|
||||
git commit # Add a new commit message.
|
||||
git push --force # Push the squashed commit to your GitHub repo.
|
||||
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
|
||||
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
|
||||
```
|
||||
|
||||
## CI for your fork ##
|
||||
@@ -99,7 +100,7 @@ rclone's tests are run from the go testing framework, so at the top
|
||||
level you can run this to run all the tests.
|
||||
|
||||
go test -v ./...
|
||||
|
||||
|
||||
rclone contains a mixture of unit tests and integration tests.
|
||||
Because it is difficult (and in some respects pointless) to test cloud
|
||||
storage systems by mocking all their interfaces, rclone unit tests can
|
||||
@@ -115,8 +116,8 @@ are skipped if `TestDrive:` isn't defined.
|
||||
cd backend/drive
|
||||
go test -v
|
||||
|
||||
You can then run the integration tests which tests all of rclone's
|
||||
operations. Normally these get run against the local filing system,
|
||||
You can then run the integration tests which test all of rclone's
|
||||
operations. Normally these get run against the local file system,
|
||||
but they can be run against any of the remotes.
|
||||
|
||||
cd fs/sync
|
||||
@@ -127,7 +128,7 @@ but they can be run against any of the remotes.
|
||||
go test -v -remote TestDrive:
|
||||
|
||||
If you want to use the integration test framework to run these tests
|
||||
all together with an HTML report and test retries then from the
|
||||
altogether with an HTML report and test retries then from the
|
||||
project root:
|
||||
|
||||
go install github.com/rclone/rclone/fstest/test_all
|
||||
@@ -170,7 +171,7 @@ with modules beneath.
|
||||
* log - logging facilities
|
||||
* march - iterates directories in lock step
|
||||
* object - in memory Fs objects
|
||||
* operations - primitives for sync, eg Copy, Move
|
||||
* operations - primitives for sync, e.g. Copy, Move
|
||||
* sync - sync directories
|
||||
* walk - walk a directory
|
||||
* fstest - provides integration test framework
|
||||
@@ -178,7 +179,7 @@ with modules beneath.
|
||||
* mockdir - mocks an fs.Directory
|
||||
* mockobject - mocks an fs.Object
|
||||
* test_all - Runs integration tests for everything
|
||||
* graphics - the images used in the website etc
|
||||
* graphics - the images used in the website, etc.
|
||||
* lib - libraries used by the backend
|
||||
* atexit - register functions to run when rclone exits
|
||||
* dircache - directory ID to name caching
|
||||
@@ -202,12 +203,12 @@ for the flag help, the remainder is shown to the user in `rclone
|
||||
config` and is added to the docs with `make backenddocs`.
|
||||
|
||||
The only documentation you need to edit are the `docs/content/*.md`
|
||||
files. The MANUAL.*, rclone.1, web site etc are all auto generated
|
||||
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
|
||||
from those during the release process. See the `make doc` and `make
|
||||
website` targets in the Makefile if you are interested in how. You
|
||||
don't need to run these when adding a feature.
|
||||
|
||||
Documentation for rclone sub commands is with their code, eg
|
||||
Documentation for rclone sub commands is with their code, e.g.
|
||||
`cmd/ls/ls.go`.
|
||||
|
||||
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
|
||||
@@ -265,7 +266,7 @@ rclone uses the [go
|
||||
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
|
||||
support in go1.11 and later to manage its dependencies.
|
||||
|
||||
rclone can be built with modules outside of the GOPATH
|
||||
rclone can be built with modules outside of the `GOPATH`.
|
||||
|
||||
To add a dependency `github.com/ncw/new_dependency` see the
|
||||
instructions below. These will fetch the dependency and add it to
|
||||
@@ -333,8 +334,8 @@ Getting going
|
||||
* Try to implement as many optional methods as possible as it makes the remote more usable.
|
||||
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
|
||||
* `rclone purge -v TestRemote:rclone-info`
|
||||
* `rclone info --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
||||
* `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
|
||||
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
||||
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
|
||||
* open `remote.csv` in a spreadsheet and examine
|
||||
|
||||
Unit tests
|
||||
@@ -364,7 +365,7 @@ See the [testing](#testing) section for more information on integration tests.
|
||||
|
||||
Add your fs to the docs - you'll need to pick an icon for it from
|
||||
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
|
||||
alphabetical order of full name of remote (eg `drive` is ordered as
|
||||
alphabetical order of full name of remote (e.g. `drive` is ordered as
|
||||
`Google Drive`) but with the local file system last.
|
||||
|
||||
* `README.md` - main GitHub page
|
||||
@@ -400,7 +401,7 @@ Usage
|
||||
- If this variable doesn't exist, plugin support is disabled.
|
||||
- Plugins must be compiled against the exact version of rclone to work.
|
||||
(The rclone used during building the plugin must be the same as the source of rclone)
|
||||
|
||||
|
||||
Building
|
||||
|
||||
To turn your existing additions into a Go plugin, move them to an external repository
|
||||
|
||||
@@ -16,6 +16,8 @@ RUN apk --no-cache add ca-certificates fuse tzdata && \
|
||||
|
||||
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
|
||||
|
||||
RUN addgroup -g 1009 rclone && adduser -u 1009 -Ds /bin/sh -G rclone rclone
|
||||
|
||||
ENTRYPOINT [ "rclone" ]
|
||||
|
||||
WORKDIR /data
|
||||
|
||||
@@ -11,7 +11,7 @@ Current active maintainers of rclone are:
|
||||
| Fabian Möller | @B4dM4n | |
|
||||
| Alex Chen | @Cnly | onedrive backend |
|
||||
| Sandeep Ummadi | @sandeepkru | azureblob backend |
|
||||
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
|
||||
| Sebastian Bünger | @buengese | jottacloud, yandex & compress backends |
|
||||
| Ivan Andreev | @ivandeex | chunker & mailru backends |
|
||||
| Max Sum | @Max-Sum | union backend |
|
||||
| Fred | @creativeprojects | seafile backend |
|
||||
@@ -37,7 +37,7 @@ Rclone uses the labels like this:
|
||||
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
|
||||
* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
|
||||
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
|
||||
* `maintenance` - internal enhancement, code re-organisation etc
|
||||
* `maintenance` - internal enhancement, code re-organisation, etc.
|
||||
* `Needs Go 1.XX` - waiting for that version of Go to be released
|
||||
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
|
||||
* `Remote: XXX` - which rclone backend this affects
|
||||
@@ -45,7 +45,7 @@ Rclone uses the labels like this:
|
||||
|
||||
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
|
||||
|
||||
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).
|
||||
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
|
||||
|
||||
The milestones have these meanings:
|
||||
|
||||
|
||||
MANUAL.html (generated, 21469 lines changed): file diff suppressed because one or more lines are too long.

MANUAL.txt (generated, 22463 lines changed): file diff suppressed because it is too large.
Makefile (41 lines changed)

@@ -8,7 +8,8 @@ VERSION := $(shell cat VERSION)
 # Last tag on this branch
 LAST_TAG := $(shell git describe --tags --abbrev=0)
 # Next version
-NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
+NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
+NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
 # If we are working on a release, override branch to master
 ifdef RELEASE_TAG
 BRANCH := master

@@ -45,13 +46,13 @@ endif
 .PHONY: rclone test_all vars version

 rclone:
-	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
 	mkdir -p `go env GOPATH`/bin/
 	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
 	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

 test_all:
-	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
+	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all

 vars:
 	@echo SHELL="'$(SHELL)'"

@@ -92,8 +93,7 @@ build_dep:

 # Get the release dependencies we only install on linux
 release_dep_linux:
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
-	go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
+	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'

 # Get the release dependencies we only install on Windows
 release_dep_windows:

@@ -119,7 +119,7 @@ doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
 rclone.1: MANUAL.md
 	pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1

-MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
+MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs rcdocs
 	./bin/make_manual.py

 MANUAL.html: MANUAL.md

@@ -164,6 +164,11 @@ validate_website: website
 tarball:
 	git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)

+vendorball:
+	go mod vendor
+	tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
+	rm -rf vendor
+
 sign_upload:
 	cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
 	cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS

@@ -182,10 +187,10 @@ upload_github:
 	./bin/upload-github $(TAG)

 cross: doc
-	go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

 beta:
-	go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
 	@echo Beta release ready at https://pub.rclone.org/$(TAG)/

@@ -193,23 +198,23 @@ log_since_last_release:
 	git log $(LAST_TAG)..

 compile_all:
-	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

 ci_upload:
 	sudo chown -R $$USER build
 	find build -type l -delete
 	gzip -r9v build
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
-ifndef BRANCH_PATH
+ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
 endif
 	@echo Beta release ready at $(BETA_URL)/testbuilds

 ci_beta:
 	git log $(LAST_TAG).. > /tmp/git-log.txt
-	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
-ifndef BRANCH_PATH
+ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
 	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
 endif
 	@echo Beta release ready at $(BETA_URL)

@@ -227,7 +232,7 @@ tag: retag doc
 	@echo "Edit the new changelog in docs/content/changelog.md"
 	@echo "Then commit all the changes"
 	@echo git commit -m \"Version $(VERSION)\" -a -v
-	@echo "And finally run make retag before make cross etc"
+	@echo "And finally run make retag before make cross, etc."

 retag:
 	@echo "Version is $(VERSION)"

@@ -239,7 +244,15 @@ startdev:
 	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
 	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
+	echo "$(NEXT_VERSION)" > VERSION
-	git commit -m "Start $(VERSION)-DEV development" fs/version.go
+	git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
+
+startstable:
+	@echo "Version is $(VERSION)"
+	@echo "Next stable version is $(NEXT_PATCH_VERSION)"
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
+	echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
+	echo "$(NEXT_PATCH_VERSION)" > VERSION
+	git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html

 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
[file header missing in source; the following hunks match README.md]

@@ -30,11 +30,13 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
+* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
 * FTP [:page_facing_up:](https://rclone.org/ftp/)
 * GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
 * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
 * Google Drive [:page_facing_up:](https://rclone.org/drive/)
 * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
+* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Hubic [:page_facing_up:](https://rclone.org/hubic/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)

@@ -64,9 +66,11 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
 * Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
+* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
 * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
 * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
 * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
+* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
 * The local filesystem [:page_facing_up:](https://rclone.org/local/)

 Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

@@ -81,6 +85,7 @@ Please see [the full list of all storage providers and their features](https://r
 * [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
 * Can sync to and from network, e.g. two different cloud accounts
 * Optional large file chunking ([Chunker](https://rclone.org/chunker/))
+* Optional transparent compression ([Compress](https://rclone.org/compress/))
 * Optional encryption ([Crypt](https://rclone.org/crypt/))
 * Optional cache ([Cache](https://rclone.org/cache/))
 * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
RELEASE.md (81 lines changed)

@@ -4,12 +4,12 @@ This file describes how to make the various kinds of releases

 ## Extra required software for making a release

-* [github-release](https://github.com/aktau/github-release) for uploading packages
+* [gh the github cli](https://github.com/cli/cli) for uploading packages
 * pandoc for making the html and man pages

 ## Making a release

-* git checkout master
+* git checkout master # see below for stable branch
 * git pull
 * git status - make sure everything is checked in
 * Check GitHub actions build for master is Green

@@ -21,16 +21,17 @@ This file describes how to make the various kinds of releases
 * git status - to check for new man pages - git add them
 * git commit -a -v -m "Version v1.XX.0"
 * make retag
-* git push --tags origin master
+* git push --follow-tags origin
 * # Wait for the GitHub builds to complete then...
 * make fetch_binaries
 * make tarball
+* make vendorball
 * make sign_upload
 * make check_sign
 * make upload
 * make upload_website
 * make upload_github
-* make startdev
+* make startdev # make startstable for stable branch
 * # announce with forum post, twitter post, patreon post

 Early in the next release cycle update the dependencies

@@ -41,68 +42,58 @@ Early in the next release cycle update the dependencies
 * git add new files
 * git commit -a -v

-If `make update` fails with errors like this:
-
-```
-# github.com/cpuguy83/go-md2man/md2man
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
-```
-
-Can be fixed with
-
-* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
-* GO111MODULE=on go mod tidy
-
 ## Making a point release

 If rclone needs a point release due to some horrendous bug:

+Set vars
+
+* BASE_TAG=v1.XX # e.g. v1.52
+* NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
+* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
+
 First make the release branch. If this is a second point release then
 this will be done already.

-* BASE_TAG=v1.XX # eg v1.52
-* NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
-* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
 * git branch ${BASE_TAG} ${BASE_TAG}-stable
 * git co ${BASE_TAG}-stable
 * make startstable

 Now

-* FIXME this is now broken with new semver layout - needs fixing
-* FIXME the TAG=${NEW_TAG} shouldn't be necessary any more
 * git co ${BASE_TAG}-stable
 * git cherry-pick any fixes
+* Test (see above)
 * make NEXT_VERSION=${NEW_TAG} tag
 * edit docs/content/changelog.md
 * make TAG=${NEW_TAG} doc
 * git commit -a -v -m "Version ${NEW_TAG}"
 * git tag -d ${NEW_TAG}
 * git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
 * git push --tags -u origin ${BASE_TAG}-stable
 * Wait for builds to complete
 * make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
 * make TAG=${NEW_TAG} tarball
 * make TAG=${NEW_TAG} sign_upload
 * make TAG=${NEW_TAG} check_sign
 * make TAG=${NEW_TAG} upload
 * make TAG=${NEW_TAG} upload_website
 * make TAG=${NEW_TAG} upload_github
-* NB this overwrites the current beta so we need to do this
-* Do the steps as above
-* make startstable
 * git co master
 * make VERSION=${NEW_TAG} startdev
-* # cherry pick the changes to the changelog and VERSION
-* git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
-* git commit --amend
+* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
+* git checkout ${BASE_TAG}-stable docs/content/changelog.md
+* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
 * git push
 * Announce!

 ## Making a manual build of docker

 The rclone docker image should autobuild on via GitHub actions. If it doesn't
 or needs to be updated then rebuild like this.

 See: https://github.com/ilteoood/docker_buildx/issues/19
 See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh

 ```
 git co v1.54.1
 docker pull golang
 export DOCKER_CLI_EXPERIMENTAL=enabled
 docker buildx create --name actions_builder --use
-docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
+docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
 SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
 echo "Supported platforms: $SUPPORTED_PLATFORMS"
 docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
 docker buildx stop actions_builder
 ```

 ### Old build for linux/amd64 only

 ```
 docker pull golang
 docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
 ```
[file header missing in source; the following hunks are from the alias backend (package alias)]

@@ -1,6 +1,7 @@
 package alias

 import (
+	"context"
 	"errors"
 	"strings"

@@ -34,7 +35,7 @@ type Options struct {
 // NewFs constructs an Fs from the path.
 //
 // The returned Fs is the actual Fs, referenced by remote in the config
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)

@@ -47,5 +48,5 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if strings.HasPrefix(opt.Remote, name+":") {
 		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
 	}
-	return cache.Get(fspath.JoinRootPath(opt.Remote, root))
+	return cache.Get(ctx, fspath.JoinRootPath(opt.Remote, root))
 }
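The alias hunks above show one instance of the context plumbing applied across the backends in this range: `fs.NewFs` and every backend constructor gained a leading `context.Context` parameter. As a minimal sketch of what a caller looks like after the change (the local path is only illustrative, and error handling is trimmed):

```go
package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/local" // register the local backend
	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()

	// After this change the context is threaded from the caller through
	// fs.NewFs into the backend's own NewFs, so cancellation can reach
	// any work done during construction.
	f, err := fs.NewFs(ctx, "/tmp")
	if err != nil {
		log.Fatal(err)
	}

	entries, err := f.List(ctx, "")
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		fmt.Println(entry.Remote())
	}
}
```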
[file header missing in source; the following hunks are from the alias backend tests]

@@ -11,6 +11,7 @@ import (
 	_ "github.com/rclone/rclone/backend/local" // pull in test backend
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configfile"
 	"github.com/stretchr/testify/require"
 )

@@ -19,7 +20,7 @@ var (
 )

 func prepare(t *testing.T, root string) {
-	config.LoadConfig()
+	configfile.LoadConfig(context.Background())

 	// Configure the remote
 	config.FileSet(remoteName, "type", "alias")

@@ -54,21 +55,22 @@ func TestNewFS(t *testing.T) {
 			{"four/under four.txt", 9, false},
 		}},
 		{"four", "..", "", true, []testEntry{
-			{"four", -1, true},
-			{"one%.txt", 6, false},
-			{"three", -1, true},
-			{"two.html", 7, false},
+			{"five", -1, true},
+			{"under four.txt", 9, false},
 		}},
-		{"four", "../three", "", true, []testEntry{
+		{"", "../../three", "", true, []testEntry{
 			{"underthree.txt", 9, false},
 		}},
+		{"four", "../../five", "", true, []testEntry{
+			{"underfive.txt", 6, false},
+		}},
 	} {
 		what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)

 		remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
 		require.NoError(t, err, what)
 		prepare(t, remoteRoot)
-		f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
+		f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
 		require.NoError(t, err, what)
 		gotEntries, err := f.List(context.Background(), test.fsList)
 		require.NoError(t, err, what)

@@ -90,7 +92,7 @@ func TestNewFS(t *testing.T) {

 func TestNewFSNoRemote(t *testing.T) {
 	prepare(t, "")
-	f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
+	f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))

 	require.Error(t, err)
 	require.Nil(t, f)

@@ -98,7 +100,7 @@ func TestNewFSNoRemote(t *testing.T) {

 func TestNewFSInvalidRemote(t *testing.T) {
 	prepare(t, "not_existing_test_remote:")
-	f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
+	f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))

 	require.Error(t, err)
 	require.Nil(t, f)
@@ -9,13 +9,16 @@ import (
    _ "github.com/rclone/rclone/backend/box"
    _ "github.com/rclone/rclone/backend/cache"
    _ "github.com/rclone/rclone/backend/chunker"
    _ "github.com/rclone/rclone/backend/compress"
    _ "github.com/rclone/rclone/backend/crypt"
    _ "github.com/rclone/rclone/backend/drive"
    _ "github.com/rclone/rclone/backend/dropbox"
    _ "github.com/rclone/rclone/backend/fichier"
    _ "github.com/rclone/rclone/backend/filefabric"
    _ "github.com/rclone/rclone/backend/ftp"
    _ "github.com/rclone/rclone/backend/googlecloudstorage"
    _ "github.com/rclone/rclone/backend/googlephotos"
    _ "github.com/rclone/rclone/backend/hdfs"
    _ "github.com/rclone/rclone/backend/http"
    _ "github.com/rclone/rclone/backend/hubic"
    _ "github.com/rclone/rclone/backend/jottacloud"
@@ -40,4 +43,5 @@ import (
    _ "github.com/rclone/rclone/backend/union"
    _ "github.com/rclone/rclone/backend/webdav"
    _ "github.com/rclone/rclone/backend/yandex"
    _ "github.com/rclone/rclone/backend/zoho"
)

@@ -70,8 +70,8 @@ func init() {
        Prefix:      "acd",
        Description: "Amazon Drive",
        NewFs:       NewFs,
        Config: func(name string, m configmap.Mapper) {
            err := oauthutil.Config("amazon cloud drive", name, m, acdConfig, nil)
        Config: func(ctx context.Context, name string, m configmap.Mapper) {
            err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
@@ -144,6 +144,7 @@ type Fs struct {
    name         string         // name of this remote
    features     *fs.Features   // optional features
    opt          Options        // options for this Fs
    ci           *fs.ConfigInfo // global config
    c            *acd.Client    // the connection to the acd server
    noAuthClient *http.Client   // unauthenticated http client
    root         string         // the path we are working on
@@ -204,7 +205,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
    if fserrors.ContextError(ctx, &err) {
        return false, err
    }
    if resp != nil {
        if resp.StatusCode == 401 {
            f.tokenRenewer.Invalidate()
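This `shouldRetry` change is the pattern repeated across every backend in the branch: the retry predicate now sees the request context, so a cancelled or timed-out context stops the pacer's retry loop instead of being treated like a transient error. A standalone sketch of the idea (simplified; assumes the rclone `fserrors` package):

```go
package sketch

import (
	"context"
	"net/http"

	"github.com/rclone/rclone/fs/fserrors"
)

var retryErrorCodes = []int{408, 429, 500, 502, 503, 504}

// shouldRetry sketches the ctx-aware retry predicate used above.
// fserrors.ContextError reports whether ctx is cancelled or past its
// deadline and, if so, folds that into err so the pacer stops retrying.
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err // context gone - never retry
	}
	// Otherwise fall back to the usual transient-error heuristics.
	return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
```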
@@ -239,8 +243,7 @@ func filterRequest(req *http.Request) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
@@ -248,7 +251,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        return nil, err
    }
    root = parsePath(root)
    baseClient := fshttp.NewClient(fs.Config)
    baseClient := fshttp.NewClient(ctx)
    if do, ok := baseClient.Transport.(interface {
        SetRequestFilter(f func(req *http.Request))
    }); ok {
@@ -256,29 +259,31 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    } else {
        fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
    }
    oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
    oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
    if err != nil {
        return nil, errors.Wrap(err, "failed to configure Amazon Drive")
    }

    c := acd.NewClient(oAuthClient)
    ci := fs.GetConfig(ctx)
    f := &Fs{
        name:         name,
        root:         root,
        opt:          *opt,
        ci:           ci,
        c:            c,
        pacer:        fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
        noAuthClient: fshttp.NewClient(fs.Config),
        pacer:        fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
        noAuthClient: fshttp.NewClient(ctx),
    }
    f.features = (&fs.Features{
        CaseInsensitive:         true,
        ReadMimeType:            true,
        CanHaveEmptyDirectories: true,
    }).Fill(f)
    }).Fill(ctx, f)

    // Renew the token in the background
    f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
        _, err := f.getRootInfo()
        _, err := f.getRootInfo(ctx)
        return err
    })

@@ -286,14 +291,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        _, resp, err = f.c.Account.GetEndpoints()
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "failed to get endpoints")
    }

    // Get rootID
    rootInfo, err := f.getRootInfo()
    rootInfo, err := f.getRootInfo(ctx)
    if err != nil || rootInfo.Id == nil {
        return nil, errors.Wrap(err, "failed to get root")
    }
@@ -335,11 +340,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// getRootInfo gets the root folder info
func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
func (f *Fs) getRootInfo(ctx context.Context) (rootInfo *acd.Folder, err error) {
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        rootInfo, resp, err = f.c.Nodes.GetRoot()
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
    return rootInfo, err
}
@@ -378,7 +383,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
    var subFolder *acd.Folder
    err = f.pacer.Call(func() (bool, error) {
        subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        if err == acd.ErrorNodeNotFound {
@@ -405,7 +410,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
    var info *acd.Folder
    err = f.pacer.Call(func() (bool, error) {
        info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        //fmt.Printf("...Error %v\n", err)
@@ -426,7 +431,7 @@ type listAllFn func(*acd.Node) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
    query := "parents:" + dirID
    if directoriesOnly {
        query += " AND kind:" + folderKind
@@ -447,7 +452,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
    var resp *http.Response
    err = f.pacer.CallNoRetry(func() (bool, error) {
        nodes, resp, err = f.c.Nodes.GetNodes(&opts)
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return false, err
@@ -502,11 +507,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    if err != nil {
        return nil, err
    }
    maxTries := fs.Config.LowLevelRetries
    maxTries := f.ci.LowLevelRetries
    var iErr error
    for tries := 1; tries <= maxTries; tries++ {
        entries = nil
        _, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool {
        _, err = f.listAll(ctx, directoryID, "", false, false, func(node *acd.Node) bool {
            remote := path.Join(dir, *node.Name)
            switch *node.Kind {
            case folderKind:
@@ -523,7 +528,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
                }
                entries = append(entries, o)
            default:
                // ignore ASSET etc
                // ignore ASSET, etc.
            }
            return false
        })
@@ -665,7 +670,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
        if ok {
            return false, nil
        }
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err
@@ -680,7 +685,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return err
}

// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -706,7 +711,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    if err != nil {
        return nil, err
    }
    err = f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
    err = f.moveNode(ctx, srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
    if err != nil {
        return nil, err
    }
@@ -717,7 +722,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
        dstObj         fs.Object
        srcErr, dstErr error
    )
    for i := 1; i <= fs.Config.LowLevelRetries; i++ {
    for i := 1; i <= f.ci.LowLevelRetries; i++ {
        _, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
        if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
            // exit if error on source
@@ -732,7 +737,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
            // finished if src not found and dst found
            break
        }
        fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
        fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
        time.Sleep(1 * time.Second)
    }
    return dstObj, dstErr
@@ -745,7 +750,7 @@ func (f *Fs) DirCacheFlush() {
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -801,7 +806,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    var jsonStr string
    err = srcFs.pacer.Call(func() (bool, error) {
        jsonStr, err = srcInfo.GetMetadata()
        return srcFs.shouldRetry(nil, err)
        return srcFs.shouldRetry(ctx, nil, err)
    })
    if err != nil {
        fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
@@ -813,7 +818,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
        return err
    }

    err = f.moveNode(srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
    err = f.moveNode(ctx, srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
    if err != nil {
        return err
    }
@@ -838,7 +843,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
    if check {
        // check directory is empty
        empty := true
        _, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
        _, err = f.listAll(ctx, rootID, "", false, false, func(node *acd.Node) bool {
            switch *node.Kind {
            case folderKind:
                empty = false
@@ -863,7 +868,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = node.Trash()
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return err
@@ -893,7 +898,7 @@ func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.MD5)
}

// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -985,7 +990,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
    var info *acd.File
    err = o.fs.pacer.Call(func() (bool, error) {
        info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
        return o.fs.shouldRetry(resp, err)
        return o.fs.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        if err == acd.ErrorNodeNotFound {
@@ -1042,7 +1047,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
        } else {
            in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
        }
        return o.fs.shouldRetry(resp, err)
        return o.fs.shouldRetry(ctx, resp, err)
    })
    return in, err
}
@@ -1065,7 +1070,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        if ok {
            return false, nil
        }
        return o.fs.shouldRetry(resp, err)
        return o.fs.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return err
@@ -1075,70 +1080,70 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// Remove a node
func (f *Fs) removeNode(info *acd.Node) error {
func (f *Fs) removeNode(ctx context.Context, info *acd.Node) error {
    var resp *http.Response
    var err error
    err = f.pacer.Call(func() (bool, error) {
        resp, err = info.Trash()
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
    return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
    return o.fs.removeNode(o.info)
    return o.fs.removeNode(ctx, o.info)
}

// Restore a node
func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
func (f *Fs) restoreNode(ctx context.Context, info *acd.Node) (newInfo *acd.Node, err error) {
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        newInfo, resp, err = info.Restore()
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
    return newInfo, err
}

// Changes name of given node
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
func (f *Fs) renameNode(ctx context.Context, info *acd.Node, newName string) (newInfo *acd.Node, err error) {
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
    return newInfo, err
}

// Replaces one parent with another, effectively moving the file. Leaves other
// parents untouched. ReplaceParent cannot be used when the file is trashed.
func (f *Fs) replaceParent(info *acd.Node, oldParentID string, newParentID string) error {
func (f *Fs) replaceParent(ctx context.Context, info *acd.Node, oldParentID string, newParentID string) error {
    return f.pacer.Call(func() (bool, error) {
        resp, err := info.ReplaceParent(oldParentID, newParentID)
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
}

// Adds one additional parent to object.
func (f *Fs) addParent(info *acd.Node, newParentID string) error {
func (f *Fs) addParent(ctx context.Context, info *acd.Node, newParentID string) error {
    return f.pacer.Call(func() (bool, error) {
        resp, err := info.AddParent(newParentID)
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
}

// Remove given parent from object, leaving the other possible
// parents untouched. Object can end up having no parents.
func (f *Fs) removeParent(info *acd.Node, parentID string) error {
func (f *Fs) removeParent(ctx context.Context, info *acd.Node, parentID string) error {
    return f.pacer.Call(func() (bool, error) {
        resp, err := info.RemoveParent(parentID)
        return f.shouldRetry(resp, err)
        return f.shouldRetry(ctx, resp, err)
    })
}

// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
    // fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
    cantMove := fs.ErrorCantMove
    if useDirErrorMsgs {
@@ -1152,7 +1157,7 @@ func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, s

    if srcLeaf != dstLeaf {
        // fs.Debugf(name, "renaming")
        _, err = f.renameNode(srcInfo, dstLeaf)
        _, err = f.renameNode(ctx, srcInfo, dstLeaf)
        if err != nil {
            fs.Debugf(name, "Move: quick path rename failed: %v", err)
            goto OnConflict
@@ -1160,7 +1165,7 @@ func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, s
    }
    if srcDirectoryID != dstDirectoryID {
        // fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
        err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID)
        err = f.replaceParent(ctx, srcInfo, srcDirectoryID, dstDirectoryID)
        if err != nil {
            fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
            return err
@@ -1173,13 +1178,13 @@ OnConflict:
    fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")

    // fs.Debugf(name, "Trashing file")
    err = f.removeNode(srcInfo)
    err = f.removeNode(ctx, srcInfo)
    if err != nil {
        fs.Debugf(name, "Move: remove node failed: %v", err)
        return err
    }
    // fs.Debugf(name, "Renaming file")
    _, err = f.renameNode(srcInfo, dstLeaf)
    _, err = f.renameNode(ctx, srcInfo, dstLeaf)
    if err != nil {
        fs.Debugf(name, "Move: rename node failed: %v", err)
        return err
@@ -1187,19 +1192,19 @@ OnConflict:
    // note: replacing parent is forbidden by API, modifying them individually is
    // okay though
    // fs.Debugf(name, "Adding target parent")
    err = f.addParent(srcInfo, dstDirectoryID)
    err = f.addParent(ctx, srcInfo, dstDirectoryID)
    if err != nil {
        fs.Debugf(name, "Move: addParent failed: %v", err)
        return err
    }
    // fs.Debugf(name, "removing original parent")
    err = f.removeParent(srcInfo, srcDirectoryID)
    err = f.removeParent(ctx, srcInfo, srcDirectoryID)
    if err != nil {
        fs.Debugf(name, "Move: removeParent failed: %v", err)
        return err
    }
    // fs.Debugf(name, "Restoring")
    _, err = f.restoreNode(srcInfo)
    _, err = f.restoreNode(ctx, srcInfo)
    if err != nil {
        fs.Debugf(name, "Move: restoreNode node failed: %v", err)
        return err

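Alongside the ctx plumbing, the hunks above also retire reads of the `fs.Config` global: each Fs now captures a `*fs.ConfigInfo` via `fs.GetConfig(ctx)` at construction and consults that instead (`f.ci.LowLevelRetries`, `f.ci.Transfers`, and so on). A hedged sketch of the pattern with made-up names, assuming the v1.54-era `fs` API:

```go
package sketch

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// myFs mirrors the pattern above: config is snapshotted from the
// context once in the constructor rather than read from a global.
type myFs struct {
	ci *fs.ConfigInfo // global config captured at construction
}

func newMyFs(ctx context.Context) *myFs {
	return &myFs{ci: fs.GetConfig(ctx)} // was: the fs.Config global
}

func (f *myFs) maxTries() int {
	return f.ci.LowLevelRetries // was: fs.Config.LowLevelRetries
}
```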
@@ -1,17 +1,17 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system

// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,!js,go1.14

package azureblob

import (
    "bytes"
    "context"
    "crypto/md5"
    "encoding/base64"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/url"
    "path"
@@ -21,9 +21,9 @@ import (

    "github.com/Azure/azure-pipeline-go/pipeline"
    "github.com/Azure/azure-storage-blob-go/azblob"
    "github.com/Azure/go-autorest/autorest/adal"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
@@ -33,10 +33,9 @@ import (
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/env"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/pool"
    "github.com/rclone/rclone/lib/readers"
    "golang.org/x/sync/errgroup"
)

const (
@@ -47,15 +46,12 @@ const (
    modTimeKey            = "mtime"
    timeFormatIn          = time.RFC3339
    timeFormatOut         = "2006-01-02T15:04:05.000000000Z07:00"
    maxTotalParts         = 50000 // in multipart upload
    storageDefaultBaseURL = "blob.core.windows.net"
    // maxUncommittedSize = 9 << 30 // can't upload bigger than this
    defaultChunkSize    = 4 * fs.MebiByte
    maxChunkSize        = 100 * fs.MebiByte
    defaultUploadCutoff = 256 * fs.MebiByte
    maxUploadCutoff     = 256 * fs.MebiByte
    defaultAccessTier   = azblob.AccessTierNone
    maxTryTimeout       = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
    defaultChunkSize  = 4 * fs.MebiByte
    maxChunkSize      = 100 * fs.MebiByte
    uploadConcurrency = 4
    defaultAccessTier = azblob.AccessTierNone
    maxTryTimeout     = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
    // Default storage account, key and blob endpoint for emulator support,
    // though it is a base64 key checked in here, it is a publicly available secret.
    emulatorAccount = "devstoreaccount1"
@@ -65,6 +61,10 @@ const (
    memoryPoolUseMmap = false
)

var (
    errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete"))
)

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
@@ -74,12 +74,51 @@ func init() {
        Options: []fs.Option{{
            Name: "account",
            Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
        }, {
            Name: "service_principal_file",
            Help: `Path to file containing credentials for use with a service principal.

Leave blank normally. Needed only if you want to use a service principal instead of interactive login.

    $ az ad sp create-for-rbac --name "<name>" \
      --role "Storage Blob Data Owner" \
      --scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
      > azure-principal.json

See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
for more details.
`,
        }, {
            Name: "key",
            Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
        }, {
            Name: "sas_url",
            Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
        }, {
            Name: "use_msi",
            Help: `Use a managed service identity to authenticate (only works in Azure)

When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
to authenticate to Azure Storage instead of a SAS token or account key.

If the VM(SS) on which this program is running has a system-assigned identity, it will
be used by default. If the resource has no system-assigned but exactly one user-assigned identity,
the user-assigned identity will be used by default. If the resource has multiple user-assigned
identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
msi_client_id, or msi_mi_res_id parameters.`,
            Default: false,
        }, {
            Name:     "msi_object_id",
            Help:     "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.",
            Advanced: true,
        }, {
            Name:     "msi_client_id",
            Help:     "Client ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.",
            Advanced: true,
        }, {
            Name:     "msi_mi_res_id",
            Help:     "Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.",
            Advanced: true,
        }, {
            Name: "use_emulator",
            Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
@@ -90,8 +129,7 @@ func init() {
            Advanced: true,
        }, {
            Name:    "upload_cutoff",
            Help:    "Cutoff for switching to chunked upload (<= 256MB).",
            Default: defaultUploadCutoff,
            Help:     "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
            Advanced: true,
        }, {
            Name: "chunk_size",
@@ -129,6 +167,24 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".`,
            Advanced: true,
        }, {
            Name:    "archive_tier_delete",
            Default: false,
            Help: fmt.Sprintf(`Delete archive tier blobs before overwriting.

Archive tier blobs cannot be updated. So without this flag, if you
attempt to update an archive tier blob, then rclone will produce the
error:

    %v

With this flag set then before rclone attempts to overwrite an archive
tier blob, it will delete the existing blob before uploading its
replacement. This has the potential for data loss if the upload fails
(unlike updating a normal blob) and also may cost more since deleting
archive tier blobs early may be chargeable.
`, errCantUpdateArchiveTierBlobs),
            Advanced: true,
        }, {
            Name: "disable_checksum",
            Help: `Don't store MD5 checksum with object metadata.
@@ -161,25 +217,48 @@ This option controls how often unused buffers will be removed from the pool.`,
                encoder.EncodeDel |
                encoder.EncodeBackSlash |
                encoder.EncodeRightPeriod),
        }, {
            Name:    "public_access",
            Help:    "Public access level of a container: blob, container.",
            Default: string(azblob.PublicAccessNone),
            Examples: []fs.OptionExample{
                {
                    Value: string(azblob.PublicAccessNone),
                    Help:  "The container and its blobs can be accessed only with an authorized request. It's a default value",
                }, {
                    Value: string(azblob.PublicAccessBlob),
                    Help:  "Blob data within this container can be read via anonymous request.",
                }, {
                    Value: string(azblob.PublicAccessContainer),
                    Help:  "Allow full public read access for container and blob data.",
                },
            },
            Advanced: true,
        }},
    })
}

// Options defines the configuration for this backend
type Options struct {
    Account             string               `config:"account"`
    Key                 string               `config:"key"`
    Endpoint            string               `config:"endpoint"`
    SASURL              string               `config:"sas_url"`
    UploadCutoff        fs.SizeSuffix        `config:"upload_cutoff"`
    ChunkSize           fs.SizeSuffix        `config:"chunk_size"`
    ListChunkSize       uint                 `config:"list_chunk"`
    AccessTier          string               `config:"access_tier"`
    UseEmulator         bool                 `config:"use_emulator"`
    DisableCheckSum     bool                 `config:"disable_checksum"`
    MemoryPoolFlushTime fs.Duration          `config:"memory_pool_flush_time"`
    MemoryPoolUseMmap   bool                 `config:"memory_pool_use_mmap"`
    Enc                 encoder.MultiEncoder `config:"encoding"`
    Account              string               `config:"account"`
    ServicePrincipalFile string               `config:"service_principal_file"`
    Key                  string               `config:"key"`
    UseMSI               bool                 `config:"use_msi"`
    MSIObjectID          string               `config:"msi_object_id"`
    MSIClientID          string               `config:"msi_client_id"`
    MSIResourceID        string               `config:"msi_mi_res_id"`
    Endpoint             string               `config:"endpoint"`
    SASURL               string               `config:"sas_url"`
    ChunkSize            fs.SizeSuffix        `config:"chunk_size"`
    ListChunkSize        uint                 `config:"list_chunk"`
    AccessTier           string               `config:"access_tier"`
    ArchiveTierDelete    bool                 `config:"archive_tier_delete"`
    UseEmulator          bool                 `config:"use_emulator"`
    DisableCheckSum      bool                 `config:"disable_checksum"`
    MemoryPoolFlushTime  fs.Duration          `config:"memory_pool_flush_time"`
    MemoryPoolUseMmap    bool                 `config:"memory_pool_use_mmap"`
    Enc                  encoder.MultiEncoder `config:"encoding"`
    PublicAccess         string               `config:"public_access"`
}

// Fs represents a remote azure server
@@ -187,6 +266,7 @@ type Fs struct {
    name         string             // name of this remote
    root         string             // the path we are working on if any
    opt          Options            // parsed config options
    ci           *fs.ConfigInfo     // global config
    features     *fs.Features       // optional features
    client       *http.Client       // http client we are using
    svcURL       *azblob.ServiceURL // reference to serviceURL
@@ -197,8 +277,10 @@ type Fs struct {
    isLimited    bool                    // if limited to one container
    cache        *bucket.Cache           // cache for container creation status
    pacer        *fs.Pacer               // To pace and retry the API calls
    imdsPacer    *fs.Pacer               // Same but for IMDS
    uploadToken  *pacer.TokenDispenser   // control concurrency
    pool         *pool.Pool              // memory pool
    publicAccess azblob.PublicAccessType // Container Public Access Level
}

// Object describes an azure object
@@ -272,9 +354,22 @@ func validateAccessTier(tier string) bool {
    }
}

// validatePublicAccess checks if azureblob supports the supplied public access level
func validatePublicAccess(publicAccess string) bool {
    switch publicAccess {
    case string(azblob.PublicAccessNone),
        string(azblob.PublicAccessBlob),
        string(azblob.PublicAccessContainer):
        // valid cases
        return true
    default:
        return false
    }
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
    401, // Unauthorized (eg "Token has expired")
    401, // Unauthorized (e.g. "Token has expired")
    408, // Request Timeout
    429, // Rate exceeded.
    500, // Get occasional 500 Internal Server Error
@@ -284,7 +379,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(err error) (bool, error) {
func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
    if fserrors.ContextError(ctx, &err) {
        return false, err
    }
    // FIXME interpret special errors - more to do here
    if storageErr, ok := err.(azblob.StorageError); ok {
        switch storageErr.ServiceCode() {
@@ -299,6 +397,8 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
            return true, err
        }
    }
    } else if httpErr, ok := err.(httpError); ok {
        return fserrors.ShouldRetryHTTP(httpErr.Response, retryErrorCodes), err
    }
    return fserrors.ShouldRetry(err), err
}
@@ -322,21 +422,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
    return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
    if cs > maxUploadCutoff {
        return errors.Errorf("%v must be less than or equal to %v", cs, maxUploadCutoff)
    }
    return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
    err = checkUploadCutoff(cs)
    if err == nil {
        old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
    }
    return
}

// httpClientFactory creates a Factory object that sends HTTP requests
// to an rclone's http.Client.
//
@@ -353,6 +438,50 @@ func httpClientFactory(client *http.Client) pipeline.Factory {
    })
}

type servicePrincipalCredentials struct {
    AppID    string `json:"appId"`
    Password string `json:"password"`
    Tenant   string `json:"tenant"`
}

const azureActiveDirectoryEndpoint = "https://login.microsoftonline.com/"
const azureStorageEndpoint = "https://storage.azure.com/"

// newServicePrincipalTokenRefresher takes the client ID and secret, and returns a refresh-able access token.
func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []byte) (azblob.TokenRefresher, error) {
    var spCredentials servicePrincipalCredentials
    if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
        return nil, errors.Wrap(err, "error parsing credentials from JSON file")
    }
    oauthConfig, err := adal.NewOAuthConfig(azureActiveDirectoryEndpoint, spCredentials.Tenant)
    if err != nil {
        return nil, errors.Wrap(err, "error creating oauth config")
    }

    // Create service principal token for Azure Storage.
    servicePrincipalToken, err := adal.NewServicePrincipalToken(
        *oauthConfig,
        spCredentials.AppID,
        spCredentials.Password,
        azureStorageEndpoint)
    if err != nil {
        return nil, errors.Wrap(err, "error creating service principal token")
    }

    // Wrap token inside a refresher closure.
    var tokenRefresher azblob.TokenRefresher = func(credential azblob.TokenCredential) time.Duration {
        if err := servicePrincipalToken.Refresh(); err != nil {
            panic(err)
        }
        refreshedToken := servicePrincipalToken.Token()
        credential.SetToken(refreshedToken.AccessToken)
        exp := refreshedToken.Expires().Sub(time.Now().Add(2 * time.Minute))
        return exp
    }

    return tokenRefresher, nil
}

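The credentials file parsed above is the JSON that `az ad sp create-for-rbac` writes; only the `appId`, `password` and `tenant` fields are read (per the struct tags). A small self-contained sketch with placeholder values (the IDs below are invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Same shape as servicePrincipalCredentials above.
type servicePrincipalCredentials struct {
	AppID    string `json:"appId"`
	Password string `json:"password"`
	Tenant   string `json:"tenant"`
}

func main() {
	// Placeholder contents of azure-principal.json - invented values.
	data := []byte(`{
		"appId": "00000000-0000-0000-0000-000000000000",
		"password": "<client secret>",
		"tenant": "11111111-1111-1111-1111-111111111111"
	}`)
	var c servicePrincipalCredentials
	if err := json.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Println("tenant:", c.Tenant, "app:", c.AppID)
}
```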
// newPipeline creates a Pipeline using the specified credentials and options.
//
// this code was copied from azblob.NewPipeline
@@ -379,8 +508,7 @@ func (f *Fs) setRoot(root string) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
@@ -388,10 +516,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        return nil, err
    }

    err = checkUploadCutoff(opt.UploadCutoff)
    if err != nil {
        return nil, errors.Wrap(err, "azure: upload cutoff")
    }
    err = checkUploadChunkSize(opt.ChunkSize)
    if err != nil {
        return nil, errors.Wrap(err, "azure: chunk size")
@@ -410,21 +534,31 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
            string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
    }

    if !validatePublicAccess((opt.PublicAccess)) {
        return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
            string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
    }

    ci := fs.GetConfig(ctx)
    f := &Fs{
        name:        name,
        opt:         *opt,
        pacer:       fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
        client:      fshttp.NewClient(fs.Config),
        ci:          ci,
        pacer:       fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        imdsPacer:   fs.NewPacer(ctx, pacer.NewAzureIMDS()),
        uploadToken: pacer.NewTokenDispenser(ci.Transfers),
        client:      fshttp.NewClient(ctx),
        cache:       bucket.NewCache(),
        cntURLcache: make(map[string]*azblob.ContainerURL, 1),
        pool: pool.New(
            time.Duration(opt.MemoryPoolFlushTime),
            int(opt.ChunkSize),
            fs.Config.Transfers,
            ci.Transfers,
            opt.MemoryPoolUseMmap,
        ),
    }
    f.publicAccess = azblob.PublicAccessType(opt.PublicAccess)
    f.imdsPacer.SetRetries(5) // per IMDS documentation
    f.setRoot(root)
    f.features = (&fs.Features{
        ReadMimeType: true,
@@ -433,7 +567,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        BucketBasedRootOK: true,
        SetTier:           true,
        GetTier:           true,
    }).Fill(f)
    }).Fill(ctx, f)

    var (
        u *url.URL
@@ -451,6 +585,76 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        }
        pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
        serviceURL = azblob.NewServiceURL(*u, pipeline)
    case opt.UseMSI:
        var token adal.Token
        var userMSI *userMSI = &userMSI{}
        if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
            // Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
            // Validate and ensure exactly one is set. (To do: better validation.)
            if len(opt.MSIClientID) > 0 {
                if len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
                    return nil, errors.New("more than one user-assigned identity ID is set")
                }
                userMSI.Type = msiClientID
                userMSI.Value = opt.MSIClientID
            }
            if len(opt.MSIObjectID) > 0 {
                if len(opt.MSIClientID) > 0 || len(opt.MSIResourceID) > 0 {
                    return nil, errors.New("more than one user-assigned identity ID is set")
                }
                userMSI.Type = msiObjectID
                userMSI.Value = opt.MSIObjectID
            }
            if len(opt.MSIResourceID) > 0 {
                if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 {
                    return nil, errors.New("more than one user-assigned identity ID is set")
                }
                userMSI.Type = msiResourceID
                userMSI.Value = opt.MSIResourceID
            }
        } else {
            userMSI = nil
        }
        err = f.imdsPacer.Call(func() (bool, error) {
            // Retry as specified by the documentation:
            // https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#retry-guidance
            token, err = GetMSIToken(ctx, userMSI)
            return f.shouldRetry(ctx, err)
        })

        if err != nil {
            return nil, errors.Wrapf(err, "Failed to acquire MSI token")
        }

        u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
        if err != nil {
            return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
        }
        credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration {
            fs.Debugf(f, "Token refresher called.")
            var refreshedToken adal.Token
            err := f.imdsPacer.Call(func() (bool, error) {
                refreshedToken, err = GetMSIToken(ctx, userMSI)
                return f.shouldRetry(ctx, err)
            })
            if err != nil {
                // Failed to refresh.
                return 0
            }
            credential.SetToken(refreshedToken.AccessToken)
            now := time.Now().UTC()
            // Refresh one minute before expiry.
            refreshAt := refreshedToken.Expires().UTC().Add(-1 * time.Minute)
            fs.Debugf(f, "Acquired new token that expires at %v; refreshing in %d s", refreshedToken.Expires(),
                int(refreshAt.Sub(now).Seconds()))
            if now.After(refreshAt) {
                // Acquired a causality violation.
                return 0
            }
            return refreshAt.Sub(now)
        })
        pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
        serviceURL = azblob.NewServiceURL(*u, pipeline)
    case opt.Account != "" && opt.Key != "":
        credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
        if err != nil {
@@ -482,8 +686,27 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        } else {
            serviceURL = azblob.NewServiceURL(*u, pipeline)
        }
    case opt.ServicePrincipalFile != "":
        // Create a standard URL.
        u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
        if err != nil {
            return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
        }
        // Try loading service principal credentials from file.
        loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
        if err != nil {
            return nil, errors.Wrap(err, "error opening service principal credentials file")
        }
        // Create a token refresher from service principal credentials.
        tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds)
        if err != nil {
            return nil, errors.Wrap(err, "failed to create a service principal token")
        }
        options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}
        pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
        serviceURL = azblob.NewServiceURL(*u, pipe)
    default:
        return nil, errors.New("Need account+key or connectionString or sasURL")
        return nil, errors.New("No authentication method configured")
    }
    f.svcURL = &serviceURL

@@ -524,7 +747,7 @@ func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItemInternal) (fs.Object, error) {
    o := &Object{
        fs:     f,
        remote: remote,
@@ -581,7 +804,7 @@ func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool
}

// listFn is called from list to handle an object
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
type listFn func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error

// list lists the objects into the function supplied from
// the container and root supplied
@@ -621,7 +844,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
    err := f.pacer.Call(func() (bool, error) {
        var err error
        response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
        return f.shouldRetry(err)
        return f.shouldRetry(ctx, err)
    })

    if err != nil {
@@ -680,7 +903,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory bool) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItemInternal, isDirectory bool) (fs.DirEntry, error) {
    if isDirectory {
        d := fs.NewDir(remote, time.Time{})
        return d, nil
@@ -692,9 +915,27 @@ func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory
    return o, nil
}

// Check to see if this is a limited container and the container is not found
func (f *Fs) containerOK(container string) bool {
    if !f.isLimited {
        return true
    }
    f.cntURLcacheMu.Lock()
    defer f.cntURLcacheMu.Unlock()
    for limitedContainer := range f.cntURLcache {
        if container == limitedContainer {
            return true
        }
    }
    return false
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
    err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
    if !f.containerOK(container) {
        return nil, fs.ErrorDirNotFound
    }
    err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error {
        entry, err := f.itemToDirEntry(remote, object, isDirectory)
        if err != nil {
            return err
@@ -775,7 +1016,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
    container, directory := f.split(dir)
    list := walk.NewListRHelper(callback)
    listR := func(container, directory, prefix string, addContainer bool) error {
        return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
        return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error {
            entry, err := f.itemToDirEntry(remote, object, isDirectory)
            if err != nil {
                return err
@@ -802,6 +1043,9 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
            f.cache.MarkOK(container)
        }
    } else {
        if !f.containerOK(container) {
            return fs.ErrorDirNotFound
        }
        err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
        if err != nil {
            return err
@@ -826,7 +1070,7 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
    err := f.pacer.Call(func() (bool, error) {
        var err error
        response, err = f.svcURL.ListContainersSegment(ctx, marker, params)
        return f.shouldRetry(err)
        return f.shouldRetry(ctx, err)
    })
    if err != nil {
        return err
@@ -878,7 +1122,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
        }
        // now try to create the container
        return f.pacer.Call(func() (bool, error) {
            _, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
            _, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, f.publicAccess)
            if err != nil {
                if storageErr, ok := err.(azblob.StorageError); ok {
                    switch storageErr.ServiceCode() {
@@ -895,7 +1139,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
                    }
                }
            }
            return f.shouldRetry(err)
            return f.shouldRetry(ctx, err)
        })
    }, nil)
}
@@ -903,7 +1147,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
// isEmpty checks to see if a given (container, directory) is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) {
    empty := true
    err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
    err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error {
        empty = false
        return nil
    })
@@ -933,10 +1177,10 @@ func (f *Fs) deleteContainer(ctx context.Context, container string) error {
                return false, fs.ErrorDirNotFound
            }

            return f.shouldRetry(err)
            return f.shouldRetry(ctx, err)
        }

        return f.shouldRetry(err)
        return f.shouldRetry(ctx, err)
        })
    })
}
@@ -976,7 +1220,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.deleteContainer(ctx, container)
}

// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -1008,8 +1252,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    var startCopy *azblob.BlobStartCopyFromURLResponse

    err = f.pacer.Call(func() (bool, error) {
        startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options)
        return f.shouldRetry(err)
        startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options, azblob.AccessTierType(f.opt.AccessTier), nil)
        return f.shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, err
@@ -1018,7 +1262,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    copyStatus := startCopy.CopyStatus()
    for copyStatus == azblob.CopyStatusPending {
        time.Sleep(1 * time.Second)
        getMetadata, err := dstBlobURL.GetProperties(ctx, options)
        getMetadata, err := dstBlobURL.GetProperties(ctx, options, azblob.ClientProvidedKeyOptions{})
        if err != nil {
            return nil, err
        }
@@ -1036,7 +1280,7 @@ func (f *Fs) getMemoryPool(size int64) *pool.Pool {
    return pool.New(
        time.Duration(f.opt.MemoryPoolFlushTime),
        int(size),
        fs.Config.Transfers,
        f.ci.Transfers,
        f.opt.MemoryPoolUseMmap,
    )
}
@@ -1123,7 +1367,7 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
    return nil
}

func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItemInternal) (err error) {
    metadata := info.Metadata
    size := *info.Properties.ContentLength
    if isDirectoryMarker(size, metadata, o.remote) {
@@ -1169,8 +1413,8 @@ func (o *Object) readMetaData() (err error) {
    ctx := context.Background()
    var blobProperties *azblob.BlobGetPropertiesResponse
    err = o.fs.pacer.Call(func() (bool, error) {
        blobProperties, err = blob.GetProperties(ctx, options)
        return o.fs.shouldRetry(err)
        blobProperties, err = blob.GetProperties(ctx, options, azblob.ClientProvidedKeyOptions{})
        return o.fs.shouldRetry(ctx, err)
    })
    if err != nil {
        // On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
@@ -1204,8 +1448,8 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {

    blob := o.getBlobReference()
    err := o.fs.pacer.Call(func() (bool, error) {
        _, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
        return o.fs.shouldRetry(err)
        _, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
        return o.fs.shouldRetry(ctx, err)
    })
    if err != nil {
        return err
@@ -1245,15 +1489,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    }
    blob := o.getBlobReference()
    ac := azblob.BlobAccessConditions{}
    var dowloadResponse *azblob.DownloadResponse
    var downloadResponse *azblob.DownloadResponse
    err = o.fs.pacer.Call(func() (bool, error) {
        dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
        return o.fs.shouldRetry(err)
        downloadResponse, err = blob.Download(ctx, offset, count, ac, false, azblob.ClientProvidedKeyOptions{})
        return o.fs.shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "failed to open for download")
    }
    in = dowloadResponse.Body(azblob.RetryReaderOptions{})
    in = downloadResponse.Body(azblob.RetryReaderOptions{})
    return in, nil
}

@@ -1278,12 +1522,6 @@ func init() {
    }
}

// readSeeker joins an io.Reader and an io.Seeker
type readSeeker struct {
    io.Reader
    io.Seeker
}

// increment the slice passed in as LSB binary
func increment(xs []byte) {
    for i, digit := range xs {
@@ -1296,153 +1534,69 @@
    }
}

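`increment` treats the slice as a little-endian counter (the hunk boundary above hides part of its loop body). A reconstructed, runnable illustration of the carry behaviour, based on how the function is used further down:

```go
package main

import "fmt"

// increment adds 1 to xs viewed as a little-endian integer,
// carrying into the next byte on overflow (reconstructed sketch).
func increment(xs []byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			break // no carry needed
		}
	}
}

func main() {
	id := []byte{0xff, 0x00}
	increment(id)
	fmt.Printf("% x\n", id) // prints "00 01": 0xff wrapped and carried
}
```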
var warnStreamUpload sync.Once
|
||||
// poolWrapper wraps a pool.Pool as an azblob.TransferManager
|
||||
type poolWrapper struct {
|
||||
pool *pool.Pool
|
||||
bufToken chan struct{}
|
||||
runToken chan struct{}
|
||||
}
|
||||
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
//
|
||||
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
|
||||
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
|
||||
// Calculate correct chunkSize
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
totalParts := -1
|
||||
|
||||
// Note that the max size of file is 4.75 TB (100 MB X 50,000
|
||||
// blocks) and this is bigger than the max uncommitted block
|
||||
// size (9.52 TB) so we do not need to part commit block lists
|
||||
// or garbage collect uncommitted blocks.
|
||||
//
|
||||
// See: https://docs.microsoft.com/en-gb/rest/api/storageservices/put-block
|
||||
|
||||
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
|
||||
// buffers here (default 4MB). With a maximum number of parts (50,000) this will be a file of
|
||||
// 195GB which seems like a not too unreasonable limit.
|
||||
if size == -1 {
|
||||
warnStreamUpload.Do(func() {
|
||||
fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
|
||||
o.fs.opt.ChunkSize, fs.SizeSuffix(chunkSize*maxTotalParts))
|
||||
})
|
||||
} else {
|
||||
// Adjust partSize until the number of parts is small enough.
|
||||
if size/chunkSize >= maxTotalParts {
|
||||
// Calculate partition size rounded up to the nearest MB
|
||||
chunkSize = (((size / maxTotalParts) >> 20) + 1) << 20
|
||||
}
|
||||
if chunkSize > int64(maxChunkSize) {
|
||||
return errors.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), totalParts, fs.SizeSuffix(chunkSize/2))
|
||||
}
|
||||
totalParts = int(size / chunkSize)
|
||||
if size%chunkSize != 0 {
|
||||
totalParts++
|
||||
}
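
Editor's note: a worked example of the rounding above may help. Assuming maxTotalParts = 50000 as in the surrounding code, a hypothetical 500 GiB upload with the default 4 MiB chunks would need 128000 parts, so the chunk size is recomputed and rounded up to the next whole MiB:

package main

import "fmt"

const maxTotalParts = 50000 // assumed from the surrounding code

func main() {
	size := int64(500) << 30    // hypothetical 500 GiB upload
	chunkSize := int64(4) << 20 // default 4 MiB chunk size
	if size/chunkSize >= maxTotalParts {
		// Round size/maxTotalParts up to the nearest MiB.
		chunkSize = (((size / maxTotalParts) >> 20) + 1) << 20
	}
	totalParts := int(size / chunkSize)
	if size%chunkSize != 0 {
		totalParts++
	}
	fmt.Println(chunkSize>>20, "MiB chunks,", totalParts, "parts")
	// 500 GiB / 50000 is about 10.24 MiB, rounded up to 11 MiB chunks,
	// giving 46546 parts, comfortably under the 50000 limit.
}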
// newPoolWrapper creates an azblob.TransferManager that will use a
// pool.Pool with maximum concurrency as specified.
func (f *Fs) newPoolWrapper(concurrency int) azblob.TransferManager {
return &poolWrapper{
pool: f.pool,
bufToken: make(chan struct{}, concurrency),
runToken: make(chan struct{}, concurrency),
}
}

fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
// Get implements TransferManager.Get().
func (pw *poolWrapper) Get() []byte {
pw.bufToken <- struct{}{}
return pw.pool.Get()
}

// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
// Put implements TransferManager.Put().
func (pw *poolWrapper) Put(b []byte) {
pw.pool.Put(b)
<-pw.bufToken
}

// Upload the chunks
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = size // remaining size in file for logging only, -1 if size < 0
position = int64(0) // position in file
memPool = o.fs.getMemoryPool(chunkSize) // pool to get memory from
finished = false // set when we have read EOF
blocks []string // list of blocks for finalize
blockBlobURL = blob.ToBlockBlobURL() // Get BlockBlobURL, we will use default pipeline here
ac = azblob.LeaseAccessConditions{} // Use default lease access conditions
binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
)
for part := 0; !finished; part++ {
// Get a block of memory from the pool and a token which limits concurrency
o.fs.uploadToken.Get()
buf := memPool.Get()
// Run implements TransferManager.Run().
func (pw *poolWrapper) Run(f func()) {
pw.runToken <- struct{}{}
go func() {
f()
<-pw.runToken
}()
}
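
Editor's note: poolWrapper uses buffered channels as counting semaphores: Get/Put bound the number of buffers handed out, and Run bounds the number of goroutines in flight. A standalone sketch of the Run half of that pattern (illustrative names, not the rclone API):

package main

import (
	"fmt"
	"sync"
)

// runLimiter bounds concurrently running functions the same way
// poolWrapper.Run does: send to acquire a token, receive to release it.
type runLimiter struct {
	runToken chan struct{}
}

func newRunLimiter(concurrency int) *runLimiter {
	return &runLimiter{runToken: make(chan struct{}, concurrency)}
}

func (l *runLimiter) Run(f func()) {
	l.runToken <- struct{}{} // blocks once `concurrency` functions are live
	go func() {
		f()
		<-l.runToken
	}()
}

func main() {
	l := newRunLimiter(4)
	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		wg.Add(1)
		i := i // capture loop variable for the goroutine
		l.Run(func() {
			defer wg.Done()
			fmt.Println("uploading part", i)
		})
	}
	wg.Wait()
}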

free := func() {
memPool.Put(buf) // return the buf
o.fs.uploadToken.Put() // return the token
}

// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
free()
break
}

// Read the chunk
n, err := readers.ReadFill(in, buf) // this can never return 0, nil
if err == io.EOF {
if n == 0 { // end if no data
free()
break
}
finished = true
} else if err != nil {
free()
return errors.Wrap(err, "multipart upload failed to read source")
}
buf = buf[:n]

// increment the blockID and save the blocks for finalize
increment(binaryBlockID)
blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
blocks = append(blocks, blockID)

// Transfer the chunk
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
g.Go(func() (err error) {
defer free()

// Upload the block, with MD5 for check
md5sum := md5.Sum(buf)
transactionalMD5 := md5sum[:]
err = o.fs.pacer.Call(func() (bool, error) {
bufferReader := bytes.NewReader(buf)
wrappedReader := wrap(bufferReader)
rs := readSeeker{wrappedReader, bufferReader}
_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
return o.fs.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "multipart upload failed to upload part")
}
return nil
})

// ready for next block
if size >= 0 {
remaining -= chunkSize
}
position += chunkSize
}
err = g.Wait()
if err != nil {
return err
}

// Finalise the upload session
err = o.fs.pacer.Call(func() (bool, error) {
_, err := blockBlobURL.CommitBlockList(ctx, blocks, *httpHeaders, o.meta, azblob.BlobAccessConditions{})
return o.fs.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
}
return nil
// Close implements TransferManager.Close().
func (pw *poolWrapper) Close() {
}

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.accessTier == azblob.AccessTierArchive {
if o.fs.opt.ArchiveTierDelete {
fs.Debugf(o, "deleting archive tier blob before updating")
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to delete archive blob before updating")
}
} else {
return errCantUpdateArchiveTierBlobs
}
}
container, _ := o.split()
err = o.fs.makeContainer(ctx, container)
if err != nil {
return err
}
size := src.Size()

// Update Mod time
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
@@ -1451,11 +1605,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

blob := o.getBlobReference()
httpHeaders := azblob.BlobHTTPHeaders{}
httpHeaders.ContentType = fs.MimeType(ctx, o)
// Compute the Content-MD5 of the file, for multiparts uploads it
httpHeaders.ContentType = fs.MimeType(ctx, src)

// Compute the Content-MD5 of the file. As we stream all uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
// Note: If multipart, an MD5 checksum will also be computed for each uploaded block
// in order to validate its integrity during transport
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
@@ -1469,31 +1622,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(o.fs.opt.ChunkSize),
MaxBuffers: 4,
MaxBuffers: uploadConcurrency,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
}
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
// is merged the SDK can't upload a single blob of exactly the chunk
// size, so upload with a multpart upload to work around.
// See: https://github.com/rclone/rclone/issues/2653
multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if size == int64(o.fs.opt.ChunkSize) {
multipartUpload = true
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
}

// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
if multipartUpload {
// If a large file upload in chunks
err = o.uploadMultipart(ctx, in, size, &blob, &httpHeaders)
} else {
// Write a small blob in one transaction
blockBlobURL := blob.ToBlockBlobURL()
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
}
return o.fs.shouldRetry(err)
// Stream contents of the reader object to the given blob URL
blockBlobURL := blob.ToBlockBlobURL()
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
return err
@@ -1521,7 +1661,7 @@ func (o *Object) Remove(ctx context.Context) error {
ac := azblob.BlobAccessConditions{}
return o.fs.pacer.Call(func() (bool, error) {
_, err := blob.Delete(ctx, snapShotOptions, ac)
return o.fs.shouldRetry(err)
return o.fs.shouldRetry(ctx, err)
})
}

@@ -1550,7 +1690,7 @@ func (o *Object) SetTier(tier string) error {
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
return o.fs.shouldRetry(err)
return o.fs.shouldRetry(ctx, err)
})

if err != nil {
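
Editor's note: the common thread in these hunks is that shouldRetry gains a context parameter, so a cancelled context stops the pacer from retrying. A minimal sketch of that shape, assuming rclone's fserrors helpers of this period (ContextError appears verbatim in the b2 hunk further below; ShouldRetry is the generic fallback):

package azureblob // sketch only, not part of the diff

import (
	"context"

	"github.com/rclone/rclone/fs/fserrors"
)

// shouldRetrySketch mirrors the new signature: give up immediately if the
// context was cancelled, otherwise fall back to generic retryability.
func shouldRetrySketch(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err // context cancelled or deadline exceeded
	}
	return fserrors.ShouldRetry(err), err
}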

@@ -1,4 +1,4 @@
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,!js,go1.14

package azureblob


@@ -1,14 +1,16 @@
// Test AzureBlob filesystem interface

// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,!js,go1.14

package azureblob

import (
"context"
"testing"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)

// TestIntegration runs integration tests against the remote
@@ -27,11 +29,36 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}

var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)

// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
}
}

// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 solaris js !go1.13
// +build plan9 solaris js !go1.14

package azureblob

137
backend/azureblob/imds.go
Normal file
@@ -0,0 +1,137 @@
// +build !plan9,!solaris,!js,go1.14

package azureblob

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"

"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)

const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)

// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string

type msiIdentifierType int

const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)

type userMSI struct {
Type msiIdentifierType
Value string
}

type httpError struct {
Response *http.Response
}

func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}

// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)

// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()

// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")

// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}

// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, errors.Wrap(err, "MSI is not enabled on this VM")
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}

b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, errors.Wrap(err, "Couldn't read IMDS response")
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))

// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
}

return result, nil
}
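
Editor's note: the deferred drain-then-close in GetMSIToken is the standard Go idiom for HTTP responses: reading the body to EOF before closing lets the client return the TCP connection to its keep-alive pool instead of discarding it. The same pattern in isolation (example.com is a placeholder URL):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

func fetchLength(url string) (int, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, err
	}
	defer func() {
		// Drain any unread bytes, then close, so the connection is reusable.
		_, _ = io.Copy(ioutil.Discard, resp.Body)
		_ = resp.Body.Close()
	}()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return 0, err
	}
	return len(body), nil
}

func main() {
	n, err := fetchLength("https://example.com")
	fmt.Println(n, err)
}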
117
backend/azureblob/imds_test.go
Normal file
@@ -0,0 +1,117 @@
// +build !plan9,!solaris,!js,go1.14

package azureblob

import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"

"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}

func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)

// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}

for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}

// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
}
}
}

func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}

func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}
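
Editor's note: a hypothetical call site for the GetMSIToken function above, assumed to live inside package azureblob. Pass nil to use the system-assigned identity, or name a user-assigned identity; the UUID here is a placeholder:

func exampleMSIToken(ctx context.Context) {
	id := &userMSI{Type: msiClientID, Value: "00000000-0000-0000-0000-000000000000"} // placeholder client ID
	token, err := GetMSIToken(ctx, id)
	if err != nil {
		fs.Debugf(nil, "MSI token not available: %v", err)
		return
	}
	fs.Debugf(nil, "got MSI token, expires in %s seconds", token.ExpiresIn)
}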
174
backend/b2/b2.go
@@ -44,8 +44,10 @@ const (
timeHeader = headerPrefix + timeKey
sha1Key = "large_file_sha1"
sha1Header = "X-Bz-Content-Sha1"
sha1InfoHeader = headerPrefix + sha1Key
testModeHeader = "X-Bz-Test-Mode"
idHeader = "X-Bz-File-Id"
nameHeader = "X-Bz-File-Name"
timestampHeader = "X-Bz-Upload-Timestamp"
retryAfterHeader = "Retry-After"
minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute
@@ -121,7 +123,7 @@ This value should be set no larger than 4.657GiB (== 5GB).`,
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy

Any files larger than this that need to be server side copied will be
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.

The minimum is 0 and the maximum is 4.6GB.`,
@@ -153,7 +155,9 @@ to start uploading.`,

This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network.
This is probably only useful for a public bucket.
Rclone works with private buckets by sending an "Authorization" header.
If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
Leave blank if you want to use the endpoint provided by Backblaze.`,
Advanced: true,
}, {
@@ -214,6 +218,7 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *rest.Client // the connection to the b2 server
rootBucket string // bucket part of root (if any)
@@ -290,7 +295,7 @@ func (o *Object) split() (bucket, bucketPath string) {

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
401, // Unauthorized (e.g. "Token has expired")
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
@@ -300,7 +305,10 @@ var retryErrorCodes = []int{

// shouldRetryNoAuth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// For 429 or 503 errors look at the Retry-After: header and
// set the retry appropriately, starting with a minimum of 1
// second if it isn't set.
@@ -331,7 +339,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
}
return true, err
}
return f.shouldRetryNoReauth(resp, err)
return f.shouldRetryNoReauth(ctx, resp, err)
}

// errorHandler parses a non 2xx error response into an error
@@ -391,14 +399,17 @@ func (f *Fs) setRoot(root string) {
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.UploadCutoff < opt.ChunkSize {
opt.UploadCutoff = opt.ChunkSize
fs.Infof(nil, "b2: raising upload cutoff to chunk size: %v", opt.UploadCutoff)
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "b2: upload cutoff")
@@ -416,20 +427,22 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.Endpoint == "" {
opt.Endpoint = defaultEndpoint
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
ci: ci,
srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
cache: bucket.NewCache(),
_bucketID: make(map[string]string, 1),
_bucketType: make(map[string]string, 1),
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
fs.Config.Transfers,
ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
@@ -439,7 +452,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
}).Fill(ctx, f)
// Set the test flag if required
if opt.TestMode != "" {
testMode := strings.TrimSpace(opt.TestMode)
@@ -469,12 +482,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
f.setRoot(oldRoot)
return f, nil
}
return nil, err
// File doesn't exist so return old f
f.setRoot(oldRoot)
return f, nil
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
@@ -497,7 +507,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
return f.shouldRetryNoReauth(resp, err)
return f.shouldRetryNoReauth(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to authenticate")
@@ -702,7 +712,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
remote := file.Name[len(prefix):]
// Check for directory
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if isDirectory {
if isDirectory && len(remote) > 1 {
remote = remote[:len(remote)-1]
}
if addBucket {
@@ -1168,10 +1178,10 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
}

// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
toBeDeleted := make(chan *api.File, f.ci.Transfers)
var wg sync.WaitGroup
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
wg.Add(f.ci.Transfers)
for i := 0; i < f.ci.Transfers; i++ {
go func() {
defer wg.Done()
for object := range toBeDeleted {
@@ -1183,7 +1193,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(err)
tr.Done(ctx, err)
}
}()
}
@@ -1211,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
toBeDeleted <- object
}
last = remote
tr.Done(nil)
tr.Done(ctx, nil)
}
return nil
}))
@@ -1234,7 +1244,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, "", true)
}

// copy does a server side copy from dstObj <- srcObj
// copy does a server-side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
@@ -1291,7 +1301,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
return dstObj.decodeMetaDataFileInfo(&response)
}

// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -1440,7 +1450,7 @@ func (o *Object) Size() int64 {
// Make sure it is lower case
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
const unverified = "unverified:"
@@ -1494,8 +1504,11 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}

// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// getMetaDataListing gets the metadata from the object unconditionally from the listing
//
// Note that listing is a class C transaction which costs more than
// the B transaction used in getMetaData
func (o *Object) getMetaDataListing(ctx context.Context) (info *api.File, err error) {
bucket, bucketPath := o.split()
maxSearched := 1
var timestamp api.Timestamp
@@ -1528,6 +1541,19 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
return info, nil
}

// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// If using versions and have a version suffix, need to list the directory to find the correct versions
if o.fs.opt.Versions {
timestamp, _ := api.RemoveVersion(o.remote)
if !timestamp.IsZero() {
return o.getMetaDataListing(ctx)
}
}
_, info, err = o.getOrHead(ctx, "HEAD", nil)
return info, err
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
@@ -1657,12 +1683,11 @@ func (file *openFile) Close() (err error) {
// Check it satisfies the interfaces
var _ io.ReadCloser = &openFile{}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
opts := rest.Opts{
Method: "GET",
Options: options,
Method: method,
Options: options,
NoResponse: method == "HEAD",
}

// Use downloadUrl from backblaze if downloadUrl is not set
@@ -1680,37 +1705,74 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
bucket, bucketPath := o.split()
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
// 404 for files, 400 for directories
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
return nil, nil, fs.ErrorObjectNotFound
}
return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
}

// Parse the time out of the headers if possible
err = o.parseTimeString(resp.Header.Get(timeHeader))
// NB resp may be Open here - don't return err != nil without closing

// Convert the Headers into an api.File
var uploadTimestamp api.Timestamp
err = uploadTimestamp.UnmarshalJSON([]byte(resp.Header.Get(timestampHeader)))
if err != nil {
fs.Debugf(o, "Bad "+timestampHeader+" header: %v", err)
}
var Info = make(map[string]string)
for k, vs := range resp.Header {
k = strings.ToLower(k)
for _, v := range vs {
if strings.HasPrefix(k, headerPrefix) {
Info[k[len(headerPrefix):]] = v
}
}
}
info = &api.File{
ID: resp.Header.Get(idHeader),
Name: resp.Header.Get(nameHeader),
Action: "upload",
Size: resp.ContentLength,
UploadTimestamp: uploadTimestamp,
SHA1: resp.Header.Get(sha1Header),
ContentType: resp.Header.Get("Content-Type"),
Info: Info,
}
// When reading files from B2 via cloudflare using
// --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old
// length read from the listing.
if info.Size < 0 {
info.Size = o.size
}
return resp, info, nil
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)

resp, info, err := o.getOrHead(ctx, "GET", options)
if err != nil {
return nil, err
}

// Don't check length or hash or metadata on partial content
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil
}

err = o.decodeMetaData(info)
if err != nil {
_ = resp.Body.Close()
return nil, err
}
// Read sha1 from header if it isn't set
if o.sha1 == "" {
o.sha1 = resp.Header.Get(sha1Header)
fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
// if sha1 header is "none" (in big files), then need
// to read it from the metadata
if o.sha1 == "none" {
o.sha1 = resp.Header.Get(sha1InfoHeader)
fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
}
o.sha1 = cleanSHA1(o.sha1)
}
// Don't check length or hash on partial content
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil
}
return newOpenFile(o, resp), nil
}


@@ -84,20 +84,20 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
var err error
// If using box config.json, use JWT auth
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(jsonFile, boxSubType, name, m)
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
err = oauthutil.Config("box", name, m, oauthConfig, nil)
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
@@ -153,7 +153,7 @@ func init() {
})
}

func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
@@ -169,7 +169,7 @@ func refreshJWTToken(jsonFile string, boxSubType string, name string, m configma
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
@@ -317,10 +317,13 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
authRetry := false

if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
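
Editor's note: the rewritten check works because Header.Get canonicalizes the key and returns the first value, or the empty string when the header is absent, so strings.Contains is safe without the explicit length test. A quick illustration:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func isExpiredToken(resp *http.Response) bool {
	return resp != nil && resp.StatusCode == 401 &&
		strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token")
}

func main() {
	h := http.Header{}
	h.Set("Www-Authenticate", `Bearer realm="Service", error="expired_token"`)
	fmt.Println(isExpiredToken(&http.Response{StatusCode: 401, Header: h}))             // true
	fmt.Println(isExpiredToken(&http.Response{StatusCode: 401, Header: http.Header{}})) // false: Get returns ""
}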
@@ -339,7 +342,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
}

found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
if strings.EqualFold(item.Name, leaf) {
info = item
return true
}
@@ -372,8 +375,7 @@ func errorHandler(resp *http.Response) error {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -387,28 +389,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

root = parsePath(root)

client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(ctx)
var ts *oauthutil.TokenSource
// If not using an accessToken, create an oauth client and tokensource
if opt.AccessToken == "" {
client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
}

ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)

// If using an accessToken, set the Authorization header
@@ -424,7 +427,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// should do so whether there are uploads pending or not.
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(jsonFile, boxSubType, name, m)
err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
return err
})
f.tokenRenewer.Start()
@@ -463,7 +466,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&tempF)
f.features.Fill(ctx, &tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -514,7 +517,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true
}
@@ -548,7 +551,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -585,7 +588,7 @@ OUTER:
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -740,7 +743,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
}

@@ -767,7 +770,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
@@ -791,7 +794,7 @@ func (f *Fs) Precision() time.Duration {
return time.Second
}

// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -839,7 +842,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -877,7 +880,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -895,7 +898,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to read user info")
@@ -909,7 +912,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return usage, nil
}

// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -945,7 +948,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1008,12 +1011,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
return info.SharedLink.URL, err
}

// deletePermanently permenently deletes a trashed file
// deletePermanently permanently deletes a trashed file
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
opts := rest.Opts{
Method: "DELETE",
@@ -1026,7 +1029,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
}

@@ -1048,7 +1051,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list trash")
@@ -1182,7 +1185,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
return info, err
}
@@ -1215,7 +1218,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -1255,7 +1258,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return err

@@ -1,4 +1,4 @@
// multpart upload for box
// multipart upload for box

package box

@@ -44,7 +44,7 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
return
}
@@ -74,7 +74,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -109,10 +109,10 @@ outer:
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
if err != nil {
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
}
body, err = rest.ReadBody(resp)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
delay := defaultDelay
var why string
@@ -167,7 +167,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
return err
}

33
backend/cache/cache.go
vendored
@@ -68,7 +68,7 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "plex_url",
@@ -109,7 +109,7 @@ will need to be cleared or unexpected EOF errors will occur.`,
}},
}, {
Name: "info_age",
Help: `How long to cache file structure information (directory listings, file size, times etc).
Help: `How long to cache file structure information (directory listings, file size, times, etc.).
If all write operations are done through the cache then you can safely make
this value very large as the cache store will also be updated in real time.`,
Default: DefCacheInfoAge,
@@ -340,7 +340,7 @@ func parseRootPath(path string) (string, error) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -362,7 +362,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
}

remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(remotePath)
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
}
@@ -479,7 +479,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(f.opt.TempWritePath)
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
if err != nil {
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
}
@@ -506,13 +506,13 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
pollInterval := make(chan time.Duration, 1)
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
doChangeNotify(ctx, f.receiveChangeNotify, pollInterval)
}

f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
DuplicateFiles: false, // storage doesn't permit this
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// override only those features that use a temp fs and it doesn't support them
//f.features.ChangeNotify = f.ChangeNotify
if f.opt.TempWritePath != "" {
@@ -581,7 +581,7 @@ Some valid examples are:
"0:10" -> the first ten chunks

Any parameter with a key that starts with "file" can be used to
specify files to fetch, eg
specify files to fetch, e.g.

rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye

@@ -1236,7 +1236,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)

@@ -1517,7 +1517,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.put(ctx, in, src, options, do)
}

// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)

@@ -1594,7 +1594,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return co, nil
}

// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)

@@ -1895,6 +1895,16 @@ func (f *Fs) Disconnect(ctx context.Context) error {
return do(ctx)
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
do := f.Fs.Features().Shutdown
if do == nil {
return nil
}
return do(ctx)
}
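
Editor's note: this is rclone's optional-features pattern in miniature: a wrapping backend asks the wrapped Fs for the optional method and forwards only if it exists. A generic sketch of the same delegation (the interface shape follows fs.Shutdowner; the wrapper type is illustrative):

package main

import (
	"context"
	"fmt"
)

// Shutdowner is the optional interface a backend may implement.
type Shutdowner interface {
	Shutdown(ctx context.Context) error
}

// wrapper forwards Shutdown to the wrapped value only when supported.
type wrapper struct {
	inner interface{}
}

func (w *wrapper) Shutdown(ctx context.Context) error {
	do, ok := w.inner.(Shutdowner)
	if !ok {
		return nil // wrapped backend has nothing to shut down
	}
	return do.Shutdown(ctx)
}

type noisy struct{}

func (noisy) Shutdown(ctx context.Context) error {
	fmt.Println("shutting down")
	return nil
}

func main() {
	_ = (&wrapper{inner: struct{}{}}).Shutdown(context.Background()) // no-op
	_ = (&wrapper{inner: noisy{}}).Shutdown(context.Background())    // delegates
}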

var commandHelp = []fs.CommandHelp{
{
Name: "stats",
@@ -1939,4 +1949,5 @@ var (
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
)

13
backend/cache/cache_internal_test.go
vendored
@@ -892,7 +892,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
} else {
remoteType := config.FileGet(remote, "type", "")
remoteType := config.FileGet(remote, "type")
if remoteType == "" {
t.Skipf("skipped due to invalid remote type for %v", remote)
return nil, nil
@@ -903,14 +903,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
}
remoteRemote := config.FileGet(remote, "remote", "")
remoteRemote := config.FileGet(remote, "remote")
if remoteRemote == "" {
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
return nil, nil
}
remoteRemoteParts := strings.Split(remoteRemote, ":")
remoteWrapping := remoteRemoteParts[0]
remoteType := config.FileGet(remoteWrapping, "type", "")
remoteType := config.FileGet(remoteWrapping, "type")
if remoteType != "cache" {
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
return nil, nil
@@ -925,14 +925,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)

fs.Config.LowLevelRetries = 1
ci := fs.GetConfig(context.Background())
ci.LowLevelRetries = 1

// Instantiate root
if purge {
boltDb.PurgeTempUploads()
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
}
f, err := cache.NewFs(remote, id, m)
f, err := cache.NewFs(context.Background(), remote, id, m)
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@@ -1033,7 +1034,7 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)

obj, err = f.Put(context.Background(), in1, objInfo1)
_, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)
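The test change from the global fs.Config to fs.GetConfig(context.Background()) reflects the wider move in this series to carry configuration in the context instead of a mutable global. A rough sketch of the idea with hypothetical names (rclone's actual fs.GetConfig differs in detail):

    package main

    import (
    	"context"
    	"fmt"
    )

    // Config is a hypothetical stand-in for rclone's global config struct.
    type Config struct {
    	LowLevelRetries int
    }

    type configKey struct{}

    var globalConfig = Config{LowLevelRetries: 10}

    // GetConfig returns the config stored in ctx, falling back to a copy
    // of the global defaults when none has been attached.
    func GetConfig(ctx context.Context) *Config {
    	if c, ok := ctx.Value(configKey{}).(*Config); ok {
    		return c
    	}
    	c := globalConfig // copy the defaults
    	return &c
    }

    // WithConfig attaches a config to the context for callees to pick up.
    func WithConfig(ctx context.Context, c *Config) context.Context {
    	return context.WithValue(ctx, configKey{}, c)
    }

    func main() {
    	ci := GetConfig(context.Background())
    	ci.LowLevelRetries = 1 // per-call override, no global mutation
    	ctx := WithConfig(context.Background(), ci)
    	fmt.Println(GetConfig(ctx).LowLevelRetries) // 1
    }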
File diff suppressed because it is too large
@@ -13,6 +13,7 @@ import (

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
@@ -467,9 +468,15 @@ func testPreventCorruption(t *testing.T, f *Fs) {
return obj
}
billyObj := newFile("billy")
billyTxn := billyObj.(*Object).xactID
if f.useNoRename {
require.True(t, billyTxn != "")
} else {
require.True(t, billyTxn == "")
}

billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
return f.makeChunkName(billyObj.Remote(), chunkNo, "", billyTxn)
}

err := f.Mkdir(ctx, billyChunkName(1))
@@ -486,11 +493,13 @@ func testPreventCorruption(t *testing.T, f *Fs) {
// accessing chunks in strict mode is prohibited
f.opt.FailHard = true
billyChunk4Name := billyChunkName(4)
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
_, err = f.base.NewObject(ctx, billyChunk4Name)
require.NoError(t, err)
_, err = f.NewObject(ctx, billyChunk4Name)
assertOverlapError(err)

f.opt.FailHard = false
billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
assert.NoError(t, err)
require.NotNil(t, billyChunk4)

@@ -519,7 +528,8 @@ func testPreventCorruption(t *testing.T, f *Fs) {

// recreate billy in case it was anyhow corrupted
willyObj := newFile("willy")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
willyTxn := willyObj.(*Object).xactID
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", willyTxn)
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
@@ -560,17 +570,20 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(100)

newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
filename = path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
txnID = chunkObj.xactID
}
return
}

f.opt.FailHard = false
file, fileName := newFile(f, "wreaker")
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
file, fileName, fileTxn := newFile(f, "wreaker")
wreak, _, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", fileTxn))

f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
@@ -649,7 +662,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
}
}

metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "", "")
require.NoError(t, err)
todaysMeta := string(metaData)
runSubtest(todaysMeta, "today")
@@ -663,6 +676,174 @@ func testMetadataInput(t *testing.T, f *Fs) {
runSubtest(futureMeta, "future")
}

// Test that chunker refuses to change on objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
if f.opt.MetaFormat == "none" {
t.Skip("this test requires metadata support")
}

saveOpt := f.opt
ctx := context.Background()
f.opt.FailHard = true
const dir = "future"
const file = dir + "/test"
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()

modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putPart := func(name string, part int, data, msg string) {
if part > 0 {
name = f.makeChunkName(name, part-1, "", "")
}
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
assert.NotNil(t, obj, msg)
}

// simulate chunked object from future
meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}`
putPart(file, 0, meta, "metaobject")
putPart(file, 1, "abc", "chunk1")
putPart(file, 2, "def", "chunk2")
putPart(file, 3, "ghi", "chunk3")

// List should succeed
ls, err := f.List(ctx, dir)
assert.NoError(t, err)
assert.Equal(t, 1, len(ls))
assert.Equal(t, int64(9), ls[0].Size())

// NewObject should succeed
obj, err := f.NewObject(ctx, file)
assert.NoError(t, err)
assert.Equal(t, file, obj.Remote())
assert.Equal(t, int64(9), obj.Size())

// Hash must fail
_, err = obj.Hash(ctx, hash.SHA1)
assert.Equal(t, ErrMetaUnknown, err)

// Move must fail
mobj, err := operations.Move(ctx, f, nil, file+"2", obj)
assert.Nil(t, mobj)
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}

// Put must fail
oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil)
buf := bytes.NewBufferString("abc")
_, err = f.Put(ctx, buf, oi)
assert.Error(t, err)

// Rcat must fail
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}
}
// The newer method of doing transactions without renaming should still be able to correctly process chunks that were created with renaming
// If you attempt to do the inverse, however, the data chunks will be ignored causing commands to perform incorrectly
func testBackwardsCompatibility(t *testing.T, f *Fs) {
if !f.useMeta {
t.Skip("Can't do norename transactions without metadata")
}
const dir = "backcomp"
ctx := context.Background()
saveOpt := f.opt
saveUseNoRename := f.useNoRename
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
f.useNoRename = saveUseNoRename
}()
f.opt.ChunkSize = fs.SizeSuffix(10)

modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(250)
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}

f.opt.FailHard = false
f.useNoRename = false
file, fileName := newFile(f, "renamefile")

f.opt.FailHard = false
item := fstest.NewItem(fileName, contents, modTime)

var items []fstest.Item
items = append(items, item)

f.useNoRename = true
fstest.CheckListingWithRoot(t, f, dir, items, nil, f.Precision())
_, err := f.NewObject(ctx, fileName)
assert.NoError(t, err)

f.opt.FailHard = true
_, err = f.List(ctx, dir)
assert.NoError(t, err)

f.opt.FailHard = false
_ = file.Remove(ctx)
}

func testChunkerServerSideMove(t *testing.T, f *Fs) {
if !f.useMeta {
t.Skip("Can't test norename transactions without metadata")
}

ctx := context.Background()
const dir = "servermovetest"
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), dir)

subFs1, err := fs.NewFs(ctx, subRemote+"/subdir1")
assert.NoError(t, err)
fs1, isChunkerFs := subFs1.(*Fs)
assert.True(t, isChunkerFs)
fs1.useNoRename = false
fs1.opt.ChunkSize = fs.SizeSuffix(3)

subFs2, err := fs.NewFs(ctx, subRemote+"/subdir2")
assert.NoError(t, err)
fs2, isChunkerFs := subFs2.(*Fs)
assert.True(t, isChunkerFs)
fs2.useNoRename = true
fs2.opt.ChunkSize = fs.SizeSuffix(3)

modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: "movefile", ModTime: modTime}
contents := "abcdef"
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)

dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
assert.NoError(t, err)
assert.Equal(t, int64(len(contents)), dstFile.Size())

r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
_ = operations.Purge(ctx, f.base, dir)
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
@@ -686,6 +867,15 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("MetadataInput", func(t *testing.T) {
testMetadataInput(t, f)
})
t.Run("FutureProof", func(t *testing.T) {
testFutureProof(t, f)
})
t.Run("BackwardsCompatibility", func(t *testing.T) {
testBackwardsCompatibility(t, f)
})
t.Run("ChunkerServerSideMove", func(t *testing.T) {
testChunkerServerSideMove(t, f)
})
}

var _ fstests.InternalTester = (*Fs)(nil)
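The tests above revolve around the transaction ID (xactID) that norename transactions embed in chunk names instead of renaming chunks on commit. As a rough illustration of why the tests thread the ID through makeChunkName, here is a hypothetical chunk-name builder; the real chunker name format is configurable and differs in detail:

    package main

    import "fmt"

    // makeChunkName composes a data chunk name from the parent file name,
    // a zero-based chunk number and an optional transaction ID, loosely
    // modelled on the chunker naming scheme (hypothetical format).
    func makeChunkName(name string, chunkNo int, txnID string) string {
    	s := fmt.Sprintf("%s.rclone_chunk.%03d", name, chunkNo+1)
    	if txnID != "" {
    		// norename transactions tag chunks instead of renaming them
    		s += "_" + txnID
    	}
    	return s
    }

    func main() {
    	fmt.Println(makeChunkName("billy", 0, ""))        // classic rename-style chunk
    	fmt.Println(makeChunkName("billy", 0, "a51recu")) // norename transaction chunk
    }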
@@ -15,10 +15,10 @@ import (

// Command line flags
var (
// Invalid characters are not supported by some remotes, eg. Mailru.
// Invalid characters are not supported by some remotes, e.g. Mailru.
// We enable testing with invalid characters when -remote is not set, so
// chunker overlays a local directory, but invalid characters are disabled
// by default when -remote is set, eg. when test_all runs backend tests.
// by default when -remote is set, e.g. when test_all runs backend tests.
// You can still test with invalid characters using the below flag.
UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)
backend/compress/.gitignore (vendored, new file)
@@ -0,0 +1 @@
test
backend/compress/compress.go (new file, 1416 lines)
File diff suppressed because it is too large
backend/compress/compress_test.go (new file, 65 lines)
@@ -0,0 +1,65 @@
// Test Crypt filesystem interface
package compress

import (
"os"
"path/filepath"
"testing"

_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
}

// TestRemoteGzip tests GZIP compression
func TestRemoteGzip(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
})
}
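For background on what the gzip compression mode exercises, the round trip itself is just the standard library's compress/gzip; a minimal sketch (unrelated to the backend's internal framing, which is more involved):

    package main

    import (
    	"bytes"
    	"compress/gzip"
    	"fmt"
    	"io/ioutil"
    )

    func main() {
    	plain := []byte("some repetitive data some repetitive data")

    	// Compress into an in-memory buffer.
    	var buf bytes.Buffer
    	zw := gzip.NewWriter(&buf)
    	if _, err := zw.Write(plain); err != nil {
    		panic(err)
    	}
    	if err := zw.Close(); err != nil { // flushes the gzip trailer
    		panic(err)
    	}

    	// Decompress and verify the round trip.
    	zr, err := gzip.NewReader(&buf)
    	if err != nil {
    		panic(err)
    	}
    	got, err := ioutil.ReadAll(zr)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(bytes.Equal(plain, got)) // true
    }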
@@ -147,7 +147,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
// If salt is "" we use a fixed salt just to make attackers lives
// slighty harder than using no salt.
//
// Note that empty passsword makes all 0x00 keys which is used in the
// Note that empty password makes all 0x00 keys which is used in the
// tests.
func (c *Cipher) Key(password, salt string) (err error) {
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
@@ -633,11 +633,8 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
}
// possibly err != nil here, but we will process the
// data and the next call to ReadFull will return 0, err
// Write nonce to start of block
copy(fh.buf, fh.nonce[:])
// Encrypt the block using the nonce
block := fh.buf
secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n
fh.nonce.increment()
@@ -782,8 +779,7 @@ func (fh *decrypter) fillBuffer() (err error) {
return ErrorEncryptedFileBadHeader
}
// Decrypt the block using the nonce
block := fh.buf
_, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok {
if err != nil {
return err // return pending error as it is likely more accurate
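The two hunks above drop the block := fh.buf alias and seal/open directly into fh.buf[:0], reusing the buffer's backing array. A standalone sketch of that in-place pattern with nacl/secretbox (key and nonce zeroed for brevity, which real code must never do):

    package main

    import (
    	"fmt"

    	"golang.org/x/crypto/nacl/secretbox"
    )

    func main() {
    	var key [32]byte
    	var nonce [24]byte // must be unique per message in real use

    	msg := []byte("hello")
    	buf := make([]byte, 0, len(msg)+secretbox.Overhead)

    	// Seal appends to buf[:0], so the sealed box reuses buf's
    	// backing array instead of allocating a new slice.
    	box := secretbox.Seal(buf[:0], msg, &nonce, &key)

    	// Open decrypts and authenticates; ok is false on tampering.
    	out, ok := secretbox.Open(nil, box, &nonce, &key)
    	fmt.Println(ok, string(out)) // true hello
    }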
@@ -30,7 +30,7 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
@@ -76,7 +76,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different crypt configs.
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.

Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
@@ -101,6 +101,21 @@ names, or for debugging purposes.`,
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "no_data_encryption",
Help: "Option to either encrypt file data or leave it unencrypted.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{
{
Value: "true",
Help: "Don't encrypt file data, leave it unencrypted.",
},
{
Value: "false",
Help: "Encrypt file data.",
},
},
}},
})
}
@@ -144,7 +159,7 @@ func NewCipher(m configmap.Mapper) (*Cipher, error) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -159,21 +174,21 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
// Make sure to remove trailing . reffering to the current dir
// Make sure to remove trailing . referring to the current dir
if path.Base(rpath) == "." {
rpath = strings.TrimSuffix(rpath, ".")
}
// Look for a file first
var wrappedFs fs.Fs
if rpath == "" {
wrappedFs, err = cache.Get(remote)
wrappedFs, err = cache.Get(ctx, remote)
} else {
remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
wrappedFs, err = cache.Get(remotePath)
wrappedFs, err = cache.Get(ctx, remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = cache.Get(remotePath)
wrappedFs, err = cache.Get(ctx, remotePath)
}
}
if err != fs.ErrorIsFile && err != nil {
@@ -199,7 +214,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

return f, err
}
@@ -209,6 +224,7 @@ type Options struct {
Remote string `config:"remote"`
FilenameEncryption string `config:"filename_encryption"`
DirectoryNameEncryption bool `config:"directory_name_encryption"`
NoDataEncryption bool `config:"no_data_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
@@ -346,6 +362,10 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..

// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
if f.opt.NoDataEncryption {
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
}

// Encrypt the data into wrappedIn
wrappedIn, encrypter, err := f.cipher.encryptData(in)
if err != nil {
@@ -384,13 +404,16 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
if srcHash != "" && dstHash != "" {
if srcHash != dstHash {
// remove object
err = o.Remove(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
}
@@ -444,7 +467,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return do(ctx, f.cipher.EncryptDirName(dir))
}

// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -469,7 +492,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.newObject(oResult), nil
}

// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -495,7 +518,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -617,6 +640,10 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
if f.opt.NoDataEncryption {
return src.Hash(ctx, hashType)
}

// Read the nonce - opening the file is sufficient to read the nonce in
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
@@ -822,9 +849,13 @@ func (o *Object) Remote() string {

// Size returns the size of the file
func (o *Object) Size() int64 {
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
if err != nil {
fs.Debugf(o, "Bad size for decrypt: %v", err)
size := o.Object.Size()
if !o.f.opt.NoDataEncryption {
var err error
size, err = o.f.cipher.DecryptedSize(size)
if err != nil {
fs.Debugf(o, "Bad size for decrypt: %v", err)
}
}
return size
}
@@ -842,6 +873,10 @@ func (o *Object) UnWrap() fs.Object {

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if o.f.opt.NoDataEncryption {
return o.Object.Open(ctx, options...)
}

var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
for _, option := range options {
@@ -917,6 +952,16 @@ func (f *Fs) Disconnect(ctx context.Context) error {
return do(ctx)
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
do := f.Fs.Features().Shutdown
if do == nil {
return nil
}
return do(ctx)
}

// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
@@ -1025,6 +1070,7 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
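The Size change above is the crux of no_data_encryption: with encryption on, the stored object is larger than the plaintext (a file header plus per-block authenticator overhead), so sizes must be mapped in both directions; with it off, sizes pass straight through. A sketch of the forward calculation under assumed constants (32-byte header, 64 KiB blocks, 16 bytes of overhead per block, which matches the documented crypt format but is restated here from memory):

    package main

    import "fmt"

    const (
    	fileHeaderSize = 32    // magic + nonce, assumed
    	blockDataSize  = 65536 // 64 KiB of plaintext per block
    	blockOverhead  = 16    // poly1305 authenticator per block, assumed
    )

    // encryptedSize maps a plaintext size to the stored size when data
    // encryption is enabled, and passes sizes through otherwise.
    func encryptedSize(n int64, noDataEncryption bool) int64 {
    	if noDataEncryption {
    		return n // sizes pass through untouched
    	}
    	blocks := n / blockDataSize
    	if n%blockDataSize != 0 {
    		blocks++
    	}
    	return fileHeaderSize + n + blocks*blockOverhead
    }

    func main() {
    	fmt.Println(encryptedSize(100, false)) // 148
    	fmt.Println(encryptedSize(100, true))  // 100
    }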
@@ -33,7 +33,7 @@ func (o testWrapper) UnWrap() fs.Object {
// Create a temporary local fs to upload things from

func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs()
localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err)
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
@@ -87,7 +87,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
}

// wrap the object in a crypt for upload using the nonce we
// saved from the encryptor
// saved from the encrypter
src := f.newObjectInfo(oi, nonce)

// Test ObjectInfo methods
@@ -91,3 +91,26 @@ func TestObfuscate(t *testing.T) {
UnimplementableObjectMethods: []string{"MimeType"},
})
}

// TestNoDataObfuscate runs integration tests against the remote
func TestNoDataObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt4"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
{Name: name, Key: "no_data_encryption", Value: "true"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
File diff suppressed because it is too large
@@ -7,6 +7,8 @@ import (
"io"
"io/ioutil"
"mime"
"os"
"path"
"path/filepath"
"strings"
"testing"
@@ -109,6 +111,7 @@ func TestInternalParseExtensions(t *testing.T) {
}

func TestInternalFindExportFormat(t *testing.T) {
ctx := context.Background()
item := &drive.File{
Name: "file",
MimeType: "application/vnd.google-apps.document",
@@ -126,7 +129,7 @@ func TestInternalFindExportFormat(t *testing.T) {
} {
f := new(Fs)
f.exportExtensions = test.extensions
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item)
assert.Equal(t, test.wantExtension, gotExtension)
if test.wantExtension != "" {
assert.Equal(t, item.Name+gotExtension, gotFilename)
@@ -194,7 +197,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)

testFilesFs, err := fs.NewFs(testFilesPath)
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
require.NoError(t, err)

_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -208,7 +211,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)

testFilesFs, err := fs.NewFs(testFilesPath)
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
require.NoError(t, err)

_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -272,14 +275,15 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
}
}

const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)

// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)
ctx := context.Background()
srcObj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
@@ -408,6 +412,55 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
require.NoError(t, f.Purge(ctx, "trashDir"))
}

// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
o := obj.(*Object)

dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()

checkFile := func(name string) {
filePath := filepath.Join(dir, name)
fi, err := os.Stat(filePath)
require.NoError(t, err)
assert.Equal(t, int64(100), fi.Size())
err = os.Remove(filePath)
require.NoError(t, err)
}

t.Run("BadID", func(t *testing.T) {
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id")
})

t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err)
err = f.copyID(ctx, rootID, dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "can't copy directory")
})

t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})

t.Run("WithDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
}

func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -424,6 +477,7 @@ func (f *Fs) InternalTest(t *testing.T) {
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
}

var _ fstests.InternalTester = (*Fs)(nil)
@@ -77,11 +77,10 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
return false, err
}
var req *http.Request
req, err = http.NewRequest(method, urls, body)
req, err = http.NewRequestWithContext(ctx, method, urls, body)
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
@@ -95,7 +94,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return f.shouldRetry(err)
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -114,8 +113,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,

// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body)
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body)
req.ContentLength = reqSize
totalSize := "*"
if rx.ContentLength >= 0 {
@@ -204,7 +202,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
again, err := rx.f.shouldRetry(err)
again, err := rx.f.shouldRetry(ctx, err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil
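Both hunks in upload.go replace the two-step NewRequest plus WithContext dance with http.NewRequestWithContext, which has been available since Go 1.13 and avoids the extra shallow request copy that WithContext makes. A before/after sketch:

    package main

    import (
    	"context"
    	"fmt"
    	"net/http"
    	"time"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    	defer cancel()

    	// Old style: build the request, then copy it to attach the context.
    	req1, _ := http.NewRequest("GET", "https://example.com", nil)
    	req1 = req1.WithContext(ctx)

    	// New style: one call, no intermediate copy.
    	req2, err := http.NewRequestWithContext(ctx, "GET", "https://example.com", nil)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(req1.URL, req2.URL)
    }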
File diff suppressed because it is too large
backend/dropbox/dropbox_internal_test.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package dropbox

import (
"testing"

"github.com/stretchr/testify/assert"
)

func TestInternalCheckPathLength(t *testing.T) {
rep := func(n int, r rune) (out string) {
rs := make([]rune, n)
for i := range rs {
rs[i] = r
}
return string(rs)
}
for _, test := range []struct {
in string
ok bool
}{
{in: "", ok: true},
{in: rep(maxFileNameLength, 'a'), ok: true},
{in: rep(maxFileNameLength+1, 'a'), ok: false},
{in: rep(maxFileNameLength, '£'), ok: true},
{in: rep(maxFileNameLength+1, '£'), ok: false},
{in: rep(maxFileNameLength, '☺'), ok: true},
{in: rep(maxFileNameLength+1, '☺'), ok: false},
{in: rep(maxFileNameLength, '你'), ok: true},
{in: rep(maxFileNameLength+1, '你'), ok: false},
{in: "/ok/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength, 'a') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, 'a') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '£') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '£') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '☺') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '☺') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '你') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '你') + "/ok", ok: false},
} {

err := checkPathLength(test.in)
assert.Equal(t, test.ok, err == nil, test.in)
}
}
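The table above exercises one subtlety: the limit applies per path component and is counted in characters, not bytes, so a name of maxFileNameLength '你' runes (three UTF-8 bytes each) must still pass. A sketch of a rune-based check consistent with those cases (the constant and error are stand-ins, not Dropbox's actual limits API):

    package main

    import (
    	"errors"
    	"fmt"
    	"strings"
    	"unicode/utf8"
    )

    const maxFileNameLength = 255 // assumed per-component limit

    var errNameTooLong = errors.New("path component too long")

    // checkPathLength verifies every /-separated component of the path,
    // counting runes rather than bytes so multibyte names aren't penalised.
    func checkPathLength(p string) error {
    	for _, part := range strings.Split(p, "/") {
    		if utf8.RuneCountInString(part) > maxFileNameLength {
    			return errNameTooLong
    		}
    	}
    	return nil
    }

    func main() {
    	ok := strings.Repeat("你", maxFileNameLength)
    	fmt.Println(checkPathLength("/ok/" + ok))        // <nil>
    	fmt.Println(checkPathLength("/ok/" + ok + "你")) // error
    }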
@@ -28,7 +28,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
@@ -48,6 +51,41 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {

var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString

func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, leaf, directoryID, nil
}

func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
request := FileInfoRequest{
URL: url,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/info.cgi",
}

var file File
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &file)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read file info")
}

return &file, err
}
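The extra fserrors.ContextError guard at the top of shouldRetry is what stops the pacer from retrying requests whose context has already been cancelled or timed out. A self-contained sketch of the same idea using only the standard library (rclone's fserrors helper also rewrites the error in place, which is omitted here):

    package main

    import (
    	"context"
    	"errors"
    	"fmt"
    	"net/http"
    )

    var retryStatus = map[int]bool{429: true, 500: true, 502: true, 503: true, 504: true}

    // shouldRetry reports whether a response/error pair deserves a retry,
    // refusing outright once the context is done.
    func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
    	if ctxErr := ctx.Err(); ctxErr != nil {
    		return false, ctxErr // cancelled or deadline exceeded: give up
    	}
    	if err != nil {
    		return true, err // transport-level errors are worth retrying
    	}
    	if resp != nil && retryStatus[resp.StatusCode] {
    		return true, fmt.Errorf("HTTP %d", resp.StatusCode)
    	}
    	return false, nil
    }

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	cancel()
    	again, err := shouldRetry(ctx, nil, errors.New("boom"))
    	fmt.Println(again, errors.Is(err, context.Canceled)) // false true

    	again, _ = shouldRetry(context.Background(), &http.Response{StatusCode: 503}, nil)
    	fmt.Println(again) // true
    }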
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
@@ -61,7 +99,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -89,7 +127,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -118,7 +156,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -146,7 +184,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
@@ -240,7 +278,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
@@ -267,7 +305,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
@@ -296,7 +334,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})

if err != nil {
@@ -308,6 +346,56 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
return response, nil
}
func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename string) (response *MoveFileResponse, err error) {
request := &MoveFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}

opts := rest.Opts{
Method: "POST",
Path: "/file/mv.cgi",
}

response = &MoveFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})

if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}

return response, nil
}

func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}

opts := rest.Opts{
Method: "POST",
Path: "/file/cp.cgi",
}

response = &CopyFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})

if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}

return response, nil
}

func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")

@@ -320,7 +408,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
@@ -363,7 +451,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,

err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})

if err != nil {
@@ -397,7 +485,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})

if err != nil {
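Every API helper above wraps its CallJSON in f.pacer.Call with shouldRetry deciding whether to go around again. Stripped of rclone's adaptive rate-limiting state, the control flow is a plain retry loop; a hedged sketch with a fixed attempt budget and sleep, where the real pacer adapts both:

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // call retries fn until it reports no retry is needed or the
    // attempt budget runs out, sleeping a little between tries.
    func call(attempts int, sleep time.Duration, fn func() (again bool, err error)) error {
    	var err error
    	for i := 0; i < attempts; i++ {
    		var again bool
    		again, err = fn()
    		if !again {
    			return err
    		}
    		time.Sleep(sleep)
    	}
    	return err
    }

    func main() {
    	tries := 0
    	err := call(3, 10*time.Millisecond, func() (bool, error) {
    		tries++
    		if tries < 3 {
    			return true, errors.New("transient")
    		}
    		return false, nil
    	})
    	fmt.Println(tries, err) // 3 <nil>
    }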
@@ -35,7 +35,7 @@ func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(name string, config configmap.Mapper) {
Config: func(ctx context.Context, name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{{
@@ -167,7 +167,7 @@ func (f *Fs) Features() *fs.Features {
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
@@ -186,16 +186,17 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
baseClient: &http.Client{},
}

f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
ReadMimeType: true,
}).Fill(ctx, f)

client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(ctx)

f.rest = rest.NewClient(client).SetRoot(apiBaseURL)

@@ -203,8 +204,6 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {

f.dirCache = dircache.New(root, rootID, f)

ctx := context.Background()

// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
@@ -227,7 +226,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&tempF)
f.features.Fill(ctx, &tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -306,10 +305,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
exisitingObj, err := f.NewObject(ctx, src.Remote())
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
@@ -323,7 +322,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(100e9) {
if size > int64(300e9) {
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
@@ -349,8 +348,10 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}

if len(fileUploadResponse.Links) != 1 {
return nil, errors.New("unexpected amount of files")
if len(fileUploadResponse.Links) == 0 {
return nil, errors.New("upload response not found")
} else if len(fileUploadResponse.Links) > 1 {
fs.Debugf(remote, "Multiple upload responses found, using the first")
}

link := fileUploadResponse.Links[0]
@@ -364,7 +365,6 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
fs: f,
remote: remote,
file: File{
ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
@@ -417,9 +417,89 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return nil
}

// Move src to this remote using server side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}

// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}

folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}

file, err := f.readFileInfo(ctx, resp.URLs[0])
if err != nil {
return nil, errors.New("couldn't read file data")
}
dstObj.setMetaData(*file)
return dstObj, nil
}

// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}

// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}

folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}

file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
if err != nil {
return nil, errors.New("couldn't read file data")
}
dstObj.setMetaData(*file)
return dstObj, nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
return o.(*Object).file.URL, nil
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)
@@ -4,13 +4,11 @@ package fichier
import (
"testing"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fs.Config.LogLevel = fs.LogLevelDebug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFichier:",
})
@@ -72,6 +72,10 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
//return errors.New("setting modtime is not supported for 1fichier remotes")
}

func (o *Object) setMetaData(file File) {
o.file = file
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, o.file.Size)
@@ -90,7 +94,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenClo

err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})

if err != nil {
@@ -1,5 +1,10 @@
package fichier

// FileInfoRequest is the request structure of the corresponding request
type FileInfoRequest struct {
URL string `json:"url"`
}

// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
FolderID int `json:"folder_id"`
@@ -49,6 +54,39 @@ type MakeFolderResponse struct {
FolderID int `json:"folder_id"`
}

// MoveFileRequest is the request structure of the corresponding request
type MoveFileRequest struct {
URLs []string `json:"urls"`
FolderID int `json:"destination_folder_id"`
Rename string `json:"rename,omitempty"`
}

// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
URLs []string `json:"urls"`
}

// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`
FolderID int `json:"folder_id"`
Rename string `json:"rename,omitempty"`
}

// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}

// FileCopy is used in the the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`
}

// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
@@ -86,7 +124,6 @@ type EndFileUploadResponse struct {

// File is the structure how 1Fichier returns a File
type File struct {
ACL int `json:"acl"`
CDN int `json:"cdn"`
Checksum string `json:"checksum"`
ContentType string `json:"content-type"`
backend/filefabric/api/types.go (new file, 391 lines)
@@ -0,0 +1,391 @@
|
||||
// Package api has type definitions for filefabric
|
||||
//
|
||||
// Converted from the API responses with help from https://mholt.github.io/json-to-go/
|
||||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// TimeFormat for parameters (UTC)
|
||||
timeFormatParameters = `2006-01-02 15:04:05`
|
||||
// "2020-08-11 10:10:04" for JSON parsing
|
||||
timeFormatJSON = `"` + timeFormatParameters + `"`
|
||||
)
|
||||
|
||||
// Time represents represents date and time information for the
|
||||
// filefabric API
|
||||
type Time time.Time
|
||||
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
    timeString := (*time.Time)(t).UTC().Format(timeFormatJSON)
    return []byte(timeString), nil
}

var zeroTime = []byte(`"0000-00-00 00:00:00"`)

// UnmarshalJSON turns JSON into a Time (in UTC)
func (t *Time) UnmarshalJSON(data []byte) error {
    // Set a zero time.Time if we receive a zero time input
    if bytes.Equal(data, zeroTime) {
        *t = Time(time.Time{})
        return nil
    }
    newT, err := time.Parse(timeFormatJSON, string(data))
    if err != nil {
        return err
    }
    *t = Time(newT)
    return nil
}

// String turns a Time into a string in UTC suitable for the API
// parameters
func (t Time) String() string {
    return time.Time(t).UTC().Format(timeFormatParameters)
}
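The custom Time type lets the backend speak the Fabric's quoted `YYYY-MM-DD hh:mm:ss` wire format straight through encoding/json. A minimal round-trip sketch of the same technique, restated outside the rclone tree (the struct and field tag here are illustrative only):

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// Layout includes the surrounding quotes so it matches the raw JSON token.
const timeFormatJSON = `"2006-01-02 15:04:05"`

type Time time.Time

func (t Time) MarshalJSON() ([]byte, error) {
    return []byte(time.Time(t).UTC().Format(timeFormatJSON)), nil
}

func (t *Time) UnmarshalJSON(data []byte) error {
    newT, err := time.Parse(timeFormatJSON, string(data))
    if err != nil {
        return err
    }
    *t = Time(newT)
    return nil
}

func main() {
    var v struct {
        Modified Time `json:"fi_modified"`
    }
    // Round-trip a sample timestamp through the custom type.
    in := []byte(`{"fi_modified":"2020-08-11 10:10:04"}`)
    if err := json.Unmarshal(in, &v); err != nil {
        panic(err)
    }
    out, _ := json.Marshal(&v)
    fmt.Println(string(out)) // {"fi_modified":"2020-08-11 10:10:04"}
}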
// Status is the status returned in all status responses
type Status struct {
    Code    string `json:"status"`
    Message string `json:"statusmessage"`
    TaskID  string `json:"taskid"`
    // Warning string `json:"warning"` // obsolete
}
// Status satisfies the error interface
func (e *Status) Error() string {
    return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}

// OK returns true if the status is all good
func (e *Status) OK() bool {
    return e.Code == "ok"
}

// GetCode returns the status code if any
func (e *Status) GetCode() string {
    return e.Code
}

// OKError defines an interface for items which can be OK or be an error
type OKError interface {
    error
    OK() bool
    GetCode() string
}

// Check Status satisfies the OKError interface
var _ OKError = (*Status)(nil)
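Because every response type below embeds Status, a caller can test success uniformly through OKError. A hedged sketch of the call-site pattern (checkStatus is an illustrative helper, not part of this file):

// checkStatus turns a non-OK API status into a Go error.
// Illustrative helper only; the real backend does this inline.
func checkStatus(resp OKError) error {
    if resp.OK() {
        return nil
    }
    return resp // Status satisfies the error interface
}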
// EmptyResponse is a response which just returns the error condition
type EmptyResponse struct {
    Status
}

// GetTokenByAuthTokenResponse is the response to getTokenByAuthToken
type GetTokenByAuthTokenResponse struct {
    Status
    Token              string `json:"token"`
    UserID             string `json:"userid"`
    AllowLoginRemember string `json:"allowloginremember"`
    LastLogin          Time   `json:"lastlogin"`
    AutoLoginCode      string `json:"autologincode"`
}

// ApplianceInfo is the response to getApplianceInfo
type ApplianceInfo struct {
    Status
    Sitetitle            string `json:"sitetitle"`
    OauthLoginSupport    string `json:"oauthloginsupport"`
    IsAppliance          string `json:"isappliance"`
    SoftwareVersion      string `json:"softwareversion"`
    SoftwareVersionLabel string `json:"softwareversionlabel"`
}

// GetFolderContentsResponse is returned from getFolderContents
type GetFolderContentsResponse struct {
    Status
    Total  int    `json:"total,string"`
    Items  []Item `json:"filelist"`
    Folder Item   `json:"folder"`
    From   int    `json:"from,string"`
    //Count int `json:"count"`
    Pid           string `json:"pid"`
    RefreshResult Status `json:"refreshresult"`
    // Curfolder Item `json:"curfolder"` - sometimes returned as "ROOT"?
    Parents           []Item            `json:"parents"`
    CustomPermissions CustomPermissions `json:"custompermissions"`
}
// ItemType determines whether it is a file or a folder
type ItemType uint8

// Types of things in Item
const (
    ItemTypeFile   ItemType = 0
    ItemTypeFolder ItemType = 1
)
// Item is a File or a Folder
type Item struct {
    ID  string `json:"fi_id"`
    PID string `json:"fi_pid"`
    // UID string `json:"fi_uid"`
    Name string `json:"fi_name"`
    // S3Name string `json:"fi_s3name"`
    // Extension string `json:"fi_extension"`
    // Description string `json:"fi_description"`
    Type ItemType `json:"fi_type,string"`
    // Created Time `json:"fi_created"`
    Size        int64  `json:"fi_size,string"`
    ContentType string `json:"fi_contenttype"`
    // Tags string `json:"fi_tags"`
    // MainCode string `json:"fi_maincode"`
    // Public int `json:"fi_public,string"`
    // Provider string `json:"fi_provider"`
    // ProviderFolder string `json:"fi_providerfolder"` // folder
    // Encrypted int `json:"fi_encrypted,string"`
    // StructType string `json:"fi_structtype"`
    // Bname string `json:"fi_bname"` // folder
    // OrgID string `json:"fi_orgid"`
    // Favorite int `json:"fi_favorite,string"`
    // IspartOf string `json:"fi_ispartof"` // folder
    Modified Time `json:"fi_modified"`
    // LastAccessed Time `json:"fi_lastaccessed"`
    // Hits int64 `json:"fi_hits,string"`
    // IP string `json:"fi_ip"` // folder
    // BigDescription string `json:"fi_bigdescription"`
    LocalTime Time `json:"fi_localtime"`
    // OrgfolderID string `json:"fi_orgfolderid"`
    // StorageIP string `json:"fi_storageip"` // folder
    // RemoteTime Time `json:"fi_remotetime"`
    // ProviderOptions string `json:"fi_provideroptions"`
    // Access string `json:"fi_access"`
    // Hidden string `json:"fi_hidden"` // folder
    // VersionOf string `json:"fi_versionof"`
    Trash bool `json:"trash"`
    // Isbucket string `json:"isbucket"` // filelist
    SubFolders int64 `json:"subfolders"` // folder
}

// ItemFields is a | separated list of fields in Item
var ItemFields = mustFields(Item{})

// fields returns the JSON fields in use by opt as a | separated
// string.
func fields(opt interface{}) (pipeTags string, err error) {
    var tags []string
    def := reflect.ValueOf(opt)
    defType := def.Type()
    for i := 0; i < def.NumField(); i++ {
        field := defType.Field(i)
        tag, ok := field.Tag.Lookup("json")
        if !ok {
            continue
        }
        if comma := strings.IndexRune(tag, ','); comma >= 0 {
            tag = tag[:comma]
        }
        if tag == "" {
            continue
        }
        tags = append(tags, tag)
    }
    return strings.Join(tags, "|"), nil
}

// mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure.
func mustFields(opt interface{}) string {
    tags, err := fields(opt)
    if err != nil {
        panic(err)
    }
    return tags
}
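ItemFields is computed once at package init by reflecting over Item's json tags, and the backend can then hand it to the API as a field-selection parameter. For a hypothetical struct, fields() behaves like this (names below are invented for illustration):

// For this struct, fields() returns "fi_id|fi_name|fi_size":
// tag options after the comma (",string") are trimmed, and fields
// without a json tag are skipped entirely.
type example struct {
    ID      string `json:"fi_id"`
    Name    string `json:"fi_name"`
    Size    int64  `json:"fi_size,string"`
    private string // no json tag: ignored
}

var exampleFields = mustFields(example{})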
// CustomPermissions is returned as part of GetFolderContentsResponse
type CustomPermissions struct {
    Upload            string `json:"upload"`
    CreateSubFolder   string `json:"createsubfolder"`
    Rename            string `json:"rename"`
    Delete            string `json:"delete"`
    Move              string `json:"move"`
    ManagePermissions string `json:"managepermissions"`
    ListOnly          string `json:"listonly"`
    VisibleInTrash    string `json:"visibleintrash"`
}
// DoCreateNewFolderResponse is the response from doCreateNewFolder
type DoCreateNewFolderResponse struct {
    Status
    Item Item `json:"file"`
}

// DoInitUploadResponse is the response from doInitUpload
type DoInitUploadResponse struct {
    Status
    ProviderID          string `json:"providerid"`
    UploadCode          string `json:"uploadcode"`
    FileType            string `json:"filetype"`
    DirectUploadSupport string `json:"directuploadsupport"`
    ResumeAllowed       string `json:"resumeallowed"`
}

// UploaderResponse is returned from /cgi-bin/uploader/uploader1.cgi
//
// Sometimes the response is returned as XML and sometimes as JSON
type UploaderResponse struct {
    FileSize int64  `xml:"filesize" json:"filesize,string"`
    MD5      string `xml:"md5" json:"md5"`
    Success  string `xml:"success" json:"success"`
}

// UploadStatus is returned from getUploadStatus
type UploadStatus struct {
    Status
    UploadCode     string `json:"uploadcode"`
    Metafile       string `json:"metafile"`
    Percent        int    `json:"percent,string"`
    Uploaded       int64  `json:"uploaded,string"`
    Size           int64  `json:"size,string"`
    Filename       string `json:"filename"`
    Nofile         string `json:"nofile"`
    Completed      string `json:"completed"`
    Completsuccess string `json:"completsuccess"`
    Completerror   string `json:"completerror"`
}

// DoCompleteUploadResponse is the response to doCompleteUpload
type DoCompleteUploadResponse struct {
    Status
    UploadedSize int64  `json:"uploadedsize,string"`
    StorageIP    string `json:"storageip"`
    UploadedName string `json:"uploadedname"`
    // Versioned []interface{} `json:"versioned"`
    // VersionedID int `json:"versionedid"`
    // Comment interface{} `json:"comment"`
    File Item `json:"file"`
    // UsSize string `json:"us_size"`
    // PaSize string `json:"pa_size"`
    // SpaceInfo SpaceInfo `json:"spaceinfo"`
}

// Providers is returned as part of UploadResponse
type Providers struct {
    Max     string `json:"max"`
    Used    string `json:"used"`
    ID      string `json:"id"`
    Private string `json:"private"`
    Limit   string `json:"limit"`
    Percent int    `json:"percent"`
}

// Total is returned as part of UploadResponse
type Total struct {
    Max        string `json:"max"`
    Used       string `json:"used"`
    ID         string `json:"id"`
    Priused    string `json:"priused"`
    Primax     string `json:"primax"`
    Limit      string `json:"limit"`
    Percent    int    `json:"percent"`
    Pripercent int    `json:"pripercent"`
}

// UploadResponse is returned as part of SpaceInfo
type UploadResponse struct {
    Providers []Providers `json:"providers"`
    Total     Total       `json:"total"`
}

// SpaceInfo is returned as part of DoCompleteUploadResponse
type SpaceInfo struct {
    Response UploadResponse `json:"response"`
    Status   string         `json:"status"`
}

// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
    Status
    Deleted        []string      `json:"deleted"`
    Errors         []interface{} `json:"errors"`
    ID             string        `json:"fi_id"`
    BackgroundTask int           `json:"backgroundtask"`
    UsSize         string        `json:"us_size"`
    PaSize         string        `json:"pa_size"`
    //SpaceInfo SpaceInfo `json:"spaceinfo"`
}

// FileResponse is returned from doRenameFile
type FileResponse struct {
    Status
    Item   Item   `json:"file"`
    Exists string `json:"exists"`
}

// MoveFilesResponse is returned from doMoveFiles
type MoveFilesResponse struct {
    Status
    Filesleft         string   `json:"filesleft"`
    Addedtobackground string   `json:"addedtobackground"`
    Moved             string   `json:"moved"`
    Item              Item     `json:"file"`
    IDs               []string `json:"fi_ids"`
    Length            int      `json:"length"`
    DirID             string   `json:"dir_id"`
    MovedObjects      []Item   `json:"movedobjects"`
    // FolderTasks []interface{} `json:"foldertasks"`
}

// TasksResponse is the response to getUserBackgroundTasks
type TasksResponse struct {
    Status
    Tasks []Task `json:"tasks"`
    Total string `json:"total"`
}

// BtData is part of TasksResponse
type BtData struct {
    Callback string `json:"callback"`
}

// Task describes a task returned in TasksResponse
type Task struct {
    BtID             string `json:"bt_id"`
    UsID             string `json:"us_id"`
    BtType           string `json:"bt_type"`
    BtData           BtData `json:"bt_data"`
    BtStatustext     string `json:"bt_statustext"`
    BtStatusdata     string `json:"bt_statusdata"`
    BtMessage        string `json:"bt_message"`
    BtProcent        string `json:"bt_procent"`
    BtAdded          string `json:"bt_added"`
    BtStatus         string `json:"bt_status"`
    BtCompleted      string `json:"bt_completed"`
    BtTitle          string `json:"bt_title"`
    BtCredentials    string `json:"bt_credentials"`
    BtHidden         string `json:"bt_hidden"`
    BtAutoremove     string `json:"bt_autoremove"`
    BtDevsite        string `json:"bt_devsite"`
    BtPriority       string `json:"bt_priority"`
    BtReport         string `json:"bt_report"`
    BtSitemarker     string `json:"bt_sitemarker"`
    BtExecuteafter   string `json:"bt_executeafter"`
    BtCompletestatus string `json:"bt_completestatus"`
    BtSubtype        string `json:"bt_subtype"`
    BtCanceled       string `json:"bt_canceled"`
    Callback         string `json:"callback"`
    CanBeCanceled    bool   `json:"canbecanceled"`
    CanBeRestarted   bool   `json:"canberestarted"`
    Type             string `json:"type"`
    Status           string `json:"status"`
    Settings         string `json:"settings"`
}
backend/filefabric/filefabric.go (new file, 1352 lines)
File diff suppressed because it is too large

backend/filefabric/filefabric_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test filefabric filesystem interface
package filefabric_test

import (
    "testing"

    "github.com/rclone/rclone/backend/filefabric"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestFileFabric:",
        NilObject:  (*filefabric.Object)(nil),
    })
}
backend/ftp/ftp.go
@@ -5,8 +5,8 @@ import (
    "context"
    "crypto/tls"
    "io"
+   "net"
    "net/textproto"
-   "os"
    "path"
    "runtime"
    "strings"
@@ -16,16 +16,30 @@ import (
    "github.com/jlaffaye/ftp"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
+   "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/fserrors"
+   "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/encoder"
+   "github.com/rclone/rclone/lib/env"
+   "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/readers"
)

+var (
+   currentUser = env.CurrentUser()
+)
+
+const (
+   minSleep      = 10 * time.Millisecond
+   maxSleep      = 2 * time.Second
+   decayConstant = 2 // bigger for slower decay, exponential
+)
// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
@@ -42,7 +56,7 @@ func init() {
        }},
    }, {
        Name: "user",
-       Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
+       Help: "FTP username, leave blank for current username, " + currentUser,
    }, {
        Name: "port",
        Help: "FTP port, leave blank to use default (21)",
@@ -53,16 +67,16 @@ func init() {
        Required: true,
    }, {
        Name: "tls",
-       Help: `Use FTPS over TLS (Implicit)
-When using implicit FTP over TLS the client will connect using TLS
-right from the start, which in turn breaks the compatibility with
+       Help: `Use Implicit FTPS (FTP over TLS)
+When using implicit FTP over TLS the client connects using TLS
+right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.`,
        Default: false,
    }, {
        Name: "explicit_tls",
-       Help: `Use FTP over TLS (Explicit)
-When using explicit FTP over TLS the client explicitly request
+       Help: `Use Explicit FTPS (FTP over TLS)
+When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
        Default: false,
@@ -81,6 +95,27 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
        Help:     "Disable using EPSV even if server advertises support",
        Default:  false,
        Advanced: true,
    }, {
+       Name:     "disable_mlsd",
+       Help:     "Disable using MLSD even if server advertises support",
+       Default:  false,
+       Advanced: true,
+   }, {
+       Name:    "idle_timeout",
+       Default: fs.Duration(60 * time.Second),
+       Help: `Max time before closing idle connections
+
+If no connections have been returned to the connection pool in the time
+given, rclone will empty the connection pool.
+
+Set to 0 to keep connections indefinitely.
+`,
+       Advanced: true,
+   }, {
+       Name:     "close_timeout",
+       Help:     "Maximum time to wait for a response to close.",
+       Default:  fs.Duration(60 * time.Second),
+       Advanced: true,
    }, {
        Name: config.ConfigEncoding,
        Help: config.ConfigEncodingHelp,
@@ -107,22 +142,29 @@ type Options struct {
    Concurrency       int                  `config:"concurrency"`
    SkipVerifyTLSCert bool                 `config:"no_check_certificate"`
    DisableEPSV       bool                 `config:"disable_epsv"`
+   DisableMLSD       bool                 `config:"disable_mlsd"`
+   IdleTimeout       fs.Duration          `config:"idle_timeout"`
+   CloseTimeout      fs.Duration          `config:"close_timeout"`
    Enc               encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote FTP server
type Fs struct {
-   name     string       // name of this remote
-   root     string       // the path we are working on if any
-   opt      Options      // parsed options
-   features *fs.Features // optional features
+   name     string         // name of this remote
+   root     string         // the path we are working on if any
+   opt      Options        // parsed options
+   ci       *fs.ConfigInfo // global config
+   features *fs.Features   // optional features
    url      string
    user     string
    pass     string
    dialAddr string
    poolMu   sync.Mutex
    pool     []*ftp.ServerConn
+   drain    *time.Timer // used to drain the pool when we stop using the connections
    tokens   *pacer.TokenDispenser
+   tlsConf  *tls.Config
+   pacer    *fs.Pacer // pacer for FTP connections
}

// Object describes an FTP file
@@ -199,51 +241,86 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
    return len(p), nil
}

type dialCtx struct {
    f   *Fs
    ctx context.Context
}

// dial a new connection with fshttp dialer
func (d *dialCtx) dial(network, address string) (net.Conn, error) {
    conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
    if err != nil {
        return nil, err
    }
    if d.f.tlsConf != nil {
        conn = tls.Client(conn, d.f.tlsConf)
    }
    return conn, err
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
    if fserrors.ContextError(ctx, &err) {
        return false, err
    }
    switch errX := err.(type) {
    case *textproto.Error:
        switch errX.Code {
        case ftp.StatusNotAvailable:
            return true, err
        }
    }
    return fserrors.ShouldRetry(err), err
}
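shouldRetry is what the pacer consumes: an FTP 421 (ftp.StatusNotAvailable) or a generically retriable error loops, while a cancelled context stops immediately. A minimal sketch of how a call site drives it (callWithRetry is an illustrative wrapper; the real call sites appear later in this diff):

// Sketch: retry a single FTP round trip under the pacer until
// shouldRetry says stop. `op` stands in for any one FTP operation.
func callWithRetry(ctx context.Context, f *Fs, op func() error) error {
    return f.pacer.Call(func() (bool, error) {
        return shouldRetry(ctx, op())
    })
}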
// Open a new connection to the FTP server.
-func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
+func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
    fs.Debugf(f, "Connecting to FTP server")
-   ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
-   if f.opt.TLS && f.opt.ExplicitTLS {
-       fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
-       return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
-   } else if f.opt.TLS {
-       tlsConfig := &tls.Config{
-           ServerName:         f.opt.Host,
-           InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
+   dCtx := dialCtx{f, ctx}
+   ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
+   if f.opt.ExplicitTLS {
+       ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
+       // Initial connection needs to be cleartext for explicit TLS
+       conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
+       if err != nil {
+           return nil, err
+       }
-       ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
-   } else if f.opt.ExplicitTLS {
-       tlsConfig := &tls.Config{
-           ServerName:         f.opt.Host,
-           InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
-       }
-       ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
+       ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
    }
    if f.opt.DisableEPSV {
        ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
    }
-   if fs.Config.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
-       ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: fs.Config.Dump&fs.DumpAuth != 0}))
+   if f.opt.DisableMLSD {
+       ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
    }
-   c, err := ftp.Dial(f.dialAddr, ftpConfig...)
+   if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
+       ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
+   }
+   err = f.pacer.Call(func() (bool, error) {
+       c, err = ftp.Dial(f.dialAddr, ftpConfig...)
+       if err != nil {
+           return shouldRetry(ctx, err)
+       }
+       err = c.Login(f.user, f.pass)
+       if err != nil {
+           _ = c.Quit()
+           return shouldRetry(ctx, err)
+       }
+       return false, nil
+   })
    if err != nil {
-       fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
-       return nil, errors.Wrap(err, "ftpConnection Dial")
+       err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
    }
-   err = c.Login(f.user, f.pass)
-   if err != nil {
-       _ = c.Quit()
-       fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
-       return nil, errors.Wrap(err, "ftpConnection Login")
-   }
-   return c, nil
+   return c, err
}
// Get an FTP connection from the pool, or open a new one
-func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
+func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
    if f.opt.Concurrency > 0 {
        f.tokens.Get()
    }
+   accounting.LimitTPS(ctx)
    f.poolMu.Lock()
    if len(f.pool) > 0 {
        c = f.pool[0]
@@ -253,7 +330,7 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
    if c != nil {
        return c, nil
    }
-   c, err = f.ftpConnection()
+   c, err = f.ftpConnection(ctx)
    if err != nil && f.opt.Concurrency > 0 {
        f.tokens.Put()
    }
@@ -292,12 +369,34 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
    }
    f.poolMu.Lock()
    f.pool = append(f.pool, c)
+   if f.opt.IdleTimeout > 0 {
+       f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
+   }
    f.poolMu.Unlock()
}

+// Drain the pool of any connections
+func (f *Fs) drainPool(ctx context.Context) (err error) {
+   f.poolMu.Lock()
+   defer f.poolMu.Unlock()
+   if f.opt.IdleTimeout > 0 {
+       f.drain.Stop()
+   }
+   if len(f.pool) != 0 {
+       fs.Debugf(f, "closing %d unused connections", len(f.pool))
+   }
+   for i, c := range f.pool {
+       if cErr := c.Quit(); cErr != nil {
+           err = cErr
+       }
+       f.pool[i] = nil
+   }
+   f.pool = nil
+   return err
+}
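putFtpConnection resets the drain timer on every return, so the pool only empties after a full idle_timeout of genuine inactivity, and drainPool doubles as the Shutdown path below. The timer pattern in isolation, as a standalone hedged sketch (not rclone code; all names here are invented for illustration):

// Every put pushes the drain deadline back; the pool empties only
// after `idle` with no activity.
type connPool struct {
    mu    sync.Mutex
    conns []io.Closer
    drain *time.Timer // fires drainAll after `idle` of inactivity
    idle  time.Duration
}

func newConnPool(idle time.Duration) *connPool {
    p := &connPool{idle: idle}
    p.drain = time.AfterFunc(idle, p.drainAll)
    return p
}

func (p *connPool) put(c io.Closer) {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.conns = append(p.conns, c)
    p.drain.Reset(p.idle) // nudge the emptying timer, as putFtpConnection does
}

func (p *connPool) drainAll() {
    p.mu.Lock()
    defer p.mu.Unlock()
    for _, c := range p.conns {
        _ = c.Close() // best effort; errors ignored in this sketch
    }
    p.conns = nil
}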
// NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
-   ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
    // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
    // Parse config into Options struct
    opt := new(Options)
@@ -311,7 +410,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
    }
    user := opt.User
    if user == "" {
-       user = os.Getenv("USER")
+       user = currentUser
    }
    port := opt.Port
    if port == "" {
@@ -323,22 +422,40 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
    if opt.TLS {
        protocol = "ftps://"
    }
+   if opt.TLS && opt.ExplicitTLS {
+       return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
+   }
+   var tlsConfig *tls.Config
+   if opt.TLS || opt.ExplicitTLS {
+       tlsConfig = &tls.Config{
+           ServerName:         opt.Host,
+           InsecureSkipVerify: opt.SkipVerifyTLSCert,
+       }
+   }
    u := protocol + path.Join(dialAddr+"/", root)
+   ci := fs.GetConfig(ctx)
    f := &Fs{
        name:     name,
        root:     root,
        opt:      *opt,
+       ci:       ci,
        url:      u,
        user:     user,
        pass:     pass,
        dialAddr: dialAddr,
        tokens:   pacer.NewTokenDispenser(opt.Concurrency),
+       tlsConf:  tlsConfig,
+       pacer:    fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
    }
    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
-   }).Fill(f)
+   }).Fill(ctx, f)
+   // set the pool drainer timer going
+   if f.opt.IdleTimeout > 0 {
+       f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
+   }
    // Make a connection and pool it to return errors early
-   c, err := f.getFtpConnection()
+   c, err := f.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "NewFs")
    }
@@ -365,6 +482,12 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
    return f, err
}

+// Shutdown the backend, closing any background tasks and any
+// cached connections.
+func (f *Fs) Shutdown(ctx context.Context) error {
+   return f.drainPool(ctx)
+}
// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
    switch errX := err.(type) {
@@ -409,7 +532,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
}

// findItem finds a directory entry for the name in its parent directory
-func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
+func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
    // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
    fullPath := path.Join(f.root, remote)
    if fullPath == "" || fullPath == "." || fullPath == "/" {
@@ -423,7 +546,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
    dir := path.Dir(fullPath)
    base := path.Base(fullPath)

-   c, err := f.getFtpConnection()
+   c, err := f.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "findItem")
    }
@@ -445,7 +568,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
    // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
-   entry, err := f.findItem(remote)
+   entry, err := f.findItem(ctx, remote)
    if err != nil {
        return nil, err
    }
@@ -467,8 +590,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
}

// dirExists checks the directory pointed to by remote exists or not
-func (f *Fs) dirExists(remote string) (exists bool, err error) {
-   entry, err := f.findItem(remote)
+func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
+   entry, err := f.findItem(ctx, remote)
    if err != nil {
        return false, errors.Wrap(err, "dirExists")
    }
@@ -489,7 +612,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    // defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
-   c, err := f.getFtpConnection()
+   c, err := f.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "list")
    }
@@ -510,7 +633,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    }()

    // Wait for List for up to Timeout seconds
-   timer := time.NewTimer(fs.Config.Timeout)
+   timer := time.NewTimer(f.ci.TimeoutOrInfinite())
    select {
    case listErr = <-errchan:
        timer.Stop()
@@ -527,7 +650,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    // doesn't exist, so check it really doesn't exist if no
    // entries found.
    if len(files) == 0 {
-       exists, err := f.dirExists(dir)
+       exists, err := f.dirExists(ctx, dir)
        if err != nil {
            return nil, errors.Wrap(err, "list")
        }
@@ -580,7 +703,7 @@ func (f *Fs) Precision() time.Duration {
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    // fs.Debugf(f, "Trying to put file %s", src.Remote())
-   err := f.mkParentDir(src.Remote())
+   err := f.mkParentDir(ctx, src.Remote())
    if err != nil {
        return nil, errors.Wrap(err, "Put mkParentDir failed")
    }
@@ -598,12 +721,12 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}

// getInfo reads the FileInfo for a path
-func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
+func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
    // defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
    dir := path.Dir(remote)
    base := path.Base(remote)

-   c, err := f.getFtpConnection()
+   c, err := f.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "getInfo")
    }
@@ -630,12 +753,12 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
}

// mkdir makes the directory and parents using unrooted paths
-func (f *Fs) mkdir(abspath string) error {
+func (f *Fs) mkdir(ctx context.Context, abspath string) error {
    abspath = path.Clean(abspath)
    if abspath == "." || abspath == "/" {
        return nil
    }
-   fi, err := f.getInfo(abspath)
+   fi, err := f.getInfo(ctx, abspath)
    if err == nil {
        if fi.IsDir {
            return nil
@@ -645,11 +768,11 @@ func (f *Fs) mkdir(abspath string) error {
        return errors.Wrapf(err, "mkdir %q failed", abspath)
    }
    parent := path.Dir(abspath)
-   err = f.mkdir(parent)
+   err = f.mkdir(ctx, parent)
    if err != nil {
        return err
    }
-   c, connErr := f.getFtpConnection()
+   c, connErr := f.getFtpConnection(ctx)
    if connErr != nil {
        return errors.Wrap(connErr, "mkdir")
    }
@@ -669,23 +792,23 @@ func (f *Fs) mkdir(abspath string) error {

// mkParentDir makes the parent of remote if necessary and any
// directories above that
-func (f *Fs) mkParentDir(remote string) error {
+func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
    parent := path.Dir(remote)
-   return f.mkdir(path.Join(f.root, parent))
+   return f.mkdir(ctx, path.Join(f.root, parent))
}

// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
    // defer fs.Trace(dir, "")("err=%v", &err)
    root := path.Join(f.root, dir)
-   return f.mkdir(root)
+   return f.mkdir(ctx, root)
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-   c, err := f.getFtpConnection()
+   c, err := f.getFtpConnection(ctx)
    if err != nil {
        return errors.Wrap(translateErrorFile(err), "Rmdir")
    }
@@ -701,11 +824,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
        fs.Debugf(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
    }
-   err := f.mkParentDir(remote)
+   err := f.mkParentDir(ctx, remote)
    if err != nil {
        return nil, errors.Wrap(err, "Move mkParentDir failed")
    }
-   c, err := f.getFtpConnection()
+   c, err := f.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "Move")
    }
@@ -725,7 +848,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -742,7 +865,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    dstPath := path.Join(f.root, dstRemote)

    // Check if destination exists
-   fi, err := f.getInfo(dstPath)
+   fi, err := f.getInfo(ctx, dstPath)
    if err == nil {
        if fi.IsDir {
            return fs.ErrorDirExists
@@ -753,13 +876,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    }

    // Make sure the parent directory exists
-   err = f.mkdir(path.Dir(dstPath))
+   err = f.mkdir(ctx, path.Dir(dstPath))
    if err != nil {
        return errors.Wrap(err, "DirMove mkParentDir dst failed")
    }

    // Do the move
-   c, err := f.getFtpConnection()
+   c, err := f.getFtpConnection(ctx)
    if err != nil {
        return errors.Wrap(err, "DirMove")
    }
@@ -843,8 +966,8 @@ func (f *ftpReadCloser) Close() error {
    go func() {
        errchan <- f.rc.Close()
    }()
-   // Wait for Close for up to 60 seconds
-   timer := time.NewTimer(60 * time.Second)
+   // Wait for Close for up to 60 seconds by default
+   timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
    select {
    case err = <-errchan:
        timer.Stop()
@@ -891,7 +1014,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
            }
        }
    }
-   c, err := o.fs.getFtpConnection()
+   c, err := o.fs.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "open")
    }
@@ -926,7 +1049,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
            fs.Debugf(o, "Removed after failed upload: %v", err)
        }
    }
-   c, err := o.fs.getFtpConnection()
+   c, err := o.fs.getFtpConnection(ctx)
    if err != nil {
        return errors.Wrap(err, "Update")
    }
@@ -938,7 +1061,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        return errors.Wrap(err, "update stor")
    }
    o.fs.putFtpConnection(&c, nil)
-   o.info, err = o.fs.getInfo(path)
+   o.info, err = o.fs.getInfo(ctx, path)
    if err != nil {
        return errors.Wrap(err, "update getinfo")
    }
@@ -950,14 +1073,14 @@ func (o *Object) Remove(ctx context.Context) (err error) {
    // defer fs.Trace(o, "")("err=%v", &err)
    path := path.Join(o.fs.root, o.remote)
    // Check if it's a directory or a file
-   info, err := o.fs.getInfo(path)
+   info, err := o.fs.getInfo(ctx, path)
    if err != nil {
        return err
    }
    if info.IsDir {
        err = o.fs.Rmdir(ctx, o.remote)
    } else {
-       c, err := o.fs.getFtpConnection()
+       c, err := o.fs.getFtpConnection(ctx)
        if err != nil {
            return errors.Wrap(err, "Remove")
        }
@@ -973,5 +1096,6 @@ var (
    _ fs.Mover       = &Fs{}
    _ fs.DirMover    = &Fs{}
    _ fs.PutStreamer = &Fs{}
+   _ fs.Shutdowner  = &Fs{}
    _ fs.Object      = &Object{}
)
backend/googlecloudstorage/googlecloudstorage.go
@@ -76,14 +76,14 @@ func init() {
        Prefix:      "gcs",
        Description: "Google Cloud Storage (this is not Google Drive)",
        NewFs:       NewFs,
-       Config: func(name string, m configmap.Mapper) {
+       Config: func(ctx context.Context, name string, m configmap.Mapper) {
            saFile, _ := m.Get("service_account_file")
            saCreds, _ := m.Get("service_account_credentials")
            anonymous, _ := m.Get("anonymous")
            if saFile != "" || saCreds != "" || anonymous == "true" {
                return
            }
-           err := oauthutil.Config("google cloud storage", name, m, storageConfig, nil)
+           err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
@@ -329,7 +329,10 @@ func (f *Fs) Features() *fs.Features {
}

// shouldRetry determines whether a given err rates being retried
-func shouldRetry(err error) (again bool, errOut error) {
+func shouldRetry(ctx context.Context, err error) (again bool, errOut error) {
+   if fserrors.ContextError(ctx, &err) {
+       return false, err
+   }
    again = false
    if err != nil {
        if fserrors.ShouldRetry(err) {
@@ -370,12 +373,12 @@ func (o *Object) split() (bucket, bucketPath string) {
    return o.fs.split(o.remote)
}

-func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
+func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
    conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
    if err != nil {
        return nil, errors.Wrap(err, "error processing credentials")
    }
-   ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
+   ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
    return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}

@@ -386,8 +389,7 @@ func (f *Fs) setRoot(root string) {
}

// NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-   ctx := context.TODO()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    var oAuthClient *http.Client

    // Parse config into Options struct
@@ -412,14 +414,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        opt.ServiceAccountCredentials = string(loadedCreds)
    }
    if opt.Anonymous {
-       oAuthClient = &http.Client{}
+       oAuthClient = fshttp.NewClient(ctx)
    } else if opt.ServiceAccountCredentials != "" {
-       oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
+       oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
        if err != nil {
            return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
        }
    } else {
-       oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
+       oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
        if err != nil {
            ctx := context.Background()
            oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
@@ -433,7 +435,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        name:  name,
        root:  root,
        opt:   *opt,
-       pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
+       pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
        cache: bucket.NewCache(),
    }
    f.setRoot(root)
@@ -442,7 +444,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        WriteMimeType:     true,
        BucketBased:       true,
        BucketBasedRootOK: true,
-   }).Fill(f)
+   }).Fill(ctx, f)

    // Create a new authorized Drive client.
    f.client = oAuthClient
@@ -456,7 +458,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
    err = f.pacer.Call(func() (bool, error) {
        _, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    if err == nil {
        newRoot := path.Dir(f.root)
@@ -522,7 +524,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
    var objects *storage.Objects
    err = f.pacer.Call(func() (bool, error) {
        objects, err = list.Context(ctx).Do()
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    if err != nil {
        if gErr, ok := err.(*googleapi.Error); ok {
@@ -565,7 +567,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
            remote = path.Join(bucket, remote)
        }
        // is this a directory marker?
-       if isDirectory && object.Size == 0 {
+       if isDirectory {
            continue // skip directory marker
        }
        err = fn(remote, object, false)
@@ -625,7 +627,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
    var buckets *storage.Buckets
    err = f.pacer.Call(func() (bool, error) {
        buckets, err = listBuckets.Context(ctx).Do()
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, err
@@ -751,7 +753,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
    // service account that only has the "Storage Object Admin" role. See #2193 for details.
    err = f.pacer.Call(func() (bool, error) {
        _, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    if err == nil {
        // Bucket already exists
@@ -786,7 +788,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
            insertBucket.PredefinedAcl(f.opt.BucketACL)
        }
        _, err = insertBucket.Context(ctx).Do()
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    }, nil)
}
@@ -803,7 +805,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
    return f.cache.Remove(bucket, func() error {
        return f.pacer.Call(func() (bool, error) {
            err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
-           return shouldRetry(err)
+           return shouldRetry(ctx, err)
        })
    })
}
@@ -813,7 +815,7 @@ func (f *Fs) Precision() time.Duration {
    return time.Nanosecond
}

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -841,20 +843,27 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        remote: remote,
    }

-   var newObject *storage.Object
-   err = f.pacer.Call(func() (bool, error) {
-       copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
-       if !f.opt.BucketPolicyOnly {
-           copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
+   rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
+   if !f.opt.BucketPolicyOnly {
+       rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
+   }
+   var rewriteResponse *storage.RewriteResponse
+   for {
+       err = f.pacer.Call(func() (bool, error) {
+           rewriteResponse, err = rewriteRequest.Context(ctx).Do()
+           return shouldRetry(ctx, err)
+       })
+       if err != nil {
+           return nil, err
        }
-       newObject, err = copyObject.Context(ctx).Do()
-       return shouldRetry(err)
-   })
-   if err != nil {
-       return nil, err
+       if rewriteResponse.Done {
+           break
+       }
+       rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
+       fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
    }
    // Set the metadata for the new object while we have it
-   dstObj.setMetaData(newObject)
+   dstObj.setMetaData(rewriteResponse.Resource)
    return dstObj, nil
}
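The switch from Objects.Copy to Objects.Rewrite matters for large or cross-location copies: Rewrite may return before all bytes are copied, handing back a token to resume with, so the caller loops until Done is set. Reduced to its skeleton (method names as in the google.golang.org/api/storage/v1 client used above; error handling and pacing elided):

// Skeleton of the rewrite-until-done loop from the diff above.
for {
    resp, err := rewriteRequest.Context(ctx).Do()
    if err != nil {
        return nil, err
    }
    if resp.Done {
        break // all bytes copied
    }
    rewriteRequest.RewriteToken(resp.RewriteToken) // resume where the server stopped
}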
@@ -935,7 +944,7 @@ func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, er
    bucket, bucketPath := o.split()
    err = o.fs.pacer.Call(func() (bool, error) {
        object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    if err != nil {
        if gErr, ok := err.(*googleapi.Error); ok {
@@ -1006,7 +1015,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
        copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
    }
    newObject, err = copyObject.Context(ctx).Do()
-   return shouldRetry(err)
+   return shouldRetry(ctx, err)
    })
    if err != nil {
        return err
@@ -1022,11 +1031,10 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-   req, err := http.NewRequest("GET", o.url, nil)
+   req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
    if err != nil {
        return nil, err
    }
-   req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
    fs.FixRangeOption(options, o.bytes)
    fs.OpenOptionAddHTTPHeaders(req.Header, options)
    var res *http.Response
@@ -1038,7 +1046,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
            _ = res.Body.Close() // ignore error
        }
    }
-   return shouldRetry(err)
+   return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, err
@@ -1085,6 +1093,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        object.ContentLanguage = value
    case "content-type":
        object.ContentType = value
+   case "x-goog-storage-class":
+       object.StorageClass = value
    default:
        const googMetaPrefix = "x-goog-meta-"
        if strings.HasPrefix(lowerKey, googMetaPrefix) {
@@ -1102,7 +1112,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
    }
    newObject, err = insertObject.Context(ctx).Do()
-   return shouldRetry(err)
+   return shouldRetry(ctx, err)
    })
    if err != nil {
        return err
@@ -1117,7 +1127,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
    bucket, bucketPath := o.split()
    err = o.fs.pacer.Call(func() (bool, error) {
        err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    return err
}
@@ -78,7 +78,7 @@ func init() {
|
||||
Prefix: "gphotos",
|
||||
Description: "Google Photos",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
@@ -95,7 +95,7 @@ func init() {
|
||||
}
|
||||
|
||||
// Do the oauth
|
||||
err = oauthutil.Config("google photos", name, m, oauthConfig, nil)
|
||||
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
|
||||
if err != nil {
|
||||
golog.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
@@ -132,15 +132,33 @@ you want to read the media.`,
|
||||
Default: 2000,
|
||||
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "include_archived",
|
||||
Default: false,
|
||||
Help: `Also view and download archived media.
|
||||
|
||||
By default rclone does not request archived media. Thus, when syncing,
|
||||
archived media is not visible in directory listings or transferred.
|
||||
|
||||
Note that media in albums is always visible and synced, no matter
|
||||
their archive status.
|
||||
|
||||
With this flag, archived media are always visible in directory
|
||||
listings and transferred.
|
||||
|
||||
Without this flag, archived media will not be visible in directory
|
||||
listings and won't be transferred.`,
|
||||
Advanced: true,
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ReadOnly bool `config:"read_only"`
|
||||
ReadSize bool `config:"read_size"`
|
||||
StartYear int `config:"start_year"`
|
||||
ReadOnly bool `config:"read_only"`
|
||||
ReadSize bool `config:"read_size"`
|
||||
StartYear int `config:"start_year"`
|
||||
IncludeArchived bool `config:"include_archived"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
@@ -206,6 +224,10 @@ func (f *Fs) startYear() int {
|
||||
return f.opt.StartYear
|
||||
}
|
||||
|
||||
func (f *Fs) includeArchived() bool {
|
||||
return f.opt.IncludeArchived
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
429, // Too Many Requests.
|
||||
@@ -218,7 +240,10 @@ var retryErrorCodes = []int{
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
@@ -246,7 +271,7 @@ func errorHandler(resp *http.Response) error {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
@@ -254,8 +279,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
baseClient := fshttp.NewClient(fs.Config)
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
|
||||
baseClient := fshttp.NewClient(ctx)
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure Box")
|
||||
}
|
||||
@@ -272,14 +297,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
unAuth: rest.NewClient(baseClient),
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
ts: ts,
|
||||
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
|
||||
startTime: time.Now(),
|
||||
albums: map[bool]*albums{},
|
||||
uploaded: dirtree.New(),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
}).Fill(f)
|
||||
}).Fill(ctx, f)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
|
||||
_, _, pattern := patterns.match(f.root, "", true)
|
||||
@@ -288,7 +313,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
var leaf string
f.root, leaf = path.Split(f.root)
f.root = strings.TrimRight(f.root, "/")
- _, err := f.NewObject(context.TODO(), leaf)
+ _, err := f.NewObject(ctx, leaf)
if err == nil {
return f, fs.ErrorIsFile
}
@@ -307,7 +332,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't read openID config")
@@ -336,7 +361,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read user info")
@@ -367,7 +392,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't revoke token")
@@ -454,7 +479,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list albums")
@@ -497,13 +522,19 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
}
filter.PageSize = listChunks
filter.PageToken = ""
+ if filter.AlbumID == "" { // album ID and filters cannot be set together, else error 400 INVALID_ARGUMENT
+ if filter.Filters == nil {
+ filter.Filters = &api.Filters{}
+ }
+ filter.Filters.IncludeArchivedMedia = &f.opt.IncludeArchived
+ }
lastID := ""
for {
var result api.MediaItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list files")
@@ -647,7 +678,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create album")
@@ -782,7 +813,7 @@ func (o *Object) Size() int64 {
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
fs.Debugf(o, "Reading size failed: %v", err)
@@ -833,7 +864,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't get media item")
@@ -910,7 +941,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -965,10 +996,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err != nil {
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
}
token, err = rest.ReadBody(resp)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't upload file")
@@ -996,7 +1027,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create media item")
@@ -1041,7 +1072,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't delete item from album")
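Every backend hunk in this changeset follows the same two-step migration: NewFs and its helpers take an explicit context.Context, and reads of the global fs.Config move to the config attached to that context. A minimal before/after sketch of the shape (signatures taken from the hunks; bodies elided, names illustrative only):

    // before: the backend manufactured its own context and read global state
    func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        ctx := context.TODO()
        client := fshttp.NewClient(fs.Config)
        // ...
    }

    // after: the caller's context carries cancellation and per-call config
    func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
        ci := fs.GetConfig(ctx)
        client := fshttp.NewClient(ctx)
        // ...
    }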
@@ -35,14 +35,14 @@ func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
*fstest.RemoteName = "TestGooglePhotos:"
}
- f, err := fs.NewFs(*fstest.RemoteName)
+ f, err := fs.NewFs(ctx, *fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
}
require.NoError(t, err)

// Create local Fs pointing at testfiles
- localFs, err := fs.NewFs("testfiles")
+ localFs, err := fs.NewFs(ctx, "testfiles")
require.NoError(t, err)

t.Run("CreateAlbum", func(t *testing.T) {
@@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) {
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})

- // Check it is there in the date/month/year heirachy
+ // Check it is there in the date/month/year hierarchy
// 2013-07-13 is the creation date of the folder
checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath)
@@ -155,7 +155,7 @@ func TestIntegration(t *testing.T) {
})

t.Run("NewFsIsFile", func(t *testing.T) {
- fNew, err := fs.NewFs(*fstest.RemoteName + remote)
+ fNew, err := fs.NewFs(ctx, *fstest.RemoteName+remote)
assert.Equal(t, fs.ErrorIsFile, err)
leaf := path.Base(remote)
o, err := fNew.NewObject(ctx, leaf)

@@ -24,6 +24,7 @@ type lister interface {
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
startYear() int
+ includeArchived() bool
}

// dirPattern describes a single directory pattern

@@ -64,6 +64,11 @@ func (f *testLister) startYear() int {
return 2000
}

+ // mock includeArchived for testing
+ func (f *testLister) includeArchived() bool {
+ return false
+ }
+
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input
backend/hdfs/fs.go (new file, 320 lines)
@@ -0,0 +1,320 @@
// +build !plan9

package hdfs

import (
    "context"
    "fmt"
    "io"
    "os"
    "os/user"
    "path"
    "strings"
    "time"

    "github.com/colinmarc/hdfs/v2"
    krb "github.com/jcmturner/gokrb5/v8/client"
    "github.com/jcmturner/gokrb5/v8/config"
    "github.com/jcmturner/gokrb5/v8/credentials"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/hash"
)

// Fs represents a HDFS server
type Fs struct {
    name     string
    root     string
    features *fs.Features   // optional features
    opt      Options        // options for this backend
    ci       *fs.ConfigInfo // global config
    client   *hdfs.Client
}

// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
    configPath := os.Getenv("KRB5_CONFIG")
    if configPath == "" {
        configPath = "/etc/krb5.conf"
    }

    cfg, err := config.Load(configPath)
    if err != nil {
        return nil, err
    }

    // Determine the ccache location from the environment, falling back to the
    // default location.
    ccachePath := os.Getenv("KRB5CCNAME")
    if strings.Contains(ccachePath, ":") {
        if strings.HasPrefix(ccachePath, "FILE:") {
            ccachePath = strings.SplitN(ccachePath, ":", 2)[1]
        } else {
            return nil, fmt.Errorf("unusable ccache: %s", ccachePath)
        }
    } else if ccachePath == "" {
        u, err := user.Current()
        if err != nil {
            return nil, err
        }

        ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)
    }

    ccache, err := credentials.LoadCCache(ccachePath)
    if err != nil {
        return nil, err
    }

    client, err := krb.NewFromCCache(ccache, cfg)
    if err != nil {
        return nil, err
    }

    return client, nil
}

// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }

    options := hdfs.ClientOptions{
        Addresses:           []string{opt.Namenode},
        UseDatanodeHostname: false,
    }

    if opt.ServicePrincipalName != "" {
        options.KerberosClient, err = getKerberosClient()
        if err != nil {
            return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
        }
        options.KerberosServicePrincipleName = opt.ServicePrincipalName

        if opt.DataTransferProtection != "" {
            options.DataTransferProtection = opt.DataTransferProtection
        }
    } else {
        options.User = opt.Username
    }

    client, err := hdfs.NewClient(options)
    if err != nil {
        return nil, err
    }

    f := &Fs{
        name:   name,
        root:   root,
        opt:    *opt,
        ci:     fs.GetConfig(ctx),
        client: client,
    }

    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
    }).Fill(ctx, f)

    info, err := f.client.Stat(f.realpath(""))
    if err == nil && !info.IsDir() {
        f.root = path.Dir(f.root)
        return f, fs.ErrorIsFile
    }

    return f, nil
}

// Name of this fs
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// String returns a description of the FS
func (f *Fs) String() string {
    return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
    return time.Second
}

// Hashes are not supported
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.None)
}

// NewObject finds file at remote or return fs.ErrorObjectNotFound
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    realpath := f.realpath(remote)
    fs.Debugf(f, "new [%s]", realpath)

    info, err := f.ensureFile(realpath)
    if err != nil {
        return nil, err
    }

    return &Object{
        fs:      f,
        remote:  remote,
        size:    info.Size(),
        modTime: info.ModTime(),
    }, nil
}

// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    realpath := f.realpath(dir)
    fs.Debugf(f, "list [%s]", realpath)

    err = f.ensureDirectory(realpath)
    if err != nil {
        return nil, err
    }

    list, err := f.client.ReadDir(realpath)
    if err != nil {
        return nil, err
    }
    for _, x := range list {
        stdName := f.opt.Enc.ToStandardName(x.Name())
        remote := path.Join(dir, stdName)
        if x.IsDir() {
            entries = append(entries, fs.NewDir(remote, x.ModTime()))
        } else {
            entries = append(entries, &Object{
                fs:      f,
                remote:  remote,
                size:    x.Size(),
                modTime: x.ModTime()})
        }
    }
    return entries, nil
}

// Put the object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    o := &Object{
        fs:     f,
        remote: src.Remote(),
    }
    err := o.Update(ctx, in, src, options...)
    return o, err
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    return f.Put(ctx, in, src, options...)
}

// Mkdir makes a directory
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    fs.Debugf(f, "mkdir [%s]", f.realpath(dir))
    return f.client.MkdirAll(f.realpath(dir), 0755)
}

// Rmdir deletes the directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    realpath := f.realpath(dir)
    fs.Debugf(f, "rmdir [%s]", realpath)

    err := f.ensureDirectory(realpath)
    if err != nil {
        return err
    }

    // do not remove a directory which is not empty
    list, err := f.client.ReadDir(realpath)
    if err != nil {
        return err
    }
    if len(list) > 0 {
        return fs.ErrorDirectoryNotEmpty
    }

    return f.client.Remove(realpath)
}

// Purge deletes all the files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
    realpath := f.realpath(dir)
    fs.Debugf(f, "purge [%s]", realpath)

    err := f.ensureDirectory(realpath)
    if err != nil {
        return err
    }

    return f.client.RemoveAll(realpath)
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    info, err := f.client.StatFs()
    if err != nil {
        return nil, err
    }
    return &fs.Usage{
        Total: fs.NewUsageValue(int64(info.Capacity)),
        Used:  fs.NewUsageValue(int64(info.Used)),
        Free:  fs.NewUsageValue(int64(info.Remaining)),
    }, nil
}

func (f *Fs) ensureDirectory(realpath string) error {
    info, err := f.client.Stat(realpath)

    if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
        return fs.ErrorDirNotFound
    }
    if err != nil {
        return err
    }
    if !info.IsDir() {
        return fs.ErrorDirNotFound
    }

    return nil
}

func (f *Fs) ensureFile(realpath string) (os.FileInfo, error) {
    info, err := f.client.Stat(realpath)

    if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
        return nil, fs.ErrorObjectNotFound
    }
    if err != nil {
        return nil, err
    }
    if info.IsDir() {
        return nil, fs.ErrorObjectNotFound
    }

    return info, nil
}

func (f *Fs) realpath(dir string) string {
    return f.opt.Enc.FromStandardPath(xPath(f.Root(), dir))
}

// Check the interfaces are satisfied
var (
    _ fs.Fs          = (*Fs)(nil)
    _ fs.Purger      = (*Fs)(nil)
    _ fs.PutStreamer = (*Fs)(nil)
    _ fs.Abouter     = (*Fs)(nil)
)
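getKerberosClient resolves the credential cache the same way the upstream hdfs CLI does: KRB5_CONFIG for the library configuration and KRB5CCNAME for the ticket cache (an optional FILE: prefix is stripped, any other scheme is rejected), falling back to /tmp/krb5cc_<uid>. For illustration, an environment the code above would accept (paths are examples only):

    export KRB5_CONFIG=/etc/krb5.conf
    export KRB5CCNAME=FILE:/tmp/krb5cc_1000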
backend/hdfs/hdfs.go (new file, 86 lines)
@@ -0,0 +1,86 @@
// +build !plan9

package hdfs

import (
    "path"
    "strings"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/lib/encoder"
)

func init() {
    fsi := &fs.RegInfo{
        Name:        "hdfs",
        Description: "Hadoop distributed file system",
        NewFs:       NewFs,
        Options: []fs.Option{{
            Name:     "namenode",
            Help:     "hadoop name node and port",
            Required: true,
            Examples: []fs.OptionExample{{
                Value: "namenode:8020",
                Help:  "Connect to host namenode at port 8020",
            }},
        }, {
            Name:     "username",
            Help:     "hadoop user name",
            Required: false,
            Examples: []fs.OptionExample{{
                Value: "root",
                Help:  "Connect to hdfs as root",
            }},
        }, {
            Name: "service_principal_name",
            Help: `Kerberos service principal name for the namenode

Enables KERBEROS authentication. Specifies the Service Principal Name
(<SERVICE>/<FQDN>) for the namenode.`,
            Required: false,
            Examples: []fs.OptionExample{{
                Value: "hdfs/namenode.hadoop.docker",
                Help:  "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
            }},
            Advanced: true,
        }, {
            Name: "data_transfer_protection",
            Help: `Kerberos data transfer protection: authentication|integrity|privacy

Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
            Required: false,
            Examples: []fs.OptionExample{{
                Value: "privacy",
                Help:  "Ensure authentication, integrity and encryption enabled.",
            }},
            Advanced: true,
        }, {
            Name:     config.ConfigEncoding,
            Help:     config.ConfigEncodingHelp,
            Advanced: true,
            Default:  (encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeColon),
        }},
    }
    fs.Register(fsi)
}

// Options for this backend
type Options struct {
    Namenode               string               `config:"namenode"`
    Username               string               `config:"username"`
    ServicePrincipalName   string               `config:"service_principal_name"`
    DataTransferProtection string               `config:"data_transfer_protection"`
    Enc                    encoder.MultiEncoder `config:"encoding"`
}

// xPath makes a correct file path with a leading '/'
func xPath(root string, tail string) string {
    if !strings.HasPrefix(root, "/") {
        root = "/" + root
    }
    return path.Join(root, tail)
}
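Put together, the options registered above translate into a remote definition along these lines; the remote name and host are placeholders, not values from this changeset:

    [hdfs-example]
    type = hdfs
    namenode = namenode.example.com:8020
    username = root

For a Kerberised cluster one would instead set service_principal_name (and optionally data_transfer_protection = privacy) and leave username unset.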
backend/hdfs/hdfs_test.go (new file, 20 lines)
@@ -0,0 +1,20 @@
// Test HDFS filesystem interface

// +build !plan9

package hdfs_test

import (
    "testing"

    "github.com/rclone/rclone/backend/hdfs"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestHdfs:",
        NilObject:  (*hdfs.Object)(nil),
    })
}
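Following the usual fstest conventions (assumed here, not spelled out in this diff), the integration test would be pointed at a configured remote with something like:

    go test -v ./backend/hdfs -remote TestHdfs: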
backend/hdfs/hdfs_unsupported.go (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
// Build for hdfs for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build plan9
|
||||
|
||||
package hdfs
|
||||
backend/hdfs/object.go (new file, 177 lines)
@@ -0,0 +1,177 @@
// +build !plan9

package hdfs

import (
    "context"
    "io"
    "path"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/readers"
)

// Object describes an HDFS file
type Object struct {
    fs      *Fs
    remote  string
    size    int64
    modTime time.Time
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
    return o.size
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
    return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    realpath := o.fs.realpath(o.Remote())
    err := o.fs.client.Chtimes(realpath, modTime, modTime)
    if err != nil {
        return err
    }
    o.modTime = modTime
    return nil
}

// Storable returns whether this object is storable
func (o *Object) Storable() bool {
    return true
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.Remote()
}

// Hash is not supported
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
    return "", hash.ErrUnsupported
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    realpath := o.realpath()
    fs.Debugf(o.fs, "open [%s]", realpath)
    f, err := o.fs.client.Open(realpath)
    if err != nil {
        return nil, err
    }

    var offset, limit int64 = 0, -1
    for _, option := range options {
        switch x := option.(type) {
        case *fs.SeekOption:
            offset = x.Offset
        case *fs.RangeOption:
            offset, limit = x.Decode(o.Size())
        }
    }

    _, err = f.Seek(offset, io.SeekStart)
    if err != nil {
        return nil, err
    }

    if limit != -1 {
        in = readers.NewLimitedReadCloser(f, limit)
    } else {
        in = f
    }

    return in, err
}

// Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    realpath := o.fs.realpath(src.Remote())
    dirname := path.Dir(realpath)
    fs.Debugf(o.fs, "update [%s]", realpath)

    err := o.fs.client.MkdirAll(dirname, 0755)
    if err != nil {
        return err
    }

    info, err := o.fs.client.Stat(realpath)
    if err == nil {
        err = o.fs.client.Remove(realpath)
        if err != nil {
            return err
        }
    }

    out, err := o.fs.client.Create(realpath)
    if err != nil {
        return err
    }

    cleanup := func() {
        rerr := o.fs.client.Remove(realpath)
        if rerr != nil {
            fs.Errorf(o.fs, "failed to remove [%v]: %v", realpath, rerr)
        }
    }

    _, err = io.Copy(out, in)
    if err != nil {
        cleanup()
        return err
    }

    err = out.Close()
    if err != nil {
        cleanup()
        return err
    }

    info, err = o.fs.client.Stat(realpath)
    if err != nil {
        return err
    }

    err = o.SetModTime(ctx, src.ModTime(ctx))
    if err != nil {
        return err
    }
    o.size = info.Size()

    return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
    realpath := o.fs.realpath(o.remote)
    fs.Debugf(o.fs, "remove [%s]", realpath)
    return o.fs.client.Remove(realpath)
}

func (o *Object) realpath() string {
    return o.fs.opt.Enc.FromStandardPath(xPath(o.Fs().Root(), o.remote))
}

// Check the interfaces are satisfied
var (
    _ fs.Object = (*Object)(nil)
)
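Open above translates the standard fs open options into a seek plus an optional read limit. A hedged sketch of a caller using it (the option types are rclone's; the byte range is arbitrary):

    // read bytes 100-199 of the object
    in, err := o.Open(ctx, &fs.RangeOption{Start: 100, End: 199})
    if err != nil {
        return err
    }
    defer in.Close()
    // in is wrapped in a limited ReadCloser, so reads stop after 100 bytes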
@@ -58,7 +58,7 @@ The input format is comma separated list of key,value pairs. Standard

For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

- You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
+ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
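For illustration, the corrected help text corresponds to a command line along these lines (the URL is a placeholder; the flag names follow rclone's backend-prefix convention):

    rclone lsd :http: --http-url https://example.com --http-headers "Cookie,name=value,Authorization,xxx"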
@@ -115,8 +115,9 @@ type Options struct {
type Fs struct {
name string
root string
- features *fs.Features // optional features
- opt Options // options for this backend
+ features *fs.Features // optional features
+ opt Options // options for this backend
+ ci *fs.ConfigInfo // global config
endpoint *url.URL
endpointURL string // endpoint as a string
httpClient *http.Client
@@ -145,8 +146,7 @@ func statusError(res *http.Response, err error) error {

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
- func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
- ctx := context.TODO()
+ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -172,7 +172,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}

- client := fshttp.NewClient(fs.Config)
+ client := fshttp.NewClient(ctx)

var isFile = false
if !strings.HasSuffix(u.String(), "/") {
@@ -183,9 +183,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return http.ErrUseLastResponse
}
// check to see if points to a file
- req, err := http.NewRequest("HEAD", u.String(), nil)
+ req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
if err == nil {
- req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
@@ -210,17 +209,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}

+ ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
+ ci: ci,
httpClient: client,
endpoint: u,
endpointURL: u.String(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
- }).Fill(f)
+ }).Fill(ctx, f)
if isFile {
return f, fs.ErrorIsFile
}
@@ -389,11 +390,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
- req, err := http.NewRequest("GET", URL, nil)
+ req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
- req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil {
@@ -440,14 +440,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
- in = make(chan string, fs.Config.Checkers)
+ checkers = f.ci.Checkers
+ in = make(chan string, checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
- for i := 0; i < fs.Config.Checkers; i++ {
+ for i := 0; i < checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
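The List hunk above swaps the global fs.Config.Checkers for the per-context value while keeping the same bounded worker-pool shape. Reduced to its essentials (a sketch, not the full listing):

    checkers := f.ci.Checkers         // concurrency from the per-context config
    in := make(chan string, checkers) // work queue of names to stat
    var wg sync.WaitGroup
    for i := 0; i < checkers; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for name := range in {
                // stat name and append the entry under entriesMu
                _ = name
            }
        }()
    }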
@@ -544,11 +545,10 @@ func (o *Object) stat(ctx context.Context) error {
return nil
}
url := o.url()
- req, err := http.NewRequest("HEAD", url, nil)
+ req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
- req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
if err == nil && res.StatusCode == http.StatusNotFound {
@@ -585,7 +585,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errorReadOnly
}

- // Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
+ // Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
return true
}
@@ -593,11 +593,10 @@ func (o *Object) Storable() bool {
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url()
- req, err := http.NewRequest("GET", url, nil)
+ req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
- req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {
@@ -15,7 +15,7 @@ import (
"time"

"github.com/rclone/rclone/fs"
- "github.com/rclone/rclone/fs/config"
+ "github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/rest"
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
ts := httptest.NewServer(handler)

// Configure the remote
- config.LoadConfig()
+ configfile.LoadConfig(context.Background())
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
@@ -69,7 +69,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)

// Instantiate it
- f, err := NewFs(remoteName, "", m)
+ f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)

return f, tidy
@@ -214,7 +214,7 @@ func TestIsAFileRoot(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()

- f, err := NewFs(remoteName, "one%.txt", m)
+ f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)

testListRoot(t, f, false)
@@ -224,7 +224,7 @@ func TestIsAFileSubDir(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()

- f, err := NewFs(remoteName, "three/underthree.txt", m)
+ f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)

entries, err := f.List(context.Background(), "")
@@ -5,7 +5,7 @@ import (
"net/http"
"time"

- "github.com/ncw/swift"
+ "github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
)

@@ -24,7 +24,7 @@ func newAuth(f *Fs) *auth {
// Request constructs an http.Request for authentication
//
// returns nil for not needed
- func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
+ func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
@@ -38,7 +38,7 @@ func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
}

// Response parses the result of an http request
- func (a *auth) Response(resp *http.Response) error {
+ func (a *auth) Response(ctx context.Context, resp *http.Response) error {
return nil
}
@@ -4,7 +4,7 @@ package hubic

// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
- // to be revisted after some actual experience.
+ // to be revisited after some actual experience.

import (
"context"
@@ -16,7 +16,7 @@ import (
"strings"
"time"

- swiftLib "github.com/ncw/swift"
+ swiftLib "github.com/ncw/swift/v2"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
@@ -56,8 +56,8 @@ func init() {
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
- Config: func(name string, m configmap.Mapper) {
- err := oauthutil.Config("hubic", name, m, oauthConfig, nil)
+ Config: func(ctx context.Context, name string, m configmap.Mapper) {
+ err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -71,7 +71,7 @@ func init() {
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
- Expires string `json:"expires"` // Expires date - eg "2015-11-09T14:24:56+01:00"
+ Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}

// Fs represents a remote hubic
@@ -110,11 +110,10 @@ func (f *Fs) String() string {
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
- req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
+ req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
- req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req)
if err != nil {
return err
@@ -146,8 +145,8 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
}

// NewFs constructs an Fs from the path, container:path
- func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
- client, _, err := oauthutil.NewClient(name, m, oauthConfig)
+ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+ client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Hubic")
}
@@ -157,13 +156,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// Make the swift Connection
+ ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
- ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
- Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
- Transport: fshttp.NewTransport(fs.Config),
+ ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
+ Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
+ Transport: fshttp.NewTransport(ctx),
}
- err = c.Authenticate()
+ err = c.Authenticate(ctx)
if err != nil {
return nil, errors.Wrap(err, "error authenticating swift connection")
}
@@ -176,7 +176,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// Make inner swift Fs from the connection
- swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
+ swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
@@ -153,9 +153,9 @@ type CustomerInfo struct {
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
- Qouta int64 `json:"quota"`
+ Quota int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
- BusinessQouta int64 `json:"business_quota"`
+ BusinessQuota int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
@@ -386,7 +386,7 @@ type Error struct {
Cause string `xml:"cause"`
}

- // Error returns a string for the error and statistifes the error interface
+ // Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("error %d", e.StatusCode)
if e.Message != "" {

@@ -63,6 +63,10 @@ const (
v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
v1configVersion = 0
+
+ teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
+ teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
+ teliaCloudClientID = "desktop"
)

var (
@@ -83,9 +87,7 @@ func init() {
Name: "jottacloud",
Description: "Jottacloud",
NewFs: NewFs,
- Config: func(name string, m configmap.Mapper) {
- ctx := context.TODO()
-
+ Config: func(ctx context.Context, name string, m configmap.Mapper) {
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
@@ -107,11 +109,18 @@ func init() {
}
}

- fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
- if config.Confirm(false) {
- v1config(ctx, name, m)
- } else {
+ fmt.Printf("Choose authentication type:\n" +
+ "1: Standard authentication - use this if you're a normal Jottacloud user.\n" +
+ "2: Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n" +
+ "3: Telia Cloud authentication - use this if you are using Telia Cloud.\n")
+
+ switch config.ChooseNumber("Your choice", 1, 3) {
+ case 1:
+ v2config(ctx, name, m)
+ case 2:
+ v1config(ctx, name, m)
+ case 3:
+ teliaCloudConfig(ctx, name, m)
+ }
},
Options: []fs.Option{{
@@ -226,13 +235,56 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
- func shouldRetry(resp *http.Response, err error) (bool, error) {
+ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+ if fserrors.ContextError(ctx, &err) {
+ return false, err
+ }
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
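With fserrors.ContextError folded into shouldRetry, every pacer loop below stops retrying as soon as the caller's context is cancelled instead of burning through its retry budget. The call-site pattern, as repeated throughout the remaining hunks (a sketch using the names above):

    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallXML(ctx, &opts, nil, &result)
        return shouldRetry(ctx, resp, err) // (false, err) once ctx is done
    })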
- // v1config configure a jottacloud backend using legacy authentification
+ func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
+ teliaCloudOauthConfig := &oauth2.Config{
+ Endpoint: oauth2.Endpoint{
+ AuthURL: teliaCloudAuthURL,
+ TokenURL: teliaCloudTokenURL,
+ },
+ ClientID: teliaCloudClientID,
+ Scopes: []string{"openid", "jotta-default", "offline_access"},
+ RedirectURL: oauthutil.RedirectLocalhostURL,
+ }
+
+ err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
+ if err != nil {
+ log.Fatalf("Failed to configure token: %v", err)
+ return
+ }
+
+ fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
+ if config.Confirm(false) {
+ oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
+ if err != nil {
+ log.Fatalf("Failed to load oAuthClient: %s", err)
+ }
+
+ srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
+ apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
+
+ device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
+ if err != nil {
+ log.Fatalf("Failed to setup mountpoint: %s", err)
+ }
+ m.Set(configDevice, device)
+ m.Set(configMountpoint, mountpoint)
+ }
+
+ m.Set("configVersion", strconv.Itoa(configVersion))
+ m.Set(configClientID, teliaCloudClientID)
+ m.Set(configTokenURL, teliaCloudTokenURL)
+ }
+
+ // v1config configure a jottacloud backend using legacy authentication
func v1config(ctx context.Context, name string, m configmap.Mapper) {
- srv := rest.NewClient(fshttp.NewClient(fs.Config))
+ srv := rest.NewClient(fshttp.NewClient(ctx))

fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm(false) {
@@ -275,7 +327,7 @@ func v1config(ctx context.Context, name string, m configmap.Mapper) {

fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
- oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
+ oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
@@ -323,7 +375,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
return deviceRegistration, err
}

- // doAuthV1 runs the actual token request for V1 authentification
+ // doAuthV1 runs the actual token request for V1 authentication
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare out token request with username and password
values := url.Values{}
@@ -365,14 +417,17 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
return token, err
}

- // v2config configure a jottacloud backend using the modern JottaCli token based authentification
+ // v2config configure a jottacloud backend using the modern JottaCli token based authentication
func v2config(ctx context.Context, name string, m configmap.Mapper) {
- srv := rest.NewClient(fshttp.NewClient(fs.Config))
+ srv := rest.NewClient(fshttp.NewClient(ctx))

fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()

+ m.Set(configClientID, "jottacli")
+ m.Set(configClientSecret, "")
+
token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
@@ -384,8 +439,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {

fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
- oauthConfig.ClientID = "jottacli"
- oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
+ oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
@@ -403,7 +457,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
m.Set("configVersion", strconv.Itoa(configVersion))
}

- // doAuthV2 runs the actual token request for V2 authentification
+ // doAuthV2 runs the actual token request for V2 authentication
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
@@ -551,7 +605,7 @@ func (f *Fs) setEndpointURL() {
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
- f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
+ f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
}

// readMetaDataForPath reads the metadata from the path
@@ -564,7 +618,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})

if apiErr, ok := err.(*api.Error); ok {
@@ -639,8 +693,7 @@ func grantTypeFilter(req *http.Request) {
}

// NewFs constructs an Fs from the path, container:path
- func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
- ctx := context.TODO()
+ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -662,7 +715,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.New("Outdated config - please reconfigure this backend")
}

- baseClient := fshttp.NewClient(fs.Config)
+ baseClient := fshttp.NewClient(ctx)

if ver == configVersion {
oauthConfig.ClientID = "jottacli"
@@ -698,7 +751,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// Create OAuth Client
- oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
+ oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}
@@ -712,14 +765,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
- pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+ pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
ReadMimeType: true,
- WriteMimeType: true,
- }).Fill(f)
+ WriteMimeType: false,
+ }).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
f.features.ListR = nil
@@ -728,6 +781,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
+ if err == fs.ErrorNotAFile {
+ err = nil
+ }
return err
})

@@ -801,7 +857,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e

err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -830,7 +886,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var result api.JottaFolder
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})

if err != nil {
@@ -942,7 +998,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
@@ -1048,7 +1104,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't purge directory")
@@ -1087,8 +1143,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
- retry, _ := shouldRetry(resp, err)
- return (retry && resp.StatusCode != 500), err
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -1096,7 +1151,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
return info, nil
}

- // Copy src to this remote using server side copy operations.
+ // Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -1126,7 +1181,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
//return f.newObjectWithInfo(remote, &result)
}

- // Move src to this remote using server side move operations.
+ // Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -1157,7 +1212,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
- // using server side move operations.
+ // using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1192,18 +1247,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)

- // surprise! jottacloud fucked up dirmove - the api spits out an error but
- // dir gets moved regardless
- if apiErr, ok := err.(*api.Error); ok {
- if apiErr.StatusCode == 500 {
- _, err := f.NewObject(ctx, dstRemote)
- if err == fs.ErrorNotAFile {
- log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
- return nil
- }
- return err
- }
- }
if err != nil {
return errors.Wrap(err, "couldn't move directory")
}
@@ -1228,7 +1271,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var result api.JottaFile
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})

if apiErr, ok := err.(*api.Error); ok {
@@ -1406,7 +1449,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -1477,6 +1520,8 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+ o.fs.tokenRenewer.Start()
+ defer o.fs.tokenRenewer.Stop()
size := src.Size()
md5String, err := src.Hash(ctx, hash.MD5)
if err != nil || md5String == "" {
@@ -1517,13 +1562,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var response api.AllocateFileResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}

- // If the file state is INCOMPLETE and CORRPUT, try to upload a then
+ // If the file state is INCOMPLETE and CORRUPT, try to upload a then
if response.State != "COMPLETED" {
// how much do we still have to upload?
remainingBytes := size - response.ResumePos
@@ -1582,7 +1627,7 @@ func (o *Object) Remove(ctx context.Context) error {

return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
- return shouldRetry(resp, err)
+ return shouldRetry(ctx, resp, err)
})
}
@@ -256,7 +256,7 @@ func (f *Fs) fullPath(part string) string {
}

// NewFs constructs a new filesystem given a root path and configuration options
- func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
+ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
@@ -267,7 +267,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
return nil, err
}
httpClient := httpclient.New()
- httpClient.Client = fshttp.NewClient(fs.Config)
+ httpClient.Client = fshttp.NewClient(ctx)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
@@ -287,7 +287,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
DuplicateFiles: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
- }).Fill(f)
+ }).Fill(ctx, f)
for _, m := range mounts {
if opt.MountID != "" {
if m.Id == opt.MountID {
@@ -42,8 +42,9 @@ func init() {
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
- Name: "nounc",
- Help: "Disable UNC (long path names) conversion on Windows",
+ Name: "nounc",
+ Help: "Disable UNC (long path names) conversion on Windows",
+ Advanced: runtime.GOOS != "windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
@@ -70,6 +71,20 @@ points, as you explicitly acknowledge that they should be skipped.`,
Default: false,
NoPrefix: true,
Advanced: true,
+ }, {
+ Name: "zero_size_links",
+ Help: `Assume the Stat size of links is zero (and read them instead)
+
+ On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
+ However, on unix it reads as the length of the text in the link. This may cause errors like this when
+ syncing:
+
+ Failed to copy: corrupted on transfer: sizes differ 0 vs 13
+
+ Setting this flag causes rclone to read the link and use that as the size of the link
+ instead of 0 which in most cases fixes the problem.`,
+ Default: false,
+ Advanced: true,
}, {
Name: "no_unicode_normalization",
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
@@ -87,13 +102,13 @@ Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.

- However on some file systems this modification time check may fail (eg
+ However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
check can be disabled with this flag.

If this flag is set, rclone will use its best efforts to transfer a
file which is being updated. If the file is only having things
- appended to it (eg a log) then rclone will transfer the log file with
+ appended to it (e.g. a log) then rclone will transfer the log file with
the size it had the first time rclone saw it.

If the file is being modified throughout (not just appended to) then
@@ -134,6 +149,17 @@ Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
+ }, {
+ Name: "no_preallocate",
+ Help: `Disable preallocation of disk space for transferred files
+
+ Preallocation of disk space helps prevent filesystem fragmentation.
+ However, some virtual filesystem layers (such as Google Drive File
+ Stream) may incorrectly set the actual file size equal to the
+ preallocated space, causing checksum and file size checks to fail.
+ Use this flag to disable preallocation.`,
+ Default: false,
+ Advanced: true,
}, {
Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads
@@ -170,12 +196,14 @@ type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
+ ZeroSizeLinks bool `config:"zero_size_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
+ NoPreAllocate bool `config:"no_preallocate"`
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
Enc encoder.MultiEncoder `config:"encoding"`
@@ -217,7 +245,7 @@ type Object struct {
|
||||
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
@@ -245,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
CanHaveEmptyDirectories: true,
|
||||
IsLocal: true,
|
||||
SlowHash: true,
|
||||
}).Fill(f)
|
||||
}).Fill(ctx, f)
|
||||
if opt.FollowSymlinks {
|
||||
f.lstat = os.Stat
|
||||
}
|
||||
@@ -456,8 +484,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||
localPath := filepath.Join(fsDirPath, name)
|
||||
fi, err = os.Stat(localPath)
|
||||
if os.IsNotExist(err) {
|
||||
// Skip bad symlinks
|
||||
if os.IsNotExist(err) || isCircularSymlinkError(err) {
|
||||
// Skip bad symlinks and circular symlinks
|
||||
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
|
||||
fs.Errorf(newRemote, "Listing error: %v", err)
|
||||
err = accounting.Stats(ctx).Error(err)
|
||||
@@ -637,7 +665,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
return os.RemoveAll(dir)
|
||||
}
|
||||
|
||||
// Move src to this remote using server side move operations.
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
@@ -701,7 +729,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server side move operations.
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
@@ -1112,10 +1140,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Pre-allocate the file for performance reasons
|
||||
err = file.PreAllocate(src.Size(), f)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to pre-allocate: %v", err)
|
||||
if !o.fs.opt.NoPreAllocate {
|
||||
// Pre-allocate the file for performance reasons
|
||||
err = file.PreAllocate(src.Size(), f)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to pre-allocate: %v", err)
|
||||
if err == file.ErrDiskFull {
|
||||
_ = f.Close()
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
out = f
|
||||
} else {
|
||||
@@ -1202,9 +1236,11 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
|
||||
return nil, err
|
||||
}
|
||||
// Pre-allocate the file for performance reasons
|
||||
err = file.PreAllocate(size, out)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to pre-allocate: %v", err)
|
||||
if !f.opt.NoPreAllocate {
|
||||
err = file.PreAllocate(size, out)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to pre-allocate: %v", err)
|
||||
}
|
||||
}
|
||||
if !f.opt.NoSparse && file.SetSparseImplemented {
|
||||
sparseWarning.Do(func() {
|
||||
@@ -1213,7 +1249,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
|
||||
// Set the file to be a sparse file (important on Windows)
|
||||
err = file.SetSparse(out)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to set sparse: %v", err)
|
||||
fs.Errorf(o, "Failed to set sparse: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1231,6 +1267,16 @@ func (o *Object) setMetadata(info os.FileInfo) {
|
||||
o.modTime = info.ModTime()
|
||||
o.mode = info.Mode()
|
||||
o.fs.objectMetaMu.Unlock()
|
||||
// On Windows links read as 0 size so set the correct size here
|
||||
// Optionally, users can turn this feature on with the zero_size_links flag
|
||||
if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
|
||||
linkdst, err := os.Readlink(o.path)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to read link size: %v", err)
|
||||
} else {
|
||||
o.size = int64(len(linkdst))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stat an Object into info
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -89,9 +88,6 @@ func TestSymlink(t *testing.T) {
|
||||
|
||||
// Object viewed as symlink
|
||||
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
|
||||
if runtime.GOOS == "windows" {
|
||||
file2.Size = 0 // symlinks are 0 length under Windows
|
||||
}
|
||||
|
||||
// Object viewed as destination
|
||||
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
|
||||
@@ -121,9 +117,6 @@ func TestSymlink(t *testing.T) {
|
||||
// Create a symlink
|
||||
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
|
||||
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
|
||||
if runtime.GOOS == "windows" {
|
||||
file3.Size = 0 // symlinks are 0 length under Windows
|
||||
}
|
||||
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
|
||||
if haveLChtimes {
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
|
||||
@@ -142,9 +135,7 @@ func TestSymlink(t *testing.T) {
|
||||
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
|
||||
if runtime.GOOS != "windows" {
|
||||
assert.Equal(t, int64(8), o.Size())
|
||||
}
|
||||
assert.Equal(t, int64(8), o.Size())
|
||||
|
||||
// Check that NewObject doesn't see the non suffixed version
|
||||
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
|
||||
@@ -172,6 +163,6 @@ func TestSymlinkError(t *testing.T) {
|
||||
"links": "true",
|
||||
"copy_links": "true",
|
||||
}
|
||||
_, err := NewFs("local", "/", m)
|
||||
_, err := NewFs(context.Background(), "local", "/", m)
|
||||
assert.Equal(t, errLinksAndCopyLinks, err)
|
||||
}
|
||||
|
||||
22  backend/local/symlink.go  Normal file
@@ -0,0 +1,22 @@
// +build !windows,!plan9,!js

package local

import (
"os"
"syscall"
)

// isCircularSymlinkError checks if the current error code is because of a circular symlink
func isCircularSymlinkError(err error) bool {
if err != nil {
if newerr, ok := err.(*os.PathError); ok {
if errcode, ok := newerr.Err.(syscall.Errno); ok {
if errcode == syscall.ELOOP {
return true
}
}
}
}
return false
}
17  backend/local/symlink_other.go  Normal file
@@ -0,0 +1,17 @@
// +build windows plan9 js

package local

import (
"strings"
)

// isCircularSymlinkError checks if the current error code is because of a circular symlink
func isCircularSymlinkError(err error) bool {
if err != nil {
if strings.Contains(err.Error(), "The name of the file cannot be resolved by the system") {
return true
}
}
return false
}
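
A quick way to see the unix build of this helper fire is to point a symlink at itself: resolving it makes the kernel return ELOOP. An illustrative test, not part of this diff (assuming the testify imports already used in the tests above):

    func TestCircularSymlink(t *testing.T) {
    	dir := t.TempDir()
    	loop := filepath.Join(dir, "loop")
    	require.NoError(t, os.Symlink(loop, loop)) // link targets itself
    	_, err := os.Stat(loop)                    // resolving it yields ELOOP
    	assert.True(t, isCircularSymlinkError(err))
    }
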
@@ -117,7 +117,7 @@ type ListItem struct {
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
Mtime int64 `json:"mtime,omitempty"`
Mtime uint64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`
@@ -159,71 +159,6 @@ type FolderInfoResponse struct {
Email string `json:"email"`
}

// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`

@@ -37,6 +37,7 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"

"github.com/pkg/errors"
@@ -101,6 +102,7 @@ func init() {
This feature is called "speedup" or "put by hash". It is especially efficient
in case of generally available files like popular books, video or audio clips,
because files are searched by hash in all accounts of all mailru users.
It is meaningless and ineffective if source file is unique or encrypted.
Please note that rclone may need local memory and disk space to calculate
content hash in advance and decide whether full upload is required.
Also, if rclone does not know file size in advance (e.g. in case of
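
In code terms the trade-off described above reads roughly like the following sketch (hypothetical helper names; the real flow is in the Update hunks further down): only files larger than the hash itself are worth an extra round trip, and the hash may come from the source, from memory, or from a spool file.

    // putPlan names the upload strategies tried in order when "speedup"
    // (put by hash) is enabled; an illustrative helper, mirroring the
    // Update hunks below rather than quoting them.
    func putPlan(size, maxMem int64, hashKnown bool) []string {
    	if hashKnown {
    		return []string{"put-by-hash(source)", "stream-upload"}
    	}
    	if size <= maxMem {
    		return []string{"hash-in-memory", "put-by-hash", "stream-upload"}
    	}
    	return []string{"hash-via-spool-file", "put-by-hash", "stream-upload"}
    }
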
@@ -191,7 +193,7 @@ This option must not be used by an ordinary user. It is intended only to
facilitate remote troubleshooting of backend issues. Strict meaning of
flags is not documented and not guaranteed to persist between releases.
Quirks will be removed when the backend grows stable.
Supported quirks: atomicmkdir binlist gzip insecure retry400`,
Supported quirks: atomicmkdir binlist unknowndirs`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -232,14 +234,14 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this response and err
// deserve to be retried. It returns the err as a convenience.
// Retries password authorization (once) in a special case of access denied.
func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
func shouldRetry(ctx context.Context, res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if res != nil && res.StatusCode == 403 && f.opt.Password != "" && !f.passFailed {
reAuthErr := f.reAuthorize(opts, err)
return reAuthErr == nil, err // return an original error
}
if res != nil && res.StatusCode == 400 && f.quirks.retry400 {
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}

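
The new ctx parameter is what lets every retry loop stop as soon as the context is cancelled: fserrors.ContextError reports whether ctx has expired (rewriting err accordingly), so the pacer gives up instead of backing off and retrying. The calling pattern, repeated throughout this diff, looks like:

    err = f.pacer.Call(func() (bool, error) {
    	res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
    	// A cancelled context makes shouldRetry return (false, err),
    	// so the pacer stops immediately instead of backing off.
    	return shouldRetry(ctx, res, err, f, &opts)
    })
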
@@ -274,8 +276,9 @@ type Fs struct {
name string
root string // root path
opt Options // parsed options
ci *fs.ConfigInfo // global config
speedupGlobs []string // list of file name patterns eligible for speedup
speedupAny bool // true if all file names are aligible for speedup
speedupAny bool // true if all file names are eligible for speedup
features *fs.Features // optional features
srv *rest.Client // REST API client
cli *http.Client // underlying HTTP client (for authorize)
@@ -295,9 +298,8 @@ type Fs struct {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// fs.Debugf(nil, ">>> NewFs %q %q", name, root)
ctx := context.Background() // Note: NewFs does not pass context!

// Parse config into Options struct
opt := new(Options)
@@ -314,10 +316,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// However the f.root string should not have leading or trailing slashes
root = strings.Trim(root, "/")

ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
m: m,
}

@@ -326,7 +330,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
f.quirks.parseQuirks(opt.Quirks)

f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))

f.features = (&fs.Features{
CaseInsensitive: true,
@@ -334,27 +338,21 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Can copy/move across mailru configs (almost, thus true here), but
// only when they share common account (this is checked in Copy/Move).
ServerSideAcrossConfigs: true,
}).Fill(f)
}).Fill(ctx, f)

// Override few config settings and create a client
clientConfig := *fs.Config
newCtx, clientConfig := fs.AddConfig(ctx)
if opt.UserAgent != "" {
clientConfig.UserAgent = opt.UserAgent
}
clientConfig.NoGzip = !f.quirks.gzip // Send not "Accept-Encoding: gzip" like official client
f.cli = fshttp.NewClient(&clientConfig)
clientConfig.NoGzip = true // Mimic official client, skip sending "Accept-Encoding: gzip"
f.cli = fshttp.NewClient(newCtx)

f.srv = rest.NewClient(f.cli)
f.srv.SetRoot(api.APIServerURL)
f.srv.SetHeader("Accept", "*/*") // Send "Accept: */*" with every request like official client
f.srv.SetErrorHandler(errorHandler)

if f.quirks.insecure {
transport := f.cli.Transport.(*fshttp.Transport).Transport
transport.TLSClientConfig.InsecureSkipVerify = true
transport.ProxyConnectHeader = http.Header{"User-Agent": {clientConfig.UserAgent}}
}

if err = f.authorize(ctx, false); err != nil {
return nil, err
}
@@ -387,30 +385,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Internal maintenance flags (to be removed when the backend matures).
// Primarily intended to facilitate remote support and troubleshooting.
type quirks struct {
gzip bool
insecure bool
binlist bool
atomicmkdir bool
retry400 bool
unknowndirs bool
}

func (q *quirks) parseQuirks(option string) {
for _, flag := range strings.Split(option, ",") {
switch strings.ToLower(strings.TrimSpace(flag)) {
case "gzip":
// This backend mimics the official client which never sends the
// "Accept-Encoding: gzip" header. However, enabling compression
// might be good for performance.
// Use this quirk to investigate the performance impact.
// Remove this quirk if performance does not improve.
q.gzip = true
case "insecure":
// The mailru disk-o protocol is not documented. To compare HTTP
// stream against the official client one can use Telerik Fiddler,
// which introduces a self-signed certificate. This quirk forces
// the Go http layer to accept it.
// Remove this quirk when the backend reaches maturity.
q.insecure = true
case "binlist":
// The official client sometimes uses a so called "bin" protocol,
// implemented in the listBin file system method below. This method
@@ -423,18 +405,14 @@ func (q *quirks) parseQuirks(option string) {
case "atomicmkdir":
// At the moment rclone requires Mkdir to return success if the
// directory already exists. However, such programs as borgbackup
// or restic use mkdir as a locking primitive and depend on its
// atomicity. This quirk is a workaround. It can be removed
// when the above issue is investigated.
// use mkdir as a locking primitive and depend on its atomicity.
// Remove this quirk when the above issue is investigated.
q.atomicmkdir = true
case "retry400":
// This quirk will help in troubleshooting a very rare "Error 400"
// issue. It can be removed if the problem does not show up
// for a year or so. See the below issue:
// https://github.com/ivandeex/rclone/issues/14
q.retry400 = true
case "unknowndirs":
// Accepts unknown resource types as folders.
q.unknowndirs = true
default:
// Just ignore all unknown flags
// Ignore unknown flags
}
}
}
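
parseQuirks is deliberately forgiving: entries are split on commas, trimmed, lower-cased, and unknown names are silently dropped, so a config line survives releases that remove a quirk. For example:

    var q quirks
    q.parseQuirks("AtomicMkdir, unknowndirs, gzip")
    // q.atomicmkdir == true and q.unknowndirs == true;
    // "gzip" is no longer recognised after this change and is ignored.
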
@@ -448,7 +426,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {

if err != nil || !tokenIsValid(t) {
fs.Infof(f, "Valid token not found, authorizing.")
ctx := oauthutil.Context(f.cli)
ctx := oauthutil.Context(ctx, f.cli)
t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
}
if err == nil && !tokenIsValid(t) {
@@ -471,7 +449,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
// crashing with panic `comparing uncomparable type map[string]interface{}`
// As a workaround, mimic oauth2.NewClient() wrapping token source in
// oauth2.ReuseTokenSource
_, ts, err := oauthutil.NewClientWithBaseClient(f.name, f.m, oauthConfig, f.cli)
_, ts, err := oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, f.cli)
if err == nil {
f.source = oauth2.ReuseTokenSource(nil, ts)
}
@@ -550,7 +528,7 @@ func (f *Fs) relPath(absPath string) (string, error) {
return "", fmt.Errorf("path %q should be under %q", absPath, f.root)
}

// metaServer ...
// metaServer returns URL of current meta server
func (f *Fs) metaServer(ctx context.Context) (string, error) {
f.metaMu.Lock()
defer f.metaMu.Unlock()
@@ -625,7 +603,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
var info api.ItemInfoResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
return shouldRetry(ctx, res, err, f, &opts)
})

if err != nil {
@@ -655,28 +633,56 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
if err != nil {
return nil, -1, err
}
switch item.Kind {
case "folder":
dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
dirSize := item.Count.Files + item.Count.Folders
return dir, dirSize, nil
case "file":
binHash, err := mrhash.DecodeString(item.Hash)
if err != nil {
return nil, -1, err
}
file := &Object{
fs: f,
remote: remote,
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: time.Unix(item.Mtime, 0),
}
return file, -1, nil
default:
return nil, -1, fmt.Errorf("Unknown resource type %q", item.Kind)

mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
modTime := time.Unix(mTime, 0)

isDir, err := f.isDir(item.Kind, remote)
if err != nil {
return nil, -1, err
}
if isDir {
dir := fs.NewDir(remote, modTime).SetSize(item.Size)
return dir, item.Count.Files + item.Count.Folders, nil
}

binHash, err := mrhash.DecodeString(item.Hash)
if err != nil {
return nil, -1, err
}
file := &Object{
fs: f,
remote: remote,
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: modTime,
}
return file, -1, nil
}

// isDir returns true for directories, false for files
func (f *Fs) isDir(kind, path string) (bool, error) {
switch kind {
case "":
return false, errors.New("empty resource type")
case "file":
return false, nil
case "folder":
// fall thru
case "camera-upload", "mounted", "shared":
fs.Debugf(f, "[%s]: folder has type %q", path, kind)
default:
if !f.quirks.unknowndirs {
return false, fmt.Errorf("unknown resource type %q", kind)
}
fs.Errorf(f, "[%s]: folder has unknown type %q", path, kind)
}
return true, nil
}

// List the objects and directories in dir into entries.
@@ -692,7 +698,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
entries, err = f.listM1(ctx, f.absPath(dir), 0, maxInt32)
}

if err == nil && fs.Config.LogLevel >= fs.LogLevelDebug {
if err == nil && f.ci.LogLevel >= fs.LogLevelDebug {
names := []string{}
for _, entry := range entries {
names = append(names, entry.Remote())
@@ -733,7 +739,7 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
)
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
return shouldRetry(ctx, res, err, f, &opts)
})

if err != nil {
@@ -744,7 +750,11 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
return nil, err
}

if info.Body.Kind != "folder" {
isDir, err := f.isDir(info.Body.Kind, dirPath)
if err != nil {
return nil, err
}
if !isDir {
return nil, fs.ErrorIsFile
}

@@ -793,7 +803,7 @@ func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs
var res *http.Response
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
return shouldRetry(res, err, f, &opts)
return shouldRetry(ctx, res, err, f, &opts)
})
if err != nil {
closeBody(res)
@@ -952,7 +962,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
return nil, r.Error()
}

if fs.Config.LogLevel >= fs.LogLevelDebug {
if t.f.ci.LogLevel >= fs.LogLevelDebug {
ctime, _ := modTime.MarshalJSON()
fs.Debugf(t.f, "binDir %d.%d %q %q (%d) %s", t.level, itemType, t.currDir, name, size, ctime)
}
@@ -1066,7 +1076,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) error {
var res *http.Response
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
return shouldRetry(res, err, f, &opts)
return shouldRetry(ctx, res, err, f, &opts)
})
if err != nil {
closeBody(res)
@@ -1209,7 +1219,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
var response api.GenericResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(res, err, f, &opts)
return shouldRetry(ctx, res, err, f, &opts)
})

switch {
@@ -1222,7 +1232,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
}
}

// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
// This is stored with the remote path given.
// It returns the destination Object and a possible error.
// Will only be called if src.Fs().Name() == f.Name()
@@ -1281,7 +1291,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var response api.GenericBodyResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(res, err, f, &opts)
return shouldRetry(ctx, res, err, f, &opts)
})

if err != nil {
@@ -1317,7 +1327,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, err
}

// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
// This is stored with the remote path given.
// It returns the destination Object and a possible error.
// Will only be called if src.Fs().Name() == f.Name()
@@ -1385,7 +1395,7 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
var res *http.Response
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
return shouldRetry(res, err, f, &opts)
return shouldRetry(ctx, res, err, f, &opts)
})
if err != nil {
closeBody(res)
@@ -1404,7 +1414,7 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantDirMove
// If destination exists then return fs.ErrorDirExists
@@ -1476,7 +1486,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var response api.GenericBodyResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(res, err, f, &opts)
return shouldRetry(ctx, res, err, f, &opts)
})

if err == nil && response.Body != "" {
@@ -1517,7 +1527,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
var response api.CleanupResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(res, err, f, &opts)
return shouldRetry(ctx, res, err, f, &opts)
})
if err != nil {
return err
@@ -1550,7 +1560,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var info api.UserInfoResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
return shouldRetry(ctx, res, err, f, &opts)
})
if err != nil {
return nil, err
@@ -1597,23 +1607,28 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

var (
fileBuf []byte
fileHash []byte
newHash []byte
trySpeedup bool
fileBuf []byte
fileHash []byte
newHash []byte
slowHash bool
localSrc bool
)
if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil {
srcFeatures := srcObj.Fs().Features()
slowHash = srcFeatures.SlowHash
localSrc = srcFeatures.IsLocal
}

// Don't disturb the source if file fits in hash.
// Skip an extra speedup request if file fits in hash.
if size > mrhash.Size {
// Request hash from source.
// Try speedup if it's globally enabled but skip extra post
// request if file is small and fits in the metadata request
trySpeedup := o.fs.opt.SpeedupEnable && size > mrhash.Size

// Try to get the hash if it's instant
if trySpeedup && !slowHash {
if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
fileHash, _ = mrhash.DecodeString(srcHash)
}

// Try speedup if it's globally enabled and source hash is available.
trySpeedup = o.fs.opt.SpeedupEnable
if trySpeedup && fileHash != nil {
if fileHash != nil {
if o.putByHash(ctx, fileHash, src, "source") {
return nil
}
@@ -1622,13 +1637,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// Need to calculate hash, check whether file is still eligible for speedup
if trySpeedup {
trySpeedup = o.fs.eligibleForSpeedup(o.Remote(), size, options...)
trySpeedup = trySpeedup && o.fs.eligibleForSpeedup(o.Remote(), size, options...)

// Attempt to put by hash if file is local and eligible
if trySpeedup && localSrc {
if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
fileHash, _ = mrhash.DecodeString(srcHash)
}
if fileHash != nil && o.putByHash(ctx, fileHash, src, "localfs") {
return nil
}
// If local file hashing has failed, it's pointless to try anymore
trySpeedup = false
}

// Attempt to put by calculating hash in memory
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
//fs.Debugf(o, "attempt to put by hash from memory")
fileBuf, err = ioutil.ReadAll(in)
if err != nil {
return err
@@ -1643,7 +1667,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Attempt to put by hash using a spool file
if trySpeedup {
tmpFs, err := fs.TemporaryLocalFs()
tmpFs, err := fs.TemporaryLocalFs(ctx)
if err != nil {
fs.Infof(tmpFs, "Failed to create spool FS: %v", err)
} else {
@@ -1758,6 +1782,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
return nil
}

// putByHash is a thin wrapper around addFileMetaData
func (o *Object) putByHash(ctx context.Context, mrHash []byte, info fs.ObjectInfo, method string) bool {
oNew := new(Object)
*oNew = *o
@@ -1861,30 +1886,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
return f.shardURL, nil
}

token, err := f.accessToken()
if err != nil {
return "", err
}

opts := rest.Opts{
Method: "GET",
Path: "/api/m1/dispatcher",
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"access_token": {token},
},
RootURL: api.DispatchServerURL,
Method: "GET",
Path: "/u",
}

var info api.ShardInfoResponse
var (
res *http.Response
url string
err error
)
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
res, err = f.srv.Call(ctx, &opts)
if err == nil {
url, err = readBodyWord(res)
}
return fserrors.ShouldRetry(err), err
})
if err != nil {
closeBody(res)
return "", err
}

f.shardURL = info.Body.Upload[0].URL
f.shardURL = url
f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
fs.Debugf(f, "new upload shard: %s", f.shardURL)

@@ -2054,7 +2079,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(res, err, o.fs, &opts)
return shouldRetry(ctx, res, err, o.fs, &opts)
})
if err != nil {
closeBody(res)
@@ -2116,7 +2141,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}

start, end, partial := getTransferRange(o.size, options...)
start, end, partialRequest := getTransferRange(o.size, options...)

headers := map[string]string{
"Accept": "*/*",
"Content-Type": "application/octet-stream",
}
if partialRequest {
rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
headers["Range"] = rangeStr
// headers["Content-Range"] = rangeStr
headers["Accept-Ranges"] = "bytes"
}

// TODO: set custom timeouts
opts := rest.Opts{
@@ -2127,10 +2163,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
"client_id": {api.OAuthClientID},
"token": {token},
},
ExtraHeaders: map[string]string{
"Accept": "*/*",
"Range": fmt.Sprintf("bytes=%d-%d", start, end-1),
},
ExtraHeaders: headers,
}

var res *http.Response
@@ -2142,7 +2175,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
opts.RootURL = server
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(res, err, o.fs, &opts)
return shouldRetry(ctx, res, err, o.fs, &opts)
})
if err != nil {
if res != nil && res.Body != nil {
@@ -2151,18 +2184,37 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}

var hasher gohash.Hash
if !partial {
// Server should respond with Status 206 and Content-Range header to a range
// request. Status 200 (and no Content-Range) means a full-content response.
partialResponse := res.StatusCode == 206

var (
hasher gohash.Hash
wrapStream io.ReadCloser
)
if !partialResponse {
// Cannot check hash of partial download
hasher = mrhash.New()
}
wrapStream := &endHandler{
wrapStream = &endHandler{
ctx: ctx,
stream: res.Body,
hasher: hasher,
o: o,
server: server,
}
if partialRequest && !partialResponse {
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
closeBody(res)
return nil, err
}
}
wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
}
return wrapStream, nil
}

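
The fallback above covers servers that answer a Range request with a plain 200: the start of the body is discarded and the remainder is capped to the requested window. The same idea with only the standard library (a sketch, not the rclone helper):

    // clipToRange turns a full-content body into the requested [start, end)
    // window by discarding the prefix and limiting what remains.
    func clipToRange(body io.Reader, start, end int64) (io.Reader, error) {
    	if start > 0 {
    		if _, err := io.CopyN(ioutil.Discard, body, start); err != nil {
    			return nil, err
    		}
    	}
    	return io.LimitReader(body, end-start), nil
    }

For example, a request for bytes=100-199 answered with a 200 is handled by discarding 100 bytes and limiting the reader to the remaining 100.
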
@@ -2215,7 +2267,7 @@ func (e *endHandler) handle(err error) error {
return io.EOF
}

// serverPool backs server dispacher
// serverPool backs server dispatcher
type serverPool struct {
pool pendingServerMap
mu sync.Mutex
@@ -2330,7 +2382,7 @@ func (p *serverPool) addServer(url string, now time.Time) {
expiry := now.Add(p.expirySec * time.Second)

expiryStr := []byte("-")
if fs.Config.LogLevel >= fs.LogLevelInfo {
if p.fs.ci.LogLevel >= fs.LogLevelInfo {
expiryStr, _ = expiry.MarshalJSON()
}

@@ -11,7 +11,7 @@ Improvements:
* Uploads could be done in parallel
* Downloads would be more efficient done in one go
* Uploads would be more efficient with bigger chunks
* Looks like mega can support server side copy, but it isn't implemented in go-mega
* Looks like mega can support server-side copy, but it isn't implemented in go-mega
* Upload can set modtime... - set as int64_t - can set ctime and mtime?
*/

@@ -30,6 +30,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
@@ -158,7 +159,10 @@ func parsePath(path string) (root string) {
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||
// retried. It returns the err as a convenience
|
||||
func shouldRetry(err error) (bool, error) {
|
||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
// Let the mega library handle the low level retries
|
||||
return false, err
|
||||
/*
|
||||
@@ -171,8 +175,8 @@ func shouldRetry(err error) (bool, error) {
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
|
||||
rootNode, err := f.findRoot(false)
|
||||
func (f *Fs) readMetaDataForPath(ctx context.Context, remote string) (info *mega.Node, err error) {
|
||||
rootNode, err := f.findRoot(ctx, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -180,7 +184,7 @@ func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
@@ -194,6 +198,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.Wrap(err, "couldn't decrypt password")
|
||||
}
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
|
||||
// cache *mega.Mega on username so we can re-use and share
|
||||
// them between remotes. They are expensive to make as they
|
||||
@@ -204,8 +209,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
defer megaCacheMu.Unlock()
|
||||
srv := megaCache[opt.User]
|
||||
if srv == nil {
|
||||
srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
|
||||
srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
|
||||
srv = mega.New().SetClient(fshttp.NewClient(ctx))
|
||||
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
|
||||
srv.SetLogger(func(format string, v ...interface{}) {
|
||||
fs.Infof("*go-mega*", format, v...)
|
||||
})
|
||||
@@ -228,15 +233,15 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: srv,
|
||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
DuplicateFiles: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
}).Fill(ctx, f)
|
||||
|
||||
// Find the root node and check if it is a file or not
|
||||
_, err = f.findRoot(false)
|
||||
_, err = f.findRoot(ctx, false)
|
||||
switch err {
|
||||
case nil:
|
||||
// root node found and is a directory
|
||||
@@ -306,8 +311,8 @@ func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err
|
||||
// lookupDir looks up the node for the directory of the name given
|
||||
//
|
||||
// if create is true it tries to create the root directory if not found
|
||||
func (f *Fs) lookupDir(dir string) (*mega.Node, error) {
|
||||
rootNode, err := f.findRoot(false)
|
||||
func (f *Fs) lookupDir(ctx context.Context, dir string) (*mega.Node, error) {
|
||||
rootNode, err := f.findRoot(ctx, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -315,15 +320,15 @@ func (f *Fs) lookupDir(dir string) (*mega.Node, error) {
|
||||
}
|
||||
|
||||
// lookupParentDir finds the parent node for the remote passed in
|
||||
func (f *Fs) lookupParentDir(remote string) (dirNode *mega.Node, leaf string, err error) {
|
||||
func (f *Fs) lookupParentDir(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
|
||||
parent, leaf := path.Split(remote)
|
||||
dirNode, err = f.lookupDir(parent)
|
||||
dirNode, err = f.lookupDir(ctx, parent)
|
||||
return dirNode, leaf, err
|
||||
}
|
||||
|
||||
// mkdir makes the directory and any parent directories for the
|
||||
// directory of the name given
|
||||
func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
|
||||
func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *mega.Node, err error) {
|
||||
f.mkdirMu.Lock()
|
||||
defer f.mkdirMu.Unlock()
|
||||
|
||||
@@ -357,7 +362,7 @@ func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error)
|
||||
// create directory called name in node
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
node, err = f.srv.CreateDir(name, node)
|
||||
return shouldRetry(err)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "mkdir create node failed")
|
||||
@@ -367,20 +372,20 @@ func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error)
|
||||
}
|
||||
|
||||
// mkdirParent creates the parent directory of remote
|
||||
func (f *Fs) mkdirParent(remote string) (dirNode *mega.Node, leaf string, err error) {
|
||||
rootNode, err := f.findRoot(true)
|
||||
func (f *Fs) mkdirParent(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
|
||||
rootNode, err := f.findRoot(ctx, true)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
parent, leaf := path.Split(remote)
|
||||
dirNode, err = f.mkdir(rootNode, parent)
|
||||
dirNode, err = f.mkdir(ctx, rootNode, parent)
|
||||
return dirNode, leaf, err
|
||||
}
|
||||
|
||||
// findRoot looks up the root directory node and returns it.
|
||||
//
|
||||
// if create is true it tries to create the root directory if not found
|
||||
func (f *Fs) findRoot(create bool) (*mega.Node, error) {
|
||||
func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
|
||||
f.rootNodeMu.Lock()
|
||||
defer f.rootNodeMu.Unlock()
|
||||
|
||||
@@ -402,7 +407,7 @@ func (f *Fs) findRoot(create bool) (*mega.Node, error) {
|
||||
}
|
||||
|
||||
//..not found so create the root directory
|
||||
f._rootNode, err = f.mkdir(absRoot, f.root)
|
||||
f._rootNode, err = f.mkdir(ctx, absRoot, f.root)
|
||||
return f._rootNode, err
|
||||
}
|
||||
|
||||
@@ -432,7 +437,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||
fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
|
||||
deleteErr := f.pacer.Call(func() (bool, error) {
|
||||
err := f.srv.Delete(item, true)
|
||||
return shouldRetry(err)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if deleteErr != nil {
|
||||
err = deleteErr
|
||||
@@ -446,7 +451,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error) {
|
||||
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.Node) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
@@ -456,7 +461,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error
|
||||
// Set info
|
||||
err = o.setMetaData(info)
|
||||
} else {
|
||||
err = o.readMetaData() // reads info and meta, returning an error
|
||||
err = o.readMetaData(ctx) // reads info and meta, returning an error
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -467,7 +472,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
return f.newObjectWithInfo(ctx, remote, nil)
|
||||
}
|
||||
|
||||
// list the objects into the function supplied
|
||||
@@ -505,7 +510,7 @@ func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, e
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
dirNode, err := f.lookupDir(dir)
|
||||
dirNode, err := f.lookupDir(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -517,7 +522,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
|
||||
entries = append(entries, d)
|
||||
case mega.FILE:
|
||||
o, err := f.newObjectWithInfo(remote, info)
|
||||
o, err := f.newObjectWithInfo(ctx, remote, info)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
@@ -541,8 +546,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
// Returns the dirNode, object, leaf and error
|
||||
//
|
||||
// Used to create new objects
|
||||
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
|
||||
dirNode, leaf, err = f.mkdirParent(remote)
|
||||
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
|
||||
dirNode, leaf, err = f.mkdirParent(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, nil, leaf, err
|
||||
}
|
||||
@@ -564,7 +569,7 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
|
||||
// This will create a duplicate if we upload a new file without
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
|
||||
switch err {
|
||||
case nil:
|
||||
return existingObj, existingObj.Update(ctx, in, src, options...)
|
||||
@@ -590,7 +595,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
size := src.Size()
|
||||
modTime := src.ModTime(ctx)
|
||||
|
||||
o, _, _, err := f.createObject(remote, modTime, size)
|
||||
o, _, _, err := f.createObject(ctx, remote, modTime, size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -599,30 +604,30 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
|
||||
// Mkdir creates the directory if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
rootNode, err := f.findRoot(true)
|
||||
rootNode, err := f.findRoot(ctx, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = f.mkdir(rootNode, dir)
|
||||
_, err = f.mkdir(ctx, rootNode, dir)
|
||||
return errors.Wrap(err, "Mkdir failed")
|
||||
}
|
||||
|
||||
// deleteNode removes a file or directory, observing useTrash
|
||||
func (f *Fs) deleteNode(node *mega.Node) (err error) {
|
||||
func (f *Fs) deleteNode(ctx context.Context, node *mega.Node) (err error) {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err = f.srv.Delete(node, f.opt.HardDelete)
|
||||
return shouldRetry(err)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// purgeCheck removes the directory dir, if check is set then it
|
||||
// refuses to do so if it has anything in
|
||||
func (f *Fs) purgeCheck(dir string, check bool) error {
|
||||
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
f.mkdirMu.Lock()
|
||||
defer f.mkdirMu.Unlock()
|
||||
|
||||
rootNode, err := f.findRoot(false)
|
||||
rootNode, err := f.findRoot(ctx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -643,7 +648,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
|
||||
|
||||
waitEvent := f.srv.WaitEventsStart()
|
||||
|
||||
err = f.deleteNode(dirNode)
|
||||
err = f.deleteNode(ctx, dirNode)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "delete directory node failed")
|
||||
}
|
||||
@@ -661,7 +666,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return f.purgeCheck(dir, true)
|
||||
return f.purgeCheck(ctx, dir, true)
|
||||
}
|
||||
|
||||
// Precision return the precision of this Fs
|
||||
@@ -675,13 +680,13 @@ func (f *Fs) Precision() time.Duration {
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
return f.purgeCheck(dir, false)
|
||||
return f.purgeCheck(ctx, dir, false)
|
||||
}
|
||||
|
||||
// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
|
||||
//
|
||||
// info will be updates
|
||||
func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
|
||||
func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
|
||||
var (
|
||||
dstFs = f
|
||||
srcDirNode, dstDirNode *mega.Node
|
||||
@@ -691,20 +696,20 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
||||
|
||||
if dstRemote != "" {
|
||||
// lookup or create the destination parent directory
|
||||
dstDirNode, dstLeaf, err = dstFs.mkdirParent(dstRemote)
|
||||
dstDirNode, dstLeaf, err = dstFs.mkdirParent(ctx, dstRemote)
|
||||
} else {
|
||||
// find or create the parent of the root directory
|
||||
absRoot := dstFs.srv.FS.GetRoot()
|
||||
dstParent, dstLeaf = path.Split(dstFs.root)
|
||||
dstDirNode, err = dstFs.mkdir(absRoot, dstParent)
|
||||
dstDirNode, err = dstFs.mkdir(ctx, absRoot, dstParent)
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "server side move failed to make dst parent dir")
|
||||
return errors.Wrap(err, "server-side move failed to make dst parent dir")
|
||||
}
|
||||
|
||||
if srcRemote != "" {
|
||||
// lookup the existing parent directory
|
||||
srcDirNode, srcLeaf, err = srcFs.lookupParentDir(srcRemote)
|
||||
srcDirNode, srcLeaf, err = srcFs.lookupParentDir(ctx, srcRemote)
|
||||
} else {
|
||||
// lookup the existing root parent
|
||||
absRoot := srcFs.srv.FS.GetRoot()
|
||||
@@ -712,7 +717,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
||||
srcDirNode, err = f.findDir(absRoot, srcParent)
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "server side move failed to lookup src parent dir")
|
||||
return errors.Wrap(err, "server-side move failed to lookup src parent dir")
|
||||
}
|
||||
|
||||
// move the object into its new directory if required
|
||||
@@ -720,10 +725,10 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
||||
//log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName())
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err = f.srv.Move(info, dstDirNode)
|
||||
return shouldRetry(err)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "server side move failed")
|
||||
return errors.Wrap(err, "server-side move failed")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -734,10 +739,10 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
||||
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
|
||||
return shouldRetry(err)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "server side rename failed")
|
||||
return errors.Wrap(err, "server-side rename failed")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -746,7 +751,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
||||
return nil
|
||||
}
|
||||
|
||||
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -766,7 +771,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    }

    // Do the move
-   err := f.move(remote, srcObj.fs, srcObj.remote, srcObj.info)
+   err := f.move(ctx, remote, srcObj.fs, srcObj.remote, srcObj.info)
    if err != nil {
        return nil, err
    }
@@ -781,7 +786,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -797,13 +802,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    }

    // find the source
-   info, err := srcFs.lookupDir(srcRemote)
+   info, err := srcFs.lookupDir(ctx, srcRemote)
    if err != nil {
        return err
    }

    // check the destination doesn't exist
-   _, err = dstFs.lookupDir(dstRemote)
+   _, err = dstFs.lookupDir(ctx, dstRemote)
    if err == nil {
        return fs.ErrorDirExists
    } else if err != fs.ErrorDirNotFound {
@@ -811,7 +816,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    }

    // Do the move
-   err = f.move(dstRemote, srcFs, srcRemote, info)
+   err = f.move(ctx, dstRemote, srcFs, srcRemote, info)
    if err != nil {
        return err
    }
@@ -837,7 +842,7 @@ func (f *Fs) Hashes() hash.Set {

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
-   root, err := f.findRoot(false)
+   root, err := f.findRoot(ctx, false)
    if err != nil {
        return "", errors.Wrap(err, "PublicLink failed to find root node")
    }
@@ -885,7 +890,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
            fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
            err = f.pacer.Call(func() (bool, error) {
                err = f.srv.Move(info, dstDirNode)
-               return shouldRetry(err)
+               return shouldRetry(ctx, err)
            })
            if err != nil {
                return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
@@ -893,7 +898,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
        }
        // rmdir (into trash) the now empty source directory
        fs.Infof(srcDir, "removing empty directory")
-       err = f.deleteNode(srcDirNode)
+       err = f.deleteNode(ctx, srcDirNode)
        if err != nil {
            return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
        }
@@ -907,7 +912,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    var err error
    err = f.pacer.Call(func() (bool, error) {
        q, err = f.srv.GetQuota()
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "failed to get Mega Quota")
@@ -962,11 +967,11 @@ func (o *Object) setMetaData(info *mega.Node) (err error) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
    if o.info != nil {
        return nil
    }
-   info, err := o.fs.readMetaDataForPath(o.remote)
+   info, err := o.fs.readMetaDataForPath(ctx, o.remote)
    if err != nil {
        if err == fs.ErrorDirNotFound {
            err = fs.ErrorObjectNotFound
@@ -997,6 +1002,7 @@ func (o *Object) Storable() bool {

// openObject represents a download in progress
type openObject struct {
+   ctx context.Context
    mu  sync.Mutex
    o   *Object
    d   *mega.Download
@@ -1007,14 +1013,14 @@ type openObject struct {
}

// get the next chunk
-func (oo *openObject) getChunk() (err error) {
+func (oo *openObject) getChunk(ctx context.Context) (err error) {
    if oo.id >= oo.d.Chunks() {
        return io.EOF
    }
    var chunk []byte
    err = oo.o.fs.pacer.Call(func() (bool, error) {
        chunk, err = oo.d.DownloadChunk(oo.id)
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    if err != nil {
        return err
@@ -1044,7 +1050,7 @@ func (oo *openObject) Read(p []byte) (n int, err error) {
        oo.skip -= int64(size)
    }
    if len(oo.chunk) == 0 {
-       err = oo.getChunk()
+       err = oo.getChunk(oo.ctx)
        if err != nil {
            return 0, err
        }
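The new ctx field on openObject exists because io.Reader's Read has no context parameter: the context from Open is captured once and reused for every chunk fetch. A stand-alone sketch of the same idea, with the Mega types reduced to a fetch callback (names here are illustrative):

package sketch

import "context"

// chunkReader adapts a chunked download to io.Reader while still
// honouring the context that was passed to Open.
type chunkReader struct {
	ctx   context.Context                            // saved at Open time
	buf   []byte                                     // unread rest of the current chunk
	fetch func(ctx context.Context) ([]byte, error)  // returns io.EOF when done
}

func (r *chunkReader) Read(p []byte) (int, error) {
	if len(r.buf) == 0 {
		chunk, err := r.fetch(r.ctx) // Read has no ctx of its own
		if err != nil {
			return 0, err
		}
		r.buf = chunk
	}
	n := copy(p, r.buf)
	r.buf = r.buf[n:]
	return n, nil
}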
@@ -1067,7 +1073,7 @@ func (oo *openObject) Close() (err error) {
    }
    err = oo.o.fs.pacer.Call(func() (bool, error) {
        err = oo.d.Finish()
-       return shouldRetry(err)
+       return shouldRetry(oo.ctx, err)
    })
    if err != nil {
        return errors.Wrap(err, "failed to finish download")
@@ -1095,13 +1101,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    var d *mega.Download
    err = o.fs.pacer.Call(func() (bool, error) {
        d, err = o.fs.srv.NewDownload(o.info)
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "open download file failed")
    }

    oo := &openObject{
+       ctx:  ctx,
        o:    o,
        d:    d,
        skip: offset,
@@ -1124,7 +1131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    remote := o.Remote()

    // Create the parent directory
-   dirNode, leaf, err := o.fs.mkdirParent(remote)
+   dirNode, leaf, err := o.fs.mkdirParent(ctx, remote)
    if err != nil {
        return errors.Wrap(err, "update make parent dir failed")
    }
@@ -1132,7 +1139,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    var u *mega.Upload
    err = o.fs.pacer.Call(func() (bool, error) {
        u, err = o.fs.srv.NewUpload(dirNode, o.fs.opt.Enc.FromStandardName(leaf), size)
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    if err != nil {
        return errors.Wrap(err, "upload file failed to create session")
@@ -1153,7 +1160,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

        err = o.fs.pacer.Call(func() (bool, error) {
            err = u.UploadChunk(id, chunk)
-           return shouldRetry(err)
+           return shouldRetry(ctx, err)
        })
        if err != nil {
            return errors.Wrap(err, "upload file failed to upload chunk")
@@ -1164,7 +1171,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    var info *mega.Node
    err = o.fs.pacer.Call(func() (bool, error) {
        info, err = u.Finish()
-       return shouldRetry(err)
+       return shouldRetry(ctx, err)
    })
    if err != nil {
        return errors.Wrap(err, "failed to finish upload")
@@ -1172,7 +1179,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

    // If the upload succeeded and the original object existed, then delete it
    if o.info != nil {
-       err = o.fs.deleteNode(o.info)
+       err = o.fs.deleteNode(ctx, o.info)
        if err != nil {
            return errors.Wrap(err, "upload failed to remove old version")
        }
@@ -1184,7 +1191,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
-   err := o.fs.deleteNode(o.info)
+   err := o.fs.deleteNode(ctx, o.info)
    if err != nil {
        return errors.Wrap(err, "Remove object failed")
    }

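The Update hunks show the whole upload lifecycle: NewUpload opens a session, UploadChunk runs once per chunk inside the pacer, and Finish commits the file before the old node is deleted. Compressed into one function, without the pacing and error wrapping from the diff (Chunks and ChunkLocation are the go-mega helpers the backend already relies on):

// uploadAll drives a go-mega upload session from an io.Reader.
// A sketch of the flow in the hunks above; retries omitted.
func uploadAll(u *mega.Upload, in io.Reader) (*mega.Node, error) {
	for id := 0; id < u.Chunks(); id++ {
		_, chunkSize, err := u.ChunkLocation(id)
		if err != nil {
			return nil, err
		}
		chunk := make([]byte, chunkSize)
		if _, err := io.ReadFull(in, chunk); err != nil {
			return nil, err
		}
		if err := u.UploadChunk(id, chunk); err != nil {
			return nil, err
		}
	}
	return u.Finish() // commits the upload and returns the new node
}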
@@ -221,8 +221,8 @@ func (f *Fs) setRoot(root string) {
    f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

-// NewFs contstructs an Fs from the path, bucket:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+// NewFs constructs an Fs from the path, bucket:path
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
@@ -241,7 +241,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        WriteMimeType:     true,
        BucketBased:       true,
        BucketBasedRootOK: true,
-   }).Fill(f)
+   }).Fill(ctx, f)
    if f.rootBucket != "" && f.rootDirectory != "" {
        od := buckets.getObjectData(f.rootBucket, f.rootDirectory)
        if od != nil {
@@ -462,7 +462,7 @@ func (f *Fs) Precision() time.Duration {
    return time.Nanosecond
}

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -592,7 +592,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        data:     data,
        hash:     "",
        modTime:  src.ModTime(ctx),
-       mimeType: fs.MimeType(ctx, o),
+       mimeType: fs.MimeType(ctx, src),
    }
    buckets.updateObjectData(bucket, bucketPath, o.od)
    return nil

@@ -253,8 +253,10 @@ type MoveItemRequest struct {
//CreateShareLinkRequest is the request to create a sharing link
//Always Type:view and Scope:anonymous for public sharing
type CreateShareLinkRequest struct {
-   Type  string `json:"type"`            //Link type in View, Edit or Embed
-   Scope string `json:"scope,omitempty"` //Optional. Scope in anonymousi, organization
+   Type     string     `json:"type"`                         // Link type in View, Edit or Embed
+   Scope    string     `json:"scope,omitempty"`              // Scope in anonymous, organization
+   Password string     `json:"password,omitempty"`           // The password of the sharing link that is set by the creator. Optional and OneDrive Personal only.
+   Expiry   *time.Time `json:"expirationDateTime,omitempty"` // A String with format of yyyy-MM-ddTHH:mm:ssZ of DateTime indicates the expiration time of the permission.
}
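The two added fields are what the new link_password option and the link expiry plumb into. A hedged example of filling the request (values invented; Expiry is a pointer so a nil value drops expirationDateTime from the JSON entirely):

func exampleShareRequest() CreateShareLinkRequest {
	expiry := time.Now().Add(24 * time.Hour)
	return CreateShareLinkRequest{
		Type:     "view",      // read-only link
		Scope:    "anonymous", // anyone with the link may use it
		Password: "s3cret",    // honoured on OneDrive Personal only
		Expiry:   &expiry,     // omitted from the JSON when nil
	}
}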

//CreateShareLinkResponse is the response from CreateShareLinkRequest
@@ -281,6 +283,7 @@ type CreateShareLinkResponse struct {
type AsyncOperationStatus struct {
    PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
    Status             string  `json:"status"`             // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
+   ErrorCode          string  `json:"errorCode"`          // Not officially documented :(
}

// GetID returns a normalized ID of the item

@@ -11,7 +11,9 @@ import (
    "io"
    "log"
    "net/http"
+   "net/url"
    "path"
+   "regexp"
    "strconv"
    "strings"
    "sync"
@@ -45,7 +47,6 @@ const (
    minSleep      = 10 * time.Millisecond
    maxSleep      = 2 * time.Second
    decayConstant = 2 // bigger for slower decay, exponential
-   graphURL      = "https://graph.microsoft.com/v1.0"
    configDriveID     = "drive_id"
    configDriveType   = "drive_type"
    driveTypePersonal = "personal"
@@ -53,22 +54,40 @@ const (
    driveTypeSharepoint = "documentLibrary"
    defaultChunkSize    = 10 * fs.MebiByte
    chunkSizeMultiple   = 320 * fs.KibiByte
+
+   regionGlobal = "global"
+   regionUS     = "us"
+   regionDE     = "de"
+   regionCN     = "cn"
)

// Globals
var (
+   authPath  = "/common/oauth2/v2.0/authorize"
+   tokenPath = "/common/oauth2/v2.0/token"
+
    // Description of how to auth for this app for a business account
    oauthConfig = &oauth2.Config{
-       Endpoint: oauth2.Endpoint{
-           AuthURL:  "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
-           TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
-       },
        Scopes:       []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
        ClientID:     rcloneClientID,
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.RedirectLocalhostURL,
    }
+
+   graphAPIEndpoint = map[string]string{
+       "global": "https://graph.microsoft.com",
+       "us":     "https://graph.microsoft.us",
+       "de":     "https://graph.microsoft.de",
+       "cn":     "https://microsoftgraph.chinacloudapi.cn",
+   }
+
+   authEndpoint = map[string]string{
+       "global": "https://login.microsoftonline.com",
+       "us":     "https://login.microsoftonline.us",
+       "de":     "https://login.microsoftonline.de",
+       "cn":     "https://login.chinacloudapi.cn",
+   }

    // QuickXorHashType is the hash.Type for OneDrive
    QuickXorHashType hash.Type
)

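With the fixed graphURL constant gone, every URL is now derived from the region maps above. A small illustration of how the pieces combine (the helper is not in the diff, just a restatement of the lookups it performs):

// endpointsFor resolves the Graph and OAuth URLs for a configured region.
func endpointsFor(region string) (graphURL, authURL, tokenURL string) {
	graphURL = graphAPIEndpoint[region] + "/v1.0"
	authURL = authEndpoint[region] + authPath
	tokenURL = authEndpoint[region] + tokenPath
	return
}

// endpointsFor("cn") yields:
//   graph: https://microsoftgraph.chinacloudapi.cn/v1.0
//   auth:  https://login.chinacloudapi.cn/common/oauth2/v2.0/authorize
//   token: https://login.chinacloudapi.cn/common/oauth2/v2.0/token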
@@ -80,16 +99,22 @@ func init() {
    Name:        "onedrive",
    Description: "Microsoft OneDrive",
    NewFs:       NewFs,
-   Config: func(name string, m configmap.Mapper) {
-       ctx := context.TODO()
-       err := oauthutil.Config("onedrive", name, m, oauthConfig, nil)
+   Config: func(ctx context.Context, name string, m configmap.Mapper) {
+       region, _ := m.Get("region")
+       graphURL := graphAPIEndpoint[region] + "/v1.0"
+       oauthConfig.Endpoint = oauth2.Endpoint{
+           AuthURL:  authEndpoint[region] + authPath,
+           TokenURL: authEndpoint[region] + tokenPath,
+       }
+       ci := fs.GetConfig(ctx)
+       err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
        if err != nil {
            log.Fatalf("Failed to configure token: %v", err)
            return
        }

        // Stop if we are running non-interactive config
-       if fs.Config.AutoConfirm {
+       if ci.AutoConfirm {
            return
        }

@@ -111,7 +136,7 @@
            Sites []siteResource `json:"value"`
        }

-       oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
+       oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
        if err != nil {
            log.Fatalf("Failed to configure OneDrive: %v", err)
        }
@@ -120,9 +145,18 @@
        var opts rest.Opts
        var finalDriveID string
        var siteID string
+       var relativePath string
        switch config.Choose("Your choice",
-           []string{"onedrive", "sharepoint", "driveid", "siteid", "search"},
-           []string{"OneDrive Personal or Business", "Root Sharepoint site", "Type in driveID", "Type in SiteID", "Search a Sharepoint site"},
+           []string{"onedrive", "sharepoint", "url", "search", "driveid", "siteid", "path"},
+           []string{
+               "OneDrive Personal or Business",
+               "Root Sharepoint site",
+               "Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
+               "Search for a Sharepoint site",
+               "Type in driveID (advanced)",
+               "Type in SiteID (advanced)",
+               "Sharepoint server-relative path (advanced, e.g. /teams/hr)",
+           },
            false) {

        case "onedrive":
@@ -143,6 +177,20 @@
        case "siteid":
            fmt.Printf("Paste your Site ID here> ")
            siteID = config.ReadLine()
+       case "url":
+           fmt.Println("Example: \"https://contoso.sharepoint.com/sites/mysite\" or \"mysite\"")
+           fmt.Printf("Paste your Site URL here> ")
+           siteURL := config.ReadLine()
+           re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
+           match := re.FindStringSubmatch(siteURL)
+           if len(match) == 2 {
+               relativePath = "/sites/" + match[1]
+           } else {
+               relativePath = "/sites/" + siteURL
+           }
+       case "path":
+           fmt.Printf("Enter server-relative URL here> ")
+           relativePath = config.ReadLine()
        case "search":
            fmt.Printf("What to search for> ")
            searchTerm := config.ReadLine()
@@ -169,6 +217,21 @@
            }
        }

+       // if we use server-relative URL for finding the drive
+       if relativePath != "" {
+           opts = rest.Opts{
+               Method:  "GET",
+               RootURL: graphURL,
+               Path:    "/sites/root:" + relativePath,
+           }
+           site := siteResource{}
+           _, err := srv.CallJSON(ctx, &opts, nil, &site)
+           if err != nil {
+               log.Fatalf("Failed to query available site by relative path: %v", err)
+           }
+           siteID = site.SiteID
+       }
+
        // if we have a siteID we need to ask for the drives
        if siteID != "" {
            opts = rest.Opts{
@@ -233,15 +296,33 @@

        fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
        // This does not work, YET :)
-       if !config.ConfirmWithConfig(m, "config_drive_ok", true) {
+       if !config.ConfirmWithConfig(ctx, m, "config_drive_ok", true) {
            log.Fatalf("Cancelled by user")
        }

        m.Set(configDriveID, finalDriveID)
        m.Set(configDriveType, rootItem.ParentReference.DriveType)
        config.SaveConfig()
    },
    Options: append(oauthutil.SharedOptions, []fs.Option{{
+       Name:    "region",
+       Help:    "Choose national cloud region for OneDrive.",
+       Default: "global",
+       Examples: []fs.OptionExample{
+           {
+               Value: regionGlobal,
+               Help:  "Microsoft Cloud Global",
+           }, {
+               Value: regionUS,
+               Help:  "Microsoft Cloud for US Government",
+           }, {
+               Value: regionDE,
+               Help:  "Microsoft Cloud Germany",
+           }, {
+               Value: regionCN,
+               Help:  "Azure and Office 365 operated by 21Vianet in China",
+           },
+       },
+   }, {
        Name: "chunk_size",
        Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).

@@ -274,12 +355,16 @@ listing, set this option.`,
    }, {
        Name:    "server_side_across_configs",
        Default: false,
-       Help: `Allow server side operations (eg copy) to work across different onedrive configs.
+       Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.

-This can be useful if you wish to do a server side copy between two
-different Onedrives. Note that this isn't enabled by default
-because it isn't easy to tell if it will work between any two
-configurations.`,
+This will only work if you are copying between two OneDrive *Personal* drives AND
+the files to copy are already shared between them. In other cases, rclone will
+fall back to normal copy (which will be slightly slower).`,
        Advanced: true,
    }, {
+       Name:     "list_chunk",
+       Help:     "Size of listing chunk.",
+       Default:  1000,
+       Advanced: true,
+   }, {
        Name: "no_versions",
@@ -296,6 +381,41 @@ modification time and removes all but the last version.

**NB** Onedrive personal can't currently delete versions so don't use
this flag there.
`,
        Advanced: true,
    }, {
+       Name:     "link_scope",
+       Default:  "anonymous",
+       Help:     `Set the scope of the links created by the link command.`,
+       Advanced: true,
+       Examples: []fs.OptionExample{{
+           Value: "anonymous",
+           Help:  "Anyone with the link has access, without needing to sign in. This may include people outside of your organization. Anonymous link support may be disabled by an administrator.",
+       }, {
+           Value: "organization",
+           Help:  "Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.",
+       }},
+   }, {
+       Name:     "link_type",
+       Default:  "view",
+       Help:     `Set the type of the links created by the link command.`,
+       Advanced: true,
+       Examples: []fs.OptionExample{{
+           Value: "view",
+           Help:  "Creates a read-only link to the item.",
+       }, {
+           Value: "edit",
+           Help:  "Creates a read-write link to the item.",
+       }, {
+           Value: "embed",
+           Help:  "Creates an embeddable link to the item.",
+       }},
+   }, {
+       Name:    "link_password",
+       Default: "",
+       Help: `Set the password for links created by the link command.

At the time of writing this only works with OneDrive personal paid accounts.
`,
+       Advanced: true,
+   }, {
@@ -311,8 +431,6 @@ this flag there.
// | (vertical line)  -> '｜' // FULLWIDTH VERTICAL LINE
// ? (question mark)  -> '？' // FULLWIDTH QUESTION MARK
// * (asterisk)       -> '＊' // FULLWIDTH ASTERISK
-// # (number sign)    -> '＃' // FULLWIDTH NUMBER SIGN
-// % (percent sign)   -> '％' // FULLWIDTH PERCENT SIGN
//
// Folder names cannot begin with a tilde ('~')
// List of replaced characters:
@@ -337,7 +455,6 @@ this flag there.
        // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding
        Default: (encoder.Display |
            encoder.EncodeBackSlash |
-           encoder.EncodeHashPercent |
            encoder.EncodeLeftSpace |
            encoder.EncodeLeftTilde |
            encoder.EncodeRightPeriod |
@@ -350,12 +467,17 @@ this flag there.

// Options defines the configuration for this backend
type Options struct {
+   Region                  string               `config:"region"`
    ChunkSize               fs.SizeSuffix        `config:"chunk_size"`
    DriveID                 string               `config:"drive_id"`
    DriveType               string               `config:"drive_type"`
    ExposeOneNoteFiles      bool                 `config:"expose_onenote_files"`
    ServerSideAcrossConfigs bool                 `config:"server_side_across_configs"`
+   ListChunk               int64                `config:"list_chunk"`
    NoVersions              bool                 `config:"no_versions"`
+   LinkScope               string               `config:"link_scope"`
+   LinkType                string               `config:"link_type"`
+   LinkPassword            string               `config:"link_password"`
    Enc                     encoder.MultiEncoder `config:"encoding"`
}

@@ -364,6 +486,7 @@ type Fs struct {
    name     string             // name of this remote
    root     string             // the path we are working on
    opt      Options            // parsed options
+   ci       *fs.ConfigInfo     // global config
    features *fs.Features       // optional features
    srv      *rest.Client       // the connection to the one drive server
    dirCache *dircache.DirCache // Map of directory path to directory id
@@ -427,9 +550,15 @@ var retryErrorCodes = []int{
    509, // Bandwidth Limit Exceeded
}

+var gatewayTimeoutError sync.Once
+var errAsyncJobAccessDenied = errors.New("async job failed - access denied")
+
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
+func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+   if fserrors.ContextError(ctx, &err) {
+       return false, err
+   }
    retry := false
    if resp != nil {
        switch resp.StatusCode {
@@ -451,6 +580,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
                fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter)
            }
        }
+       case 504: // Gateway timeout
+           gatewayTimeoutError.Do(func() {
+               fs.Errorf(nil, "%v: upload chunks may be taking too long - try reducing --onedrive-chunk-size or decreasing --transfers", err)
+           })
        case 507: // Insufficient Storage
            return false, fserrors.FatalError(err)
        }
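Two small ideas land in shouldRetry at once: the context check aborts the retry loop immediately, and the sync.Once means the 504 advice prints a single time however many chunks hit it. A self-contained sketch of the status classification (codes from retryErrorCodes above; the function name is illustrative):

package sketch

import (
	"log"
	"sync"
)

var gatewayTimeoutWarning sync.Once

// classify maps an HTTP status to a retry decision, warning once on 504.
func classify(status int) (retry, fatal bool) {
	switch status {
	case 429, 500, 502, 503, 509:
		return true, false // transient: back off and retry
	case 504:
		gatewayTimeoutWarning.Do(func() {
			log.Println("gateway timeout: try smaller chunks or fewer transfers")
		})
		return true, false
	case 507:
		return false, true // insufficient storage: retrying cannot help
	}
	return false, false
}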
@@ -468,13 +601,11 @@
//
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
-   if relPath != "" {
-       relPath = "/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(relPath)))
-   }
-   opts := newOptsCall(normalizedID, "GET", ":"+relPath)
+   opts, _ := f.newOptsCallWithIDPath(normalizedID, relPath, true, "GET", "")

    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })

    return info, resp, err
@@ -486,20 +617,11 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It

    if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
        var opts rest.Opts
-       if len(path) == 0 {
-           opts = rest.Opts{
-               Method: "GET",
-               Path:   "/root",
-           }
-       } else {
-           opts = rest.Opts{
-               Method: "GET",
-               Path:   "/root:/" + rest.URLPathEscape(f.opt.Enc.FromStandardPath(path)),
-           }
-       }
+       opts = f.newOptsCallWithPath(ctx, path, "GET", "")
+       opts.Path = strings.TrimSuffix(opts.Path, ":")
        err = f.pacer.Call(func() (bool, error) {
            resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-           return shouldRetry(resp, err)
+           return shouldRetry(ctx, resp, err)
        })
        return info, resp, err
    }
@@ -590,8 +712,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
}

// NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-   ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
@@ -608,27 +729,35 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
    }

+   rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
+   oauthConfig.Endpoint = oauth2.Endpoint{
+       AuthURL:  authEndpoint[opt.Region] + authPath,
+       TokenURL: authEndpoint[opt.Region] + tokenPath,
+   }
+
    root = parsePath(root)
-   oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
+   oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
    if err != nil {
        return nil, errors.Wrap(err, "failed to configure OneDrive")
    }

+   ci := fs.GetConfig(ctx)
    f := &Fs{
        name:      name,
        root:      root,
        opt:       *opt,
+       ci:        ci,
        driveID:   opt.DriveID,
        driveType: opt.DriveType,
-       srv:       rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
-       pacer:     fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+       srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
+       pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
    }
    f.features = (&fs.Features{
        CaseInsensitive:         true,
        ReadMimeType:            true,
        CanHaveEmptyDirectories: true,
        ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
-   }).Fill(f)
+   }).Fill(ctx, f)
    f.srv.SetErrorHandler(errorHandler)

    // Renew the token in the background
@@ -741,14 +870,14 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
    // fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
    var resp *http.Response
    var info *api.Item
-   opts := newOptsCall(dirID, "POST", "/children")
+   opts := f.newOptsCall(dirID, "POST", "/children")
    mkdir := api.CreateItemRequest{
        Name:             f.opt.Enc.FromStandardName(leaf),
        ConflictBehavior: "fail",
    }
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        //fmt.Printf("...Error %v\n", err)
@@ -773,14 +902,14 @@ type listAllFn func(*api.Item) bool
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
    // Top parameter asks for bigger pages of data
    // https://dev.onedrive.com/odata/optional-query-parameters.htm
-   opts := newOptsCall(dirID, "GET", "/children?$top=1000")
+   opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
OUTER:
    for {
        var result api.ListChildrenResponse
        var resp *http.Response
        err = f.pacer.Call(func() (bool, error) {
            resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-           return shouldRetry(resp, err)
+           return shouldRetry(ctx, resp, err)
        })
        if err != nil {
            return found, errors.Wrap(err, "couldn't list files")
@@ -912,12 +1041,12 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
-   opts := newOptsCall(id, "DELETE", "")
+   opts := f.newOptsCall(id, "DELETE", "")
    opts.NoResponse = true

    return f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.Call(ctx, &opts)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
}

@@ -967,7 +1096,7 @@ func (f *Fs) Precision() time.Duration {

// waitForJob waits for the job with status in url to complete
func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
-   deadline := time.Now().Add(fs.Config.Timeout)
+   deadline := time.Now().Add(f.ci.TimeoutOrInfinite())
    for time.Now().Before(deadline) {
        var resp *http.Response
        var err error
@@ -992,10 +1121,12 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {

        switch status.Status {
        case "failed":
-       case "deleteFailed":
-           {
-               return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
-           }
+           if strings.HasPrefix(status.ErrorCode, "AccessDenied_") {
+               return errAsyncJobAccessDenied
+           }
+           fallthrough
+       case "deleteFailed":
+           return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
        case "completed":
            err = o.readMetaData(ctx)
            return errors.Wrapf(err, "async operation completed but readMetaData failed")
@@ -1003,10 +1134,10 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {

        time.Sleep(1 * time.Second)
    }
-   return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
+   return errors.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
}

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
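waitForJob is a plain poll loop: re-read the status URL once a second until the configured timeout, with the new AccessDenied_ check feeding the fallback path in Copy. The loop reduced to its skeleton (the fetch callback stands in for the HTTP call; the error variable mirrors the package-level one from the diff):

package sketch

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"
)

var errAsyncJobAccessDenied = errors.New("async job failed - access denied")

// pollJob polls fetch until the job completes, fails, or the deadline passes.
func pollJob(ctx context.Context, timeout time.Duration,
	fetch func(ctx context.Context) (status, errorCode string, err error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		status, errorCode, err := fetch(ctx)
		if err != nil {
			return err
		}
		switch status {
		case "failed":
			if strings.HasPrefix(errorCode, "AccessDenied_") {
				return errAsyncJobAccessDenied // caller falls back to normal copy
			}
			fallthrough
		case "deleteFailed":
			return fmt.Errorf("async operation returned %q", status)
		case "completed":
			return nil
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("async operation didn't complete after %v", timeout)
}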
@@ -1021,6 +1152,17 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        fs.Debugf(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }
+   if f.driveType != srcObj.fs.driveType {
+       fs.Debugf(src, "Can't server-side copy - drive types differ")
+       return nil, fs.ErrorCantCopy
+   }
+
+   // For OneDrive Business, this is only supported within the same drive
+   if f.driveType != driveTypePersonal && srcObj.fs.driveID != f.driveID {
+       fs.Debugf(src, "Can't server-side copy - cross-drive but not OneDrive Personal")
+       return nil, fs.ErrorCantCopy
+   }
+
    err := srcObj.readMetaData(ctx)
    if err != nil {
        return nil, err
@@ -1042,11 +1184,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    }

    // Copy the object
-   opts := newOptsCall(srcObj.id, "POST", "/copy")
+   // The query param is a workaround for OneDrive Business for #4590
+   opts := f.newOptsCall(srcObj.id, "POST", "/copy?@microsoft.graph.conflictBehavior=replace")
    opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
    opts.NoResponse = true

-   id, dstDriveID, _ := parseNormalizedID(directoryID)
+   id, dstDriveID, _ := f.parseNormalizedID(directoryID)

    replacedLeaf := f.opt.Enc.FromStandardName(leaf)
    copyReq := api.CopyItemRequest{
@@ -1059,7 +1202,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err
@@ -1073,6 +1216,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

    // Wait for job to finish
    err = f.waitForJob(ctx, location, dstObj)
+   if err == errAsyncJobAccessDenied {
+       fs.Debugf(src, "Server-side copy failed - file not shared between drives")
+       return nil, fs.ErrorCantCopy
+   }
    if err != nil {
        return nil, err
    }
@@ -1097,7 +1244,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, false)
}

-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -1119,8 +1266,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
        return nil, err
    }

-   id, dstDriveID, _ := parseNormalizedID(directoryID)
-   _, srcObjDriveID, _ := parseNormalizedID(srcObj.id)
+   id, dstDriveID, _ := f.parseNormalizedID(directoryID)
+   _, srcObjDriveID, _ := f.parseNormalizedID(srcObj.id)

    if f.canonicalDriveID(dstDriveID) != srcObj.fs.canonicalDriveID(srcObjDriveID) {
        // https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
@@ -1130,7 +1277,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    }

    // Move the object
-   opts := newOptsCall(srcObj.id, "PATCH", "")
+   opts := f.newOptsCall(srcObj.id, "PATCH", "")

    move := api.MoveItemRequest{
        Name: f.opt.Enc.FromStandardName(leaf),
@@ -1148,7 +1295,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    var info api.Item
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err
@@ -1162,7 +1309,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1181,8 +1328,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
        return err
    }

-   parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
-   _, srcDriveID, _ := parseNormalizedID(srcID)
+   parsedDstDirID, dstDriveID, _ := f.parseNormalizedID(dstDirectoryID)
+   _, srcDriveID, _ := f.parseNormalizedID(srcID)

    if f.canonicalDriveID(dstDriveID) != srcFs.canonicalDriveID(srcDriveID) {
        // https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
@@ -1198,7 +1345,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    }

    // Do the move
-   opts := newOptsCall(srcID, "PATCH", "")
+   opts := f.newOptsCall(srcID, "PATCH", "")
    move := api.MoveItemRequest{
        Name: f.opt.Enc.FromStandardName(dstLeaf),
        ParentReference: &api.ItemReference{
@@ -1215,7 +1362,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    var info api.Item
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return err
@@ -1241,12 +1388,16 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "about failed")
    }
    q := drive.Quota
+   // On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
+   if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
+       return &fs.Usage{}, nil
+   }
    usage = &fs.Usage{
        Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
        Used:  fs.NewUsageValue(q.Used),  // bytes in use
@@ -1270,18 +1421,24 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
    if err != nil {
        return "", err
    }
-   opts := newOptsCall(info.GetID(), "POST", "/createLink")
+   opts := f.newOptsCall(info.GetID(), "POST", "/createLink")

    share := api.CreateShareLinkRequest{
-       Type:  "view",
-       Scope: "anonymous",
+       Type:     f.opt.LinkType,
+       Scope:    f.opt.LinkScope,
+       Password: f.opt.LinkPassword,
    }

+   if expire < fs.DurationOff {
+       expiry := time.Now().Add(time.Duration(expire))
+       share.Expiry = &expiry
+   }
+
    var resp *http.Response
    var result api.CreateShareLinkResponse
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        fmt.Println(err)
@@ -1292,7 +1449,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,

// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
-   token := make(chan struct{}, fs.Config.Checkers)
+   token := make(chan struct{}, f.ci.Checkers)
    var wg sync.WaitGroup
    err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
        err = entries.ForObjectError(func(obj fs.Object) error {
@@ -1322,11 +1479,11 @@ func (f *Fs) CleanUp(ctx context.Context) error {

// Finds and removes any old versions for o
func (o *Object) deleteVersions(ctx context.Context) error {
-   opts := newOptsCall(o.id, "GET", "/versions")
+   opts := o.fs.newOptsCall(o.id, "GET", "/versions")
    var versions api.VersionsResponse
    err := o.fs.pacer.Call(func() (bool, error) {
        resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return err
@@ -1349,11 +1506,11 @@ func (o *Object) deleteVersion(ctx context.Context, ID string) error {
        return nil
    }
    fs.Infof(o, "removing version %q", ID)
-   opts := newOptsCall(o.id, "DELETE", "/versions/"+ID)
+   opts := o.fs.newOptsCall(o.id, "DELETE", "/versions/"+ID)
    opts.NoResponse = true
    return o.fs.pacer.Call(func() (bool, error) {
        resp, err := o.fs.srv.Call(ctx, &opts)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
}

@@ -1494,21 +1651,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
-   var opts rest.Opts
-   leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
-   trueDirID, drive, rootURL := parseNormalizedID(directoryID)
-   if drive != "" {
-       opts = rest.Opts{
-           Method:  "PATCH",
-           RootURL: rootURL,
-           Path:    "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
-       }
-   } else {
-       opts = rest.Opts{
-           Method: "PATCH",
-           Path:   "/root:/" + withTrailingColon(rest.URLPathEscape(o.srvPath())),
-       }
-   }
+   opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PATCH", "")
    update := api.SetFileSystemInfo{
        FileSystemInfo: api.FileSystemInfoFacet{
            CreatedDateTime: api.Timestamp(modTime),
@@ -1518,7 +1661,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
    var info *api.Item
    err := o.fs.pacer.Call(func() (bool, error) {
        resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    // Remove versions if required
    if o.fs.opt.NoVersions {
@@ -1555,12 +1698,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

    fs.FixRangeOption(options, o.size)
    var resp *http.Response
-   opts := newOptsCall(o.id, "GET", "/content")
+   opts := o.fs.newOptsCall(o.id, "GET", "/content")
    opts.Options = options

    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(ctx, &opts)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err
@@ -1575,22 +1718,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (response *api.CreateUploadResponse, err error) {
-   leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
-   id, drive, rootURL := parseNormalizedID(directoryID)
-   var opts rest.Opts
-   if drive != "" {
-       opts = rest.Opts{
-           Method:  "POST",
-           RootURL: rootURL,
-           Path: fmt.Sprintf("/%s/items/%s:/%s:/createUploadSession",
-               drive, id, rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
-       }
-   } else {
-       opts = rest.Opts{
-           Method: "POST",
-           Path:   "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/createUploadSession",
-       }
-   }
+   opts := o.fs.newOptsCallWithPath(ctx, o.remote, "POST", "/createUploadSession")
    createRequest := api.CreateUploadRequest{}
    createRequest.Item.FileSystemInfo.CreatedDateTime = api.Timestamp(modTime)
    createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
@@ -1603,7 +1731,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
                err = errors.New(err.Error() + " (is it a OneNote file?)")
            }
        }
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    return response, err
}
@@ -1618,7 +1746,7 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
    var resp *http.Response
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return 0, err
@@ -1678,11 +1806,11 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
            return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
        }
        if err != nil {
-           return shouldRetry(resp, err)
+           return shouldRetry(ctx, resp, err)
        }
        body, err = rest.ReadBody(resp)
        if err != nil {
-           return shouldRetry(resp, err)
+           return shouldRetry(ctx, resp, err)
        }
        if resp.StatusCode == 200 || resp.StatusCode == 201 {
            // we are done :)
@@ -1705,7 +1833,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
    var resp *http.Response
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(ctx, &opts)
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    return
}
@@ -1763,27 +1891,10 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,

    fs.Debugf(o, "Starting singlepart upload")
    var resp *http.Response
-   var opts rest.Opts
-   leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
-   trueDirID, drive, rootURL := parseNormalizedID(directoryID)
-   if drive != "" {
-       opts = rest.Opts{
-           Method:        "PUT",
-           RootURL:       rootURL,
-           Path:          "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
-           ContentLength: &size,
-           Body:          in,
-           Options:       options,
-       }
-   } else {
-       opts = rest.Opts{
-           Method:        "PUT",
-           Path:          "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
-           ContentLength: &size,
-           Body:          in,
-           Options:       options,
-       }
-   }
+   opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PUT", "/content")
+   opts.ContentLength = &size
+   opts.Body = in
+   opts.Options = options

    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
@@ -1793,7 +1904,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
                err = errors.New(err.Error() + " (is it a OneNote file?)")
            }
        }
-       return shouldRetry(resp, err)
+       return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err
@@ -1859,8 +1970,42 @@ func (o *Object) ID() string {
    return o.id
}

-func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
-   id, drive, rootURL := parseNormalizedID(normalizedID)
+/*
+ * URL Build routine area start
+ * 1. In this area, region-related URL rewrites are applied. As the API is blackbox,
+ *    we cannot thoroughly test this part. Please be extremely careful while changing them.
+ * 2. If possible, please don't introduce region related code in other region, but patch these helper functions.
+ * 3. To avoid region-related issues, please don't manually build rest.Opts from scratch.
+ *    Instead, use these helper function, and customize the URL afterwards if needed.
+ *
+ * currently, the 21ViaNet's API differs in the following places:
+ *  - https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route}
+ *    - this API doesn't work (gives invalid request)
+ *    - can be replaced with the following API:
+ *      - https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
+ *        - however, this API does NOT support multi-level leaf like a/b/c
+ *      - https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'")
+ *        - this API does support multi-level leaf like a/b/c
+ *  - https://{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
+ *    - Same as above
+ */
+
+// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
+// and returns itemID, driveID, rootURL.
+// Such a normalized ID can come from (*Item).GetID()
+func (f *Fs) parseNormalizedID(ID string) (string, string, string) {
+   rootURL := graphAPIEndpoint[f.opt.Region] + "/v1.0/drives"
+   if strings.Index(ID, "#") >= 0 {
+       s := strings.Split(ID, "#")
+       return s[1], s[0], rootURL
+   }
+   return ID, "", ""
+}
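Moving parseNormalizedID onto *Fs is what lets the drives root URL follow the configured region instead of the removed graphURL constant. A hypothetical table-style test pinning down its contract (IDs invented; testify is what the repo's tests already use):

func TestParseNormalizedID(t *testing.T) {
	f := &Fs{opt: Options{Region: regionGlobal}}

	// driveID#itemID splits on '#'; rootURL follows the region endpoint.
	id, drive, rootURL := f.parseNormalizedID("b!xyz#01ABCDEF")
	assert.Equal(t, "01ABCDEF", id)
	assert.Equal(t, "b!xyz", drive)
	assert.Equal(t, "https://graph.microsoft.com/v1.0/drives", rootURL)

	// A bare item ID carries no drive part and no override URL.
	id, drive, rootURL = f.parseNormalizedID("01ABCDEF")
	assert.Equal(t, "01ABCDEF", id)
	assert.Equal(t, "", drive)
	assert.Equal(t, "", rootURL)
}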

+// newOptsCall build the rest.Opts structure with *a normalizedID(driveID#fileID, or simply fileID)*
+// using url template https://{Endpoint}/drives/{driveID}/items/{itemID}/{route}
+func (f *Fs) newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
+   id, drive, rootURL := f.parseNormalizedID(normalizedID)

    if drive != "" {
        return rest.Opts{
@@ -1875,17 +2020,91 @@ func newOptsCall(normalizedID string, method string, route string) (opts rest.Op
        }
    }

-// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
-// and returns itemID, driveID, rootURL.
-// Such a normalized ID can come from (*Item).GetID()
-func parseNormalizedID(ID string) (string, string, string) {
-   if strings.Index(ID, "#") >= 0 {
-       s := strings.Split(ID, "#")
-       return s[1], s[0], graphURL + "/drives"
-   }
-   return ID, "", ""
+func escapeSingleQuote(str string) string {
+   return strings.ReplaceAll(str, "'", "''")
}

+// newOptsCallWithIDPath build the rest.Opts structure with *a normalizedID (driveID#fileID, or simply fileID) and leaf*
+// using url template https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route} (for international OneDrive)
+// or https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
+// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for 21ViaNet)
+// if isPath is false, this function will only work when the leaf is "" or a child name (i.e. it doesn't accept multi-level leaf)
+// if isPath is true, multi-level leaf like a/b/c can be passed
+func (f *Fs) newOptsCallWithIDPath(normalizedID string, leaf string, isPath bool, method string, route string) (opts rest.Opts, ok bool) {
+   encoder := f.opt.Enc.FromStandardName
+   if isPath {
+       encoder = f.opt.Enc.FromStandardPath
+   }
+   trueDirID, drive, rootURL := f.parseNormalizedID(normalizedID)
+   if drive == "" {
+       trueDirID = normalizedID
+   }
+   entity := "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(encoder(leaf))) + route
+   if f.opt.Region == regionCN {
+       if isPath {
+           entity = "/items/" + trueDirID + "/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+encoder(escapeSingleQuote(leaf))+"'")
+       } else {
+           entity = "/items/" + trueDirID + "/children('" + rest.URLPathEscape(encoder(escapeSingleQuote(leaf))) + "')" + route
+       }
+   }
+   if drive == "" {
+       ok = false
+       opts = rest.Opts{
+           Method: method,
+           Path:   entity,
+       }
+       return
+   }
+   ok = true
+   opts = rest.Opts{
+       Method:  method,
+       RootURL: rootURL,
+       Path:    "/" + drive + entity,
+   }
+   return
+}
+
+// newOptsCallWithRootPath build the rest.Opts structure with an *absolute path start from root*
+// using url template https://{Endpoint}/drives/{driveID}/root:/{path}:/{route}
+// or https://{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
+func (f *Fs) newOptsCallWithRootPath(path string, method string, route string) (opts rest.Opts) {
+   path = strings.TrimSuffix(path, "/")
+   newURL := "/root:/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(path))) + route
+   if f.opt.Region == regionCN {
+       newURL = "/root/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+escapeSingleQuote(f.opt.Enc.FromStandardPath(path))+"'")
+   }
+   return rest.Opts{
+       Method: method,
+       Path:   newURL,
+   }
+}
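In concrete terms, the same logical request renders differently per region, which is the whole reason these helpers exist. An illustrative comparison (path invented; encoding follows the code above):

// Region "global" - the classic path-based address:
//   newOptsCallWithRootPath("dir/file.txt", "GET", "/content")
//     Path: /root:/dir/file.txt:/content
//
// Region "cn" - rewritten to the children('@a1') alias form, because the
// {path}:/{route} style returns "invalid request" on the 21Vianet endpoint:
//   newOptsCallWithRootPath("dir/file.txt", "GET", "/content")
//     Path: /root/children('@a1')/content?@a1=%27dir%2Ffile.txt%27
//
// Single quotes inside the path are doubled first by escapeSingleQuote,
// matching OData's quoting rules.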

+// newOptsCallWithPath build the rest.Opt intelligently.
+// It will first try to resolve the path using dircache, which enables support for "Share with me" files.
+// If present in cache, then use ID + Path variant, else fallback into RootPath variant
+func (f *Fs) newOptsCallWithPath(ctx context.Context, path string, method string, route string) (opts rest.Opts) {
+   if path == "" {
+       url := "/root" + route
+       return rest.Opts{
+           Method: method,
+           Path:   url,
+       }
+   }
+
+   // find dircache
+   leaf, directoryID, _ := f.dirCache.FindPath(ctx, path, false)
+   // try to use the ID + Path variant first
+   if opts, ok := f.newOptsCallWithIDPath(directoryID, leaf, false, method, route); ok {
+       return opts
+   }
+   // fall back to the RootPath variant
+   return f.newOptsCallWithRootPath(path, method, route)
+}
+
+/*
+ * URL Build routine area end
+ */

// Returns the canonical form of the driveID
func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
    if driveID == "" {

@@ -5,6 +5,7 @@ import (
    "testing"

    "github.com/rclone/rclone/fs"
+   "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
)

@@ -19,6 +20,20 @@ func TestIntegration(t *testing.T) {
    })
}

+// TestIntegrationCn runs integration tests against the remote
+func TestIntegrationCn(t *testing.T) {
+   if *fstest.RemoteName != "" {
+       t.Skip("skipping as -remote is set")
+   }
+   fstests.Run(t, &fstests.Opt{
+       RemoteName: "TestOneDriveCn:",
+       NilObject:  (*Object)(nil),
+       ChunkedUpload: fstests.ChunkedUploadConfig{
+           CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
+       },
+   })
+}
+
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

@@ -119,6 +119,7 @@ type Object struct {
    fs      *Fs       // what this object is part of
    remote  string    // The remote path
    id      string    // ID of the file
+   parent  string    // ID of the parent directory
    modTime time.Time // The modified time of the object if known
    md5     string    // MD5 hash if known
    size    int64     // Size of the object
@@ -164,8 +165,7 @@ func (f *Fs) DirCacheFlush() {
}

// NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-   ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
@@ -188,8 +188,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        name: name,
        root: root,
        opt:  *opt,
-       srv:   rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
-       pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+       srv:   rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
+       pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
    }

    f.dirCache = dircache.New(root, "0", f)
@@ -207,7 +207,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
            Path: "/session/login.json",
        }
        resp, err = f.srv.CallJSON(ctx, &opts, &account, &f.session)
-       return f.shouldRetry(resp, err)
+       return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "failed to create session")
@@ -217,7 +217,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    f.features = (&fs.Features{
        CaseInsensitive:         true,
        CanHaveEmptyDirectories: true,
-   }).Fill(f)
+   }).Fill(ctx, f)

    // Find the current root
    err = f.dirCache.FindRoot(ctx, false)
@@ -234,7 +234,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        // No root so return old f
        return f, nil
    }
-   _, err := tempF.newObjectWithInfo(ctx, remote, nil)
+   _, err := tempF.newObjectWithInfo(ctx, remote, nil, "")
    if err != nil {
        if err == fs.ErrorObjectNotFound {
            // File doesn't exist so return old f
@@ -294,7 +294,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
            Path: "/folder/remove.json",
        }
        resp, err := f.srv.CallJSON(ctx, &opts, &removeDirData, nil)
-       return f.shouldRetry(resp, err)
+       return f.shouldRetry(ctx, resp, err)
    })
}

@@ -338,7 +338,7 @@ func (f *Fs) Precision() time.Duration {
    return time.Second
}

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -389,7 +389,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
            Path: "/file/move_copy.json",
        }
        resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
-       return f.shouldRetry(resp, err)
+       return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err
@@ -402,7 +402,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    return dstObj, nil
}

-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -446,7 +446,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
            Path: "/file/move_copy.json",
        }
        resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
-       return f.shouldRetry(resp, err)
+       return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err
@@ -460,7 +460,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -495,7 +495,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
            Path: "/folder/move_copy.json",
        }
        resp, err = f.srv.CallJSON(ctx, &opts, &moveFolderData, &response)
-       return f.shouldRetry(resp, err)
+       return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        fs.Debugf(src, "DirMove error %v", err)
@@ -518,7 +518,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File, parent string) (fs.Object, error) {
    // fs.Debugf(nil, "newObjectWithInfo(%s, %v)", remote, file)

    var o *Object
@@ -527,6 +527,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File) (
            fs:      f,
            remote:  remote,
            id:      file.FileID,
+           parent:  parent,
            modTime: time.Unix(file.DateModified, 0),
            size:    file.Size,
            md5:     file.FileHash,
@@ -549,7 +550,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File) (
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    // fs.Debugf(nil, "NewObject(\"%s\")", remote)
-   return f.newObjectWithInfo(ctx, remote, nil)
+   return f.newObjectWithInfo(ctx, remote, nil, "")
}

// Creates from the parameters passed in a half finished Object which
@@ -582,7 +583,7 @@ func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *Fold
    }
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-       return f.shouldRetry(resp, err)
+       return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err
@@ -632,7 +633,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
            Path: "/upload/create_file.json",
        }
        resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
-       return o.fs.shouldRetry(resp, err)
+       return o.fs.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "failed to create file")
@@ -646,7 +647,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
400, // Bad request (seen in "Next token is expired")
|
||||
401, // Unauthorized (seen in "Token has expired")
|
||||
408, // Request Timeout
|
||||
423, // Locked - get this on folders sometimes
|
||||
@@ -659,7 +659,10 @@ var retryErrorCodes = []int{
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
@@ -685,7 +688,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
||||
Path: "/folder.json",
|
||||
}
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, &createDirData, &response)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -713,7 +716,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
|
||||
Path: "/folder/list.json/" + f.session.SessionID + "/" + pathID,
|
||||
}
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", false, errors.Wrap(err, "failed to get folder list")
|
||||
@@ -723,7 +726,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
|
||||
for _, folder := range folderList.Folders {
|
||||
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
|
||||
|
||||
if leaf == folder.Name {
|
||||
if strings.EqualFold(leaf, folder.Name) {
|
||||
// found
|
||||
return folder.FolderID, true, nil
|
||||
}
|
||||
@@ -756,7 +759,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
folderList := FolderList{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get folder list")
|
||||
@@ -770,6 +773,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
f.dirCache.Put(remote, folder.FolderID)
|
||||
d := fs.NewDir(remote, time.Unix(folder.DateModified, 0)).SetID(folder.FolderID)
|
||||
d.SetItems(int64(folder.ChildFolders))
|
||||
d.SetParentID(directoryID)
|
||||
entries = append(entries, d)
|
||||
}
|
||||
|
||||
@@ -777,7 +781,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
file.Name = f.opt.Enc.ToStandardName(file.Name)
|
||||
// fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID)
|
||||
remote := path.Join(dir, file.Name)
|
||||
o, err := f.newObjectWithInfo(ctx, remote, &file)
|
||||
o, err := f.newObjectWithInfo(ctx, remote, &file, directoryID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -844,7 +848,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
}
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, nil)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
o.modTime = modTime
|
||||
@@ -864,7 +868,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
var resp *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to open file)")
|
||||
@@ -883,7 +887,7 @@ func (o *Object) Remove(ctx context.Context) error {
|
||||
Path: "/file.json/" + o.fs.session.SessionID + "/" + o.id,
|
||||
}
|
||||
resp, err := o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -912,7 +916,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
Path: "/upload/open_file_upload.json",
|
||||
}
|
||||
resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create file")
|
||||
@@ -956,7 +960,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
}
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &reply)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create file")
|
||||
@@ -979,7 +983,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
Path: "/upload/close_file_upload.json",
|
||||
}
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, &closeUploadData, &closeResponse)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create file")
|
||||
@@ -1005,7 +1009,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
Path: "/file/access.json",
|
||||
}
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, &update, nil)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1031,7 +1035,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
o.fs.session.SessionID, directoryID, url.QueryEscape(o.fs.opt.Enc.FromStandardName(leaf))),
|
||||
}
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get folder list")
|
||||
@@ -1055,6 +1059,11 @@ func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
// ParentID returns the ID of the Object parent directory if known, or "" if not
|
||||
func (o *Object) ParentID() string {
|
||||
return o.parent
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1065,4 +1074,5 @@ var (
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
_ fs.ParentIDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
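The change repeated throughout this diff, threading ctx into shouldRetry and checking fserrors.ContextError before anything else, is what lets a cancelled or timed-out context abort a retry loop immediately instead of sitting out the pacer's backoff. Below is a minimal, self-contained sketch of the same idea using only the standard library rather than rclone's fserrors helpers; the fixed backoff and attempt budget are assumptions for illustration, not rclone's actual policy.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// callWithRetry retries fn with a crude fixed backoff, but gives up at
// once when the context has been cancelled or its deadline has passed.
// This mirrors the shouldRetry(ctx, ...) pattern in the diff: check the
// context before deciding whether an error is worth retrying.
func callWithRetry(ctx context.Context, attempts int, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		err = fn()
		if err == nil {
			return nil
		}
		// Context errors are never retryable - bail out immediately.
		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || ctx.Err() != nil {
			return err
		}
		select {
		case <-time.After(100 * time.Millisecond): // illustrative backoff
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return fmt.Errorf("after %d attempts: %w", attempts, err)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	err := callWithRetry(ctx, 10, func() error {
		return errors.New("transient failure")
	})
	fmt.Println(err) // stops early once the 250ms deadline passes
}
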
@@ -96,7 +96,7 @@ func (i *Item) ModTime() (t time.Time) {
return t
}

-// ItemResult is returned from the /listfolder, /createfolder, /deletefolder, /deletefile etc methods
+// ItemResult is returned from the /listfolder, /createfolder, /deletefolder, /deletefile, etc. methods
type ItemResult struct {
Error
Metadata Item `json:"metadata"`
@@ -104,8 +104,9 @@ type ItemResult struct {

// Hashes contains the supported hashes
type Hashes struct {
-SHA1 string `json:"sha1"`
-MD5 string `json:"md5"`
+SHA1   string `json:"sha1"`
+MD5    string `json:"md5"`
+SHA256 string `json:"sha256"`
}

// UploadFileResponse is the response from /uploadfile

@@ -72,7 +72,7 @@ func init() {
Name: "pcloud",
Description: "Pcloud",
NewFs: NewFs,
-Config: func(name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) {
optc := new(Options)
err := configstruct.Set(m, optc)
if err != nil {
@@ -98,7 +98,7 @@ func init() {
CheckAuth: checkAuth,
StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210
}
-err = oauthutil.Config("pcloud", name, m, oauthConfig, &opt)
+err = oauthutil.Config(ctx, "pcloud", name, m, oauthConfig, &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -213,13 +213,16 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
+func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+if fserrors.ContextError(ctx, &err) {
+return false, err
+}
doRetry := false

// Check if it is an api.Error
if apiErr, ok := err.(*api.Error); ok {
// See https://docs.pcloud.com/errors/ for error treatment
-// Errors are classified as 1xxx, 2xxx etc
+// Errors are classified as 1xxx, 2xxx, etc.
switch apiErr.Result / 1000 {
case 4: // 4xxx: rate limiting
doRetry = true
@@ -280,8 +283,7 @@ func errorHandler(resp *http.Response) error {
}

// NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -289,7 +291,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
root = parsePath(root)
-oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
+oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Pcloud")
}
@@ -300,12 +302,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
-pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: false,
CanHaveEmptyDirectories: true,
-}).Fill(f)
+}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)

// Renew the token in the background
@@ -406,7 +408,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -461,7 +463,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -598,7 +600,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
@@ -622,7 +624,7 @@ func (f *Fs) Precision() time.Duration {
return time.Second
}

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -663,7 +665,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -701,11 +703,11 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
}

-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -741,7 +743,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -755,7 +757,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -788,7 +790,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return err
@@ -815,7 +817,7 @@ func (f *Fs) linkDir(ctx context.Context, dirID string, expire fs.Duration) (str
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
@@ -839,7 +841,7 @@ func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (str
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
@@ -870,7 +872,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &q)
err = q.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
@@ -885,6 +887,13 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
+// EU region supports SHA1 and SHA256 (but rclone doesn't
+// support SHA256 yet).
+//
+// https://forum.rclone.org/t/pcloud-to-local-no-hashes-in-common/19440
+if f.opt.Hostname == "eapi.pcloud.com" {
+return hash.Set(hash.SHA1)
+}
return hash.Set(hash.MD5 | hash.SHA1)
}

@@ -921,7 +930,7 @@ func (o *Object) getHashes(ctx context.Context) (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return err
@@ -1040,7 +1049,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
@@ -1066,7 +1075,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -1111,7 +1120,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "PUT",
Path: "/uploadfile",
Body: in,
-ContentType: fs.MimeType(ctx, o),
+ContentType: fs.MimeType(ctx, src),
ContentLength: &size,
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
@@ -1125,10 +1134,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Special treatment for a 0 length upload. This doesn't work
// with PUT even with Content-Length set (by setting
-// opts.Body=0), so upload it as a multpart form POST with
+// opts.Body=0), so upload it as a multipart form POST with
// Content-Length set.
if size == 0 {
-formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)
+formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
if err != nil {
return errors.Wrap(err, "failed to make multipart upload for 0 length file")
}
@@ -1145,7 +1154,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
// sometimes pcloud leaves a half complete file on
@@ -1175,7 +1184,7 @@ func (o *Object) Remove(ctx context.Context) error {
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
}

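Nearly every pcloud hunk above touches the same call shape: an API request wrapped in pacer.Call, with shouldRetry deciding whether the pacer should back off and try again. The sketch below imitates that wrapper shape with simplified stand-in types; rclone's real pacer lives in fs and lib/pacer and is considerably richer, so treat this as a model of the control flow, not of the actual API.

package main

import (
	"context"
	"errors"
	"fmt"
)

// pacerFunc reports (retry, err): retry=true asks the pacer to back off
// and call again; retry=false returns err to the caller as-is.
type pacerFunc func() (bool, error)

// call is a toy stand-in for (*fs.Pacer).Call: loop while the wrapped
// function asks for a retry, up to a fixed attempt budget.
func call(maxTries int, fn pacerFunc) error {
	for i := 0; i < maxTries; i++ {
		retry, err := fn()
		if !retry {
			return err
		}
		_ = err // a real pacer would sleep with backoff here
	}
	return errors.New("pacer: retry budget exhausted")
}

// shouldRetry follows the shape in the diff: a cancelled context is
// never retryable; any other error is (naively) retried.
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if ctx.Err() != nil {
		return false, ctx.Err()
	}
	return err != nil, err
}

func main() {
	ctx := context.Background()
	attempt := 0
	err := call(5, func() (bool, error) {
		attempt++
		if attempt < 3 {
			return shouldRetry(ctx, errors.New("HTTP 429"))
		}
		return shouldRetry(ctx, nil)
	})
	fmt.Println("attempts:", attempt, "err:", err)
}
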
@@ -78,8 +78,8 @@ func init() {
Name: "premiumizeme",
Description: "premiumize.me",
NewFs: NewFs,
-Config: func(name string, m configmap.Mapper) {
-err := oauthutil.Config("premiumizeme", name, m, oauthConfig, nil)
+Config: func(ctx context.Context, name string, m configmap.Mapper) {
+err := oauthutil.Config(ctx, "premiumizeme", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -176,7 +176,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
+func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+if fserrors.ContextError(ctx, &err) {
+return false, err
+}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

@@ -234,8 +237,7 @@ func (f *Fs) baseParams() url.Values {
}

// NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -248,12 +250,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
var client *http.Client
var ts *oauthutil.TokenSource
if opt.APIKey == "" {
-client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
+client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure premiumize.me")
}
} else {
-client = fshttp.NewClient(fs.Config)
+client = fshttp.NewClient(ctx)
}

f := &Fs{
@@ -261,13 +263,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
-pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
ReadMimeType: true,
-}).Fill(f)
+}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)

// Renew the token in the background
@@ -303,7 +305,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
-f.features.Fill(&tempF)
+f.features.Fill(ctx, &tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -346,7 +348,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
-if item.Name == leaf {
+if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true
}
@@ -371,7 +373,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -408,7 +410,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -582,7 +584,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
@@ -661,7 +663,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "Move http")
@@ -682,7 +684,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
return nil
}

-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -718,7 +720,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -770,7 +772,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "CreateDir http")
@@ -897,7 +899,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -935,7 +937,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
}
// Just check the download URL resolves - sometimes
// the URLs returned by premiumize.me don't resolve so
@@ -994,7 +996,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var result api.Response
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "upload file http")
@@ -1036,7 +1038,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isFile bool, id string, newLeaf str
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "rename http")
@@ -1061,7 +1063,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "remove http")

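Both the opendrive and premiumize.me FindLeaf hunks swap == for strings.EqualFold when matching a leaf name, which is the right comparison for a case-insensitive backend: a directory created as Foo must still be found when the caller asks for foo. A small illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	leaf := "Backup"
	for _, name := range []string{"backup", "BACKUP", "backups"} {
		// strings.EqualFold does Unicode case folding without building
		// lowered copies of both strings, unlike
		// strings.ToLower(a) == strings.ToLower(b).
		fmt.Printf("%q matches %q: %v\n", name, leaf, strings.EqualFold(name, leaf))
	}
	// "backup" matches "Backup": true
	// "BACKUP" matches "Backup": true
	// "backups" matches "Backup": false
}
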
@@ -1,6 +1,7 @@
package putio

import (
+"context"
"fmt"
"net/http"

@@ -29,7 +30,10 @@ func (e *statusCodeError) Temporary() bool {

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
-func shouldRetry(err error) (bool, error) {
+func shouldRetry(ctx context.Context, err error) (bool, error) {
+if fserrors.ContextError(ctx, &err) {
+return false, err
+}
if err == nil {
return false, nil
}

@@ -68,7 +68,7 @@ func parsePath(path string) (root string) {
}

// NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (f fs.Fs, err error) {
// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
// Parse config into Options struct
opt := new(Options)
@@ -77,8 +77,8 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
return nil, err
}
root = parsePath(root)
-httpClient := fshttp.NewClient(fs.Config)
-oAuthClient, _, err := oauthutil.NewClientWithBaseClient(name, m, putioConfig, httpClient)
+httpClient := fshttp.NewClient(ctx)
+oAuthClient, _, err := oauthutil.NewClientWithBaseClient(ctx, name, m, putioConfig, httpClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure putio")
}
@@ -86,7 +86,7 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
name: name,
root: root,
opt: *opt,
-pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
client: putio.NewClient(oAuthClient),
httpClient: httpClient,
oAuthClient: oAuthClient,
@@ -95,9 +95,8 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
DuplicateFiles: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
-}).Fill(p)
+}).Fill(ctx, p)
p.dirCache = dircache.New(root, "0", p)
-ctx := context.Background()
// Find the current root
err = p.dirCache.FindRoot(ctx, false)
if err != nil {
@@ -148,7 +147,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID)
entry, err = f.client.Files.CreateFolder(ctx, f.opt.Enc.FromStandardName(leaf), parentID)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
return itoa(entry.ID), err
}
@@ -165,7 +164,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing file: %d", fileID)
children, _, err = f.client.Files.List(ctx, fileID)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
@@ -206,7 +205,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files inside List: %d", parentID)
children, _, err = f.client.Files.List(ctx, parentID)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return
@@ -236,10 +235,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
-exisitingObj, err := f.NewObject(ctx, src.Remote())
+existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
-return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
@@ -272,7 +271,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "getting file: %d", fileID)
entry, err = f.client.Files.Get(ctx, fileID)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -283,11 +282,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time, options []fs.OpenOption) (location string, err error) {
// defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
err = f.pacer.Call(func() (bool, error) {
-req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
+req, err := http.NewRequestWithContext(ctx, "POST", "https://upload.put.io/files/", nil)
if err != nil {
return false, err
}
-req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-length", strconv.FormatInt(size, 10))
b64name := base64.StdEncoding.EncodeToString([]byte(f.opt.Enc.FromStandardName(name)))
@@ -297,7 +295,7 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, b64parentID, b64modifiedAt))
fs.OpenOptionAddHTTPHeaders(req.Header, options)
resp, err := f.oAuthClient.Do(req)
-retry, err := shouldRetry(err)
+retry, err := shouldRetry(ctx, err)
if retry {
return true, err
}
@@ -322,7 +320,7 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
err = f.pacer.Call(func() (bool, error) {
fs.Debugf(f, "Sending zero length chunk")
_, fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
return
}
@@ -346,13 +344,13 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
// Get file offset and seek to the position
offset, err := f.getServerOffset(ctx, location)
if err != nil {
-return shouldRetry(err)
+return shouldRetry(ctx, err)
}
sentBytes := offset - chunkStart
fs.Debugf(f, "sentBytes: %d", sentBytes)
_, err = chunk.Seek(sentBytes, io.SeekStart)
if err != nil {
-return shouldRetry(err)
+return shouldRetry(ctx, err)
}
transferOffset = offset
reqSize = chunkSize - sentBytes
@@ -369,7 +367,7 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
offsetMismatch = true
return true, errors.New("connection broken")
}
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return
@@ -429,21 +427,19 @@ func (f *Fs) transferChunk(ctx context.Context, location string, start int64, ch
}

func (f *Fs) makeUploadHeadRequest(ctx context.Context, location string) (*http.Request, error) {
-req, err := http.NewRequest("HEAD", location, nil)
+req, err := http.NewRequestWithContext(ctx, "HEAD", location, nil)
if err != nil {
return nil, err
}
-req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
return req, nil
}

func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) {
-req, err := http.NewRequest("PATCH", location, in)
+req, err := http.NewRequestWithContext(ctx, "PATCH", location, in)
if err != nil {
return nil, err
}
-req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-offset", strconv.FormatInt(offset, 10))
req.Header.Set("content-length", strconv.FormatInt(length, 10))
@@ -483,7 +479,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files: %d", dirID)
children, _, err = f.client.Files.List(ctx, dirID)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
@@ -497,7 +493,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "deleting file: %d", dirID)
err = f.client.Files.Delete(ctx, dirID)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
f.dirCache.FlushDir(dir)
return err
@@ -525,7 +521,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
return f.purgeCheck(ctx, dir, false)
}

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -556,7 +552,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
_, err = f.client.Do(req, nil)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -564,7 +560,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
return f.NewObject(ctx, remote)
}

-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -595,7 +591,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// fs.Debugf(f, "moving file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
_, err = f.client.Do(req, nil)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -604,7 +600,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
}

// DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -635,7 +631,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// fs.Debugf(f, "moving file (%s) to parent_id: %s", srcID, dstDirectoryID)
_, err = f.client.Do(req, nil)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
srcFs.dirCache.FlushDir(srcRemote)
return err
@@ -648,7 +644,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "getting account info")
ai, err = f.client.Account.Info(ctx)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
@@ -682,6 +678,6 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
}
// fs.Debugf(f, "emptying trash")
_, err = f.client.Do(req, nil)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
}

@@ -115,7 +115,7 @@ func (o *Object) MimeType(ctx context.Context) string {

// setMetadataFromEntry sets the fs data from a putio.File
//
-// This isn't a complete set of metadata and has an inacurate date
+// This isn't a complete set of metadata and has an inaccurate date
func (o *Object) setMetadataFromEntry(info putio.File) error {
o.file = &info
o.modtime = info.UpdatedAt.Time
@@ -145,7 +145,7 @@ func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
return false, fs.ErrorObjectNotFound
}
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -220,7 +220,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var storageURL string
err = o.fs.pacer.Call(func() (bool, error) {
storageURL, err = o.fs.client.Files.URL(ctx, o.file.ID, true)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return
@@ -229,11 +229,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var resp *http.Response
headers := fs.OpenOptionHeaders(options)
err = o.fs.pacer.Call(func() (bool, error) {
-req, err := http.NewRequest(http.MethodGet, storageURL, nil)
+req, err := http.NewRequestWithContext(ctx, http.MethodGet, storageURL, nil)
if err != nil {
-return shouldRetry(err)
+return shouldRetry(ctx, err)
}
-req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("User-Agent", o.fs.client.UserAgent)

// merge headers with extra headers
@@ -242,7 +241,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
resp, err = o.fs.httpClient.Do(req)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
_ = resp.Body.Close()
@@ -284,6 +283,6 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return o.fs.pacer.Call(func() (bool, error) {
// fs.Debugf(o, "removing file: id=%d", o.file.ID)
err = o.fs.client.Files.Delete(ctx, o.file.ID)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
}

@@ -1,6 +1,7 @@
package putio

import (
+"context"
"log"
"regexp"
"time"
@@ -59,11 +60,11 @@ func init() {
Name: "putio",
Description: "Put.io",
NewFs: NewFs,
-Config: func(name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
}
-err := oauthutil.Config("putio", name, m, putioConfig, &opt)
+err := oauthutil.Config(ctx, "putio", name, m, putioConfig, &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}

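The putio hunks replace the http.NewRequest plus req.WithContext(ctx) two-step with http.NewRequestWithContext, available since Go 1.13. Behaviour is unchanged (the request is cancelled when ctx is), but it saves the shallow copy that WithContext makes. A hedged example against the upload endpoint named in the diff:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// One call instead of http.NewRequest(...) followed by
	// req = req.WithContext(ctx).
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, "https://upload.put.io/files/", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("tus-resumable", "1.0.0")

	resp, err := http.DefaultClient.Do(req) // aborts if ctx expires mid-flight
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
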
@@ -93,7 +93,7 @@ as multipart uploads using this chunk size.
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.

-If you are transferring large files over high speed links and you have
+If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
@@ -104,10 +104,10 @@ enough memory, then increasing this will speed up the transfers.`,
This is the number of chunks of the same file that are uploaded
concurrently.

-NB if you set this to > 1 then the checksums of multpart uploads
+NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).

-If you are uploading small numbers of large file over high speed link
+If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
@@ -207,7 +207,7 @@ func (o *Object) split() (bucket, bucketPath string) {
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
/*
Pattern to match an endpoint,
-eg: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443
+e.g.: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443
"http(s)//qingstor.com" --> "http(s)", "qingstor.com", ""
"qingstor.com" --> "", "qingstor.com", ""
*/
@@ -228,7 +228,7 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
}

// qsConnection makes a connection to qingstor
-func qsServiceConnection(opt *Options) (*qs.Service, error) {
+func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error) {
accessKeyID := opt.AccessKeyID
secretAccessKey := opt.SecretAccessKey

@@ -277,7 +277,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
cf.Host = host
cf.Port = port
// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
-cf.Connection = fshttp.NewClient(fs.Config)
+cf.Connection = fshttp.NewClient(ctx)

return qs.Init(cf)
}
@@ -319,7 +319,7 @@ func (f *Fs) setRoot(root string) {
}

// NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -334,7 +334,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
-svc, err := qsServiceConnection(opt)
+svc, err := qsServiceConnection(ctx, opt)
if err != nil {
return nil, err
}
@@ -357,7 +357,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
BucketBased: true,
BucketBasedRootOK: true,
SlowModTime: true,
-}).Fill(f)
+}).Fill(ctx, f)

if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
@@ -428,7 +428,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return fsObj, fsObj.Update(ctx, in, src, options...)
}

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -872,11 +872,12 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
if err != nil {
return err
}
-maxLimit := int(listLimitSize)
+// maxLimit := int(listLimitSize)
var marker *string
for {
req := qs.ListMultipartUploadsInput{
-Limit: &maxLimit,
+// The default is 200 but this errors if more than 200 is put in so leave at the default
+// Limit: &maxLimit,
KeyMarker: marker,
}
var resp *qs.ListMultipartUploadsOutput
@@ -927,7 +928,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
}
for _, entry := range entries {
cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote()))
-if err != nil {
+if cleanErr != nil {
fs.Errorf(f, "Failed to cleanup bucket: %q", cleanErr)
err = cleanErr
}

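The last qingstor hunk fixes a classic shadowing slip: the loop tested err, the outer accumulator, instead of cleanErr, the error actually returned by the call, so cleanup failures were only reported when an unrelated earlier error happened to be set. The corrected shape (log each failure but keep going, returning the last error seen) looks like this in isolation:

package main

import (
	"errors"
	"fmt"
)

// cleanAll runs clean on every name, logging each failure but carrying
// on, and returns the last error seen (nil if everything succeeded).
func cleanAll(names []string, clean func(string) error) (err error) {
	for _, name := range names {
		cleanErr := clean(name)
		if cleanErr != nil { // test the fresh error, not the accumulator
			fmt.Printf("failed to clean %q: %v\n", name, cleanErr)
			err = cleanErr
		}
	}
	return err
}

func main() {
	err := cleanAll([]string{"a", "b", "c"}, func(name string) error {
		if name == "b" {
			return errors.New("bucket locked")
		}
		return nil
	})
	fmt.Println("final error:", err)
}
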
backend/s3/s3.go: 566 changed lines (file diff suppressed because it is too large)

@@ -53,6 +53,7 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
var md5 string
var contentType string
var headersToSign []string
+tmpHeadersToSign := make(map[string][]string)
for k, v := range req.Header {
k = strings.ToLower(k)
switch k {
@@ -62,15 +63,24 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
contentType = v[0]
default:
if strings.HasPrefix(k, "x-amz-") {
-vall := strings.Join(v, ",")
-headersToSign = append(headersToSign, k+":"+vall)
+tmpHeadersToSign[k] = v
}
}
}
+var keys []string
+for k := range tmpHeadersToSign {
+keys = append(keys, k)
+}
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+sort.Strings(keys)
+
+for _, key := range keys {
+vall := strings.Join(tmpHeadersToSign[key], ",")
+headersToSign = append(headersToSign, key+":"+vall)
+}
// Make headers of interest into canonical string
var joinedHeadersToSign string
if len(headersToSign) > 0 {
-sort.StringSlice(headersToSign).Sort()
joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
}

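The v2 signer hunk collects the x-amz-* headers into a map and sorts the key names before joining, instead of sorting the already-joined name:value strings. Sorting the joined strings can order entries wrongly when one header name is a prefix of another, because the ':' separator compares after '-' in ASCII. A standalone sketch of the canonicalisation; the function name and map input here are illustrative, not rclone's API:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// canonicalAmzHeaders builds the sorted x-amz-* block used in the AWS
// v2 string-to-sign: one "name:value" line per header, sorted by the
// lower-cased header name (not by the joined line).
func canonicalAmzHeaders(headers map[string][]string) string {
	byName := make(map[string][]string)
	for k, v := range headers {
		k = strings.ToLower(k)
		if strings.HasPrefix(k, "x-amz-") {
			byName[k] = v
		}
	}
	keys := make([]string, 0, len(byName))
	for k := range byName {
		keys = append(keys, k)
	}
	sort.Strings(keys) // sort the names, per the v2 signing rules
	var b strings.Builder
	for _, k := range keys {
		b.WriteString(k + ":" + strings.Join(byName[k], ",") + "\n")
	}
	return b.String()
}

func main() {
	fmt.Print(canonicalAmzHeaders(map[string][]string{
		"X-Amz-Meta-X": {"2"},
		"X-Amz-Meta":   {"1"}, // name is a prefix of the one above
	}))
	// x-amz-meta:1
	// x-amz-meta-x:2
}
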
@@ -46,7 +46,7 @@ type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
-Size int `json:"size"`
+Size int64 `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}

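Changing Library.Size from int to int64 matters on 32-bit builds, where Go's int is 32 bits and a library over about 2 GiB could not be decoded from JSON; it also lets the later d.SetSize(library.Size) call drop its int64(...) conversion. A quick demonstration of the decode failure, with int32 standing in for a 32-bit platform's int:

package main

import (
	"encoding/json"
	"fmt"
)

type library32 struct {
	Size int32 `json:"size"` // stands in for `int` on a 32-bit platform
}

type library64 struct {
	Size int64 `json:"size"`
}

func main() {
	data := []byte(`{"size": 5368709120}`) // a 5 GiB library

	var a library32
	// json.Unmarshal reports an overflow rather than silently truncating.
	fmt.Println("int32:", json.Unmarshal(data, &a))

	var b library64
	fmt.Println("int64:", json.Unmarshal(data, &b), b.Size)
}
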
@@ -1,6 +1,7 @@
|
||||
package seafile
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sync"
|
||||
@@ -27,7 +28,7 @@ func init() {
|
||||
}
|
||||
|
||||
// getPacer returns the unique pacer for that remote URL
|
||||
func getPacer(remote string) *fs.Pacer {
|
||||
func getPacer(ctx context.Context, remote string) *fs.Pacer {
|
||||
pacerMutex.Lock()
|
||||
defer pacerMutex.Unlock()
|
||||
|
||||
@@ -37,6 +38,7 @@ func getPacer(remote string) *fs.Pacer {
|
||||
}
|
||||
|
||||
pacers[remote] = fs.NewPacer(
|
||||
ctx,
|
||||
pacer.NewDefault(
|
||||
pacer.MinSleep(minSleep),
|
||||
pacer.MaxSleep(maxSleep),
|
||||
|
||||
@@ -147,7 +147,7 @@ type Fs struct {
 // ------------------------------------------------------------

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -197,15 +197,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		opt:         *opt,
 		endpoint:    u,
 		endpointURL: u.String(),
-		srv:         rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
-		pacer:       getPacer(opt.URL),
+		srv:         rest.NewClient(fshttp.NewClient(ctx)).SetRoot(u.String()),
+		pacer:       getPacer(ctx, opt.URL),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 		BucketBased:             opt.LibraryName == "",
-	}).Fill(f)
+	}).Fill(ctx, f)

-	ctx := context.Background()
 	serverInfo, err := f.getServerInfo(ctx)
 	if err != nil {
 		return nil, err
@@ -297,7 +296,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 }

 // Config callback for 2FA
-func Config(name string, m configmap.Mapper) {
+func Config(ctx context.Context, name string, m configmap.Mapper) {
+	ci := fs.GetConfig(ctx)
 	serverURL, ok := m.Get(configURL)
 	if !ok || serverURL == "" {
 		// If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
@@ -306,7 +306,7 @@ func Config(name string, m configmap.Mapper) {
 	}

 	// Stop if we are running non-interactive config
-	if fs.Config.AutoConfirm {
+	if ci.AutoConfirm {
 		return
 	}
@@ -343,7 +343,7 @@ func Config(name string, m configmap.Mapper) {
 	if !strings.HasPrefix(url, "/") {
 		url += "/"
 	}
-	srv := rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(url)
+	srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)

 	// We loop asking for a 2FA code
 	for {
@@ -372,7 +372,6 @@ func Config(name string, m configmap.Mapper) {
 			m.Set(configAuthToken, token)
 			// And delete any previous entry for password
 			m.Set(configPassword, "")
-			config.SaveConfig()
 			// And we're done here
 			break
 		}
@@ -408,7 +407,10 @@ var retryErrorCodes = []int{

 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
+func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	// For 429 errors look at the Retry-After: header and
 	// set the retry appropriately, starting with a minimum of 1
 	// second if it isn't set.
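
Threading ctx into shouldRetry means the pacer's retry loop gives up as soon as the caller's context is cancelled, rather than continuing to retry a dead request. A simplified, runnable sketch of that shape; rclone's fserrors.ContextError performs a more thorough version of the context check shown here:

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
)

// shouldRetry mirrors the shape of the backend helper: never retry
// once the context is done, otherwise retry on throttling statuses.
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if ctxErr := ctx.Err(); ctxErr != nil {
		return false, ctxErr // context cancelled or timed out: give up
	}
	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode >= 500) {
		return true, err
	}
	return false, err
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the user interrupting mid-transfer
	retry, err := shouldRetry(ctx, nil, errors.New("read: connection reset"))
	fmt.Println(retry, err) // false context.Canceled
}
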
@@ -663,7 +665,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) e

 // ==================== Optional Interface fs.Copier ====================

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -714,7 +716,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

 // ==================== Optional Interface fs.Mover ====================

-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -804,7 +806,7 @@ func (f *Fs) adjustDestination(ctx context.Context, libraryID, srcFilename, dstP
 // ==================== Optional Interface fs.DirMover ====================

 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1004,7 +1006,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro

 	for _, library := range libraries {
 		d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
-		d.SetSize(int64(library.Size))
+		d.SetSize(library.Size)
 		entries = append(entries, d)
 	}

@@ -86,7 +86,7 @@ func (f *Fs) getServerInfo(ctx context.Context) (account *api.ServerInfo, err er
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -112,7 +112,7 @@ func (f *Fs) getUserAccountInfo(ctx context.Context) (account *api.AccountInfo,
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -139,7 +139,7 @@ func (f *Fs) getLibraries(ctx context.Context) ([]api.Library, error) {
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -170,7 +170,7 @@ func (f *Fs) createLibrary(ctx context.Context, libraryName, password string) (l
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -197,7 +197,7 @@ func (f *Fs) deleteLibrary(ctx context.Context, libraryID string) error {
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -228,7 +228,7 @@ func (f *Fs) decryptLibrary(ctx context.Context, libraryID, password string) err
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -271,7 +271,7 @@ func (f *Fs) getDirectoryEntriesAPIv21(ctx context.Context, libraryID, dirPath s
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -316,7 +316,7 @@ func (f *Fs) getDirectoryDetails(ctx context.Context, libraryID, dirPath string)
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -358,7 +358,7 @@ func (f *Fs) createDir(ctx context.Context, libraryID, dirPath string) error {
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -398,7 +398,7 @@ func (f *Fs) renameDir(ctx context.Context, libraryID, dirPath, newName string)
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -438,7 +438,7 @@ func (f *Fs) moveDir(ctx context.Context, srcLibraryID, srcDir, srcName, dstLibr
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &request, nil)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -474,7 +474,7 @@ func (f *Fs) deleteDir(ctx context.Context, libraryID, filePath string) error {
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, nil)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -505,7 +505,7 @@ func (f *Fs) getFileDetails(ctx context.Context, libraryID, filePath string) (*a
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -539,7 +539,7 @@ func (f *Fs) deleteFile(ctx context.Context, libraryID, filePath string) error {
 	}
 	err := f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.CallJSON(ctx, &opts, nil, nil)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to delete file")
@@ -565,7 +565,7 @@ func (f *Fs) getDownloadLink(ctx context.Context, libraryID, filePath string) (s
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -614,7 +614,7 @@ func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -659,7 +659,7 @@ func (f *Fs) getUploadLink(ctx context.Context, libraryID string) (string, error
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -682,7 +682,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
 		"need_idx_progress": {"true"},
 		"replace":           {"1"},
 	}
-	formReader, contentType, _, err := rest.MultipartUpload(in, parameters, "file", f.opt.Enc.FromStandardName(filename))
+	formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to make multipart upload")
 	}
@@ -739,7 +739,7 @@ func (f *Fs) listShareLinks(ctx context.Context, libraryID, remote string) ([]ap
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -777,7 +777,7 @@ func (f *Fs) createShareLink(ctx context.Context, libraryID, remote string) (*ap
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -818,7 +818,7 @@ func (f *Fs) copyFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -860,7 +860,7 @@ func (f *Fs) moveFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -900,7 +900,7 @@ func (f *Fs) renameFile(ctx context.Context, libraryID, filePath, newname string
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -938,7 +938,7 @@ func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error {
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, nil)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -976,7 +976,7 @@ func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath st
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -1030,7 +1030,7 @@ func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibrar
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -1075,7 +1075,7 @@ func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname s
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
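
Every webapi call above now passes ctx through shouldRetry, and the upload hunk threads it into rest.MultipartUpload, which streams the form body rather than buffering the whole file. A sketch of the underlying streaming-multipart technique using only the standard library (an illustration of the idea, not rclone's implementation):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"mime/multipart"
	"strings"
)

// streamingForm returns a reader that produces a multipart body on
// the fly, so large uploads never sit fully in memory.
func streamingForm(name string, file io.Reader) (io.Reader, string) {
	pr, pw := io.Pipe()
	w := multipart.NewWriter(pw)
	go func() {
		part, err := w.CreateFormFile("file", name)
		if err == nil {
			_, err = io.Copy(part, file)
		}
		if err == nil {
			err = w.Close() // writes the trailing multipart boundary
		}
		pw.CloseWithError(err) // propagate any error to the reader side
	}()
	return pr, w.FormDataContentType()
}

func main() {
	body, contentType := streamingForm("hello.txt", strings.NewReader("hello"))
	b, _ := ioutil.ReadAll(body) // in real code this is the HTTP request body
	fmt.Println(contentType, len(b))
}
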
@@ -11,17 +11,18 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
-	"os/user"
 	"path"
 	"regexp"
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/pkg/errors"
 	"github.com/pkg/sftp"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
@@ -33,6 +34,7 @@ import (
 	"github.com/rclone/rclone/lib/readers"
 	sshagent "github.com/xanzy/ssh-agent"
 	"golang.org/x/crypto/ssh"
+	"golang.org/x/crypto/ssh/knownhosts"
 )

 const (
@@ -43,7 +45,7 @@ const (
 )

 var (
-	currentUser = readCurrentUser()
+	currentUser = env.CurrentUser()
 )

 func init() {
@@ -82,6 +84,21 @@ func init() {
 Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
 in the new OpenSSH format can't be used.`,
 			IsPassword: true,
 		}, {
+			Name: "pubkey_file",
+			Help: `Optional path to public key file.
+
+Set this if you have a signed certificate you want to use for authentication.` + env.ShellExpandHelp,
+		}, {
+			Name: "known_hosts_file",
+			Help: `Optional path to known_hosts file.
+
+Set this value to enable server host key validation.` + env.ShellExpandHelp,
+			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: "~/.ssh/known_hosts",
+				Help:  "Use OpenSSH's known_hosts file",
+			}},
+		}, {
 			Name: "key_use_agent",
 			Help: `When set forces the usage of the ssh-agent.
@@ -176,6 +193,50 @@ Home directory can be found in a shared folder called "home"

 The subsystem option is ignored when server_command is defined.`,
 			Advanced: true,
+		}, {
+			Name:    "use_fstat",
+			Default: false,
+			Help: `If set use fstat instead of stat
+
+Some servers limit the number of open files and calling Stat after opening
+the file will throw an error from the server. Setting this flag will call
+Fstat instead of Stat which is called on an already open file handle.
+
+It has been found that this helps with IBM Sterling SFTP servers which have
+"extractability" level set to 1 which means only 1 file can be opened at
+any given time.
+`,
+			Advanced: true,
+		}, {
+			Name:    "disable_concurrent_reads",
+			Default: false,
+			Help: `If set don't use concurrent reads
+
+Normally concurrent reads are safe to use and not using them will
+degrade performance, so this option is disabled by default.
+
+Some servers limit the number of times a file can be
+downloaded. Using concurrent reads can trigger this limit, so if you
+have a server which returns
+
+    Failed to copy: file does not exist
+
+Then you may need to enable this flag.
+
+If concurrent reads are disabled, the use_fstat option is ignored.
+`,
+			Advanced: true,
+		}, {
+			Name:    "idle_timeout",
+			Default: fs.Duration(60 * time.Second),
+			Help: `Max time before closing idle connections
+
+If no connections have been returned to the connection pool in the time
+given, rclone will empty the connection pool.
+
+Set to 0 to keep connections indefinitely.
+`,
+			Advanced: true,
 		}},
 	}
 	fs.Register(fsi)
@@ -183,24 +244,29 @@ The subsystem option is ignored when server_command is defined.`,

 // Options defines the configuration for this backend
 type Options struct {
-	Host              string `config:"host"`
-	User              string `config:"user"`
-	Port              string `config:"port"`
-	Pass              string `config:"pass"`
-	KeyPem            string `config:"key_pem"`
-	KeyFile           string `config:"key_file"`
-	KeyFilePass       string `config:"key_file_pass"`
-	KeyUseAgent       bool   `config:"key_use_agent"`
-	UseInsecureCipher bool   `config:"use_insecure_cipher"`
-	DisableHashCheck  bool   `config:"disable_hashcheck"`
-	AskPassword       bool   `config:"ask_password"`
-	PathOverride      string `config:"path_override"`
-	SetModTime        bool   `config:"set_modtime"`
-	Md5sumCommand     string `config:"md5sum_command"`
-	Sha1sumCommand    string `config:"sha1sum_command"`
-	SkipLinks         bool   `config:"skip_links"`
-	Subsystem         string `config:"subsystem"`
-	ServerCommand     string `config:"server_command"`
+	Host                   string      `config:"host"`
+	User                   string      `config:"user"`
+	Port                   string      `config:"port"`
+	Pass                   string      `config:"pass"`
+	KeyPem                 string      `config:"key_pem"`
+	KeyFile                string      `config:"key_file"`
+	KeyFilePass            string      `config:"key_file_pass"`
+	PubKeyFile             string      `config:"pubkey_file"`
+	KnownHostsFile         string      `config:"known_hosts_file"`
+	KeyUseAgent            bool        `config:"key_use_agent"`
+	UseInsecureCipher      bool        `config:"use_insecure_cipher"`
+	DisableHashCheck       bool        `config:"disable_hashcheck"`
+	AskPassword            bool        `config:"ask_password"`
+	PathOverride           string      `config:"path_override"`
+	SetModTime             bool        `config:"set_modtime"`
+	Md5sumCommand          string      `config:"md5sum_command"`
+	Sha1sumCommand         string      `config:"sha1sum_command"`
+	SkipLinks              bool        `config:"skip_links"`
+	Subsystem              string      `config:"subsystem"`
+	ServerCommand          string      `config:"server_command"`
+	UseFstat               bool        `config:"use_fstat"`
+	DisableConcurrentReads bool        `config:"disable_concurrent_reads"`
+	IdleTimeout            fs.Duration `config:"idle_timeout"`
 }

 // Fs stores the interface to the remote SFTP files
@@ -209,6 +275,7 @@ type Fs struct {
 	root     string
 	absRoot  string
 	opt      Options          // parsed options
+	ci       *fs.ConfigInfo   // global config
 	m        configmap.Mapper // config
 	features *fs.Features     // optional features
 	config   *ssh.ClientConfig
@@ -217,7 +284,10 @@ type Fs struct {
 	cachedHashes *hash.Set
 	poolMu       sync.Mutex
 	pool         []*conn
-	pacer        *fs.Pacer // pacer for operations
+	drain        *time.Timer // used to drain the pool when we stop using the connections
+	pacer        *fs.Pacer   // pacer for operations
+	savedpswd    string
+	transfers    int32 // count in use references
 }

 // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -231,25 +301,11 @@ type Object struct {
 	sha1sum *string // Cached SHA1 checksum
 }

-// readCurrentUser finds the current user name or "" if not found
-func readCurrentUser() (userName string) {
-	usr, err := user.Current()
-	if err == nil {
-		return usr.Username
-	}
-	// Fall back to reading $USER then $LOGNAME
-	userName = os.Getenv("USER")
-	if userName != "" {
-		return userName
-	}
-	return os.Getenv("LOGNAME")
-}
-
 // dial starts a client connection to the given SSH server. It is a
 // convenience function that connects to the given network address,
 // initiates the SSH handshake, and then sets up a Client.
-func (f *Fs) dial(network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
-	dialer := fshttp.NewDialer(fs.Config)
+func (f *Fs) dial(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
+	dialer := fshttp.NewDialer(ctx)
 	conn, err := dialer.Dial(network, addr)
 	if err != nil {
 		return nil, err
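
The removed readCurrentUser helper is not lost: the same user.Current, then $USER, then $LOGNAME fallback now lives in lib/env as env.CurrentUser, where other backends can share it. The logic, preserved as a standalone sketch:

package main

import (
	"fmt"
	"os"
	"os/user"
)

// currentUser finds the current user name or "" if not found,
// mirroring the helper that moved into lib/env.
func currentUser() string {
	if usr, err := user.Current(); err == nil {
		return usr.Username
	}
	// Fall back to reading $USER then $LOGNAME
	if name := os.Getenv("USER"); name != "" {
		return name
	}
	return os.Getenv("LOGNAME")
}

func main() {
	fmt.Println(currentUser())
}
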
@@ -294,13 +350,30 @@ func (c *conn) closed() error {
 	return nil
 }

+// Show that we are doing an upload or download
+//
+// Call removeTransfer() when done
+func (f *Fs) addTransfer() {
+	atomic.AddInt32(&f.transfers, 1)
+}
+
+// Show the upload or download done
+func (f *Fs) removeTransfer() {
+	atomic.AddInt32(&f.transfers, -1)
+}
+
+// getTransfers shows whether there are any transfers in progress
+func (f *Fs) getTransfers() int32 {
+	return atomic.LoadInt32(&f.transfers)
+}
+
 // Open a new connection to the SFTP server.
-func (f *Fs) sftpConnection() (c *conn, err error) {
+func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
 	// Rate limit rate of new connections
 	c = &conn{
 		err: make(chan error, 1),
 	}
-	c.sshClient, err = f.dial("tcp", f.opt.Host+":"+f.opt.Port, f.config)
+	c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't connect SSH")
 	}
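
addTransfer and removeTransfer above keep a lock-free count of in-flight uploads and downloads so the pool drainer can tell when closing idle connections is safe. The counting pattern in isolation, with the pool type reduced to a stub:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type pool struct{ transfers int32 }

func (p *pool) addTransfer()        { atomic.AddInt32(&p.transfers, 1) }
func (p *pool) removeTransfer()     { atomic.AddInt32(&p.transfers, -1) }
func (p *pool) getTransfers() int32 { return atomic.LoadInt32(&p.transfers) }

func main() {
	p := &pool{}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			p.addTransfer()          // transfer starts
			defer p.removeTransfer() // and is always marked done
		}()
	}
	wg.Wait()
	fmt.Println(p.getTransfers()) // 0: every add was paired with a remove
}
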
@@ -338,12 +411,22 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
 			return nil, err
 		}
 	}
+	opts = opts[:len(opts):len(opts)] // make sure we don't overwrite the callers opts
+	opts = append(opts,
+		sftp.UseFstat(f.opt.UseFstat),
+		// FIXME disabled after library reversion
+		// sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
+	)
+	if f.opt.DisableConcurrentReads { // FIXME
+		fs.Errorf(f, "Ignoring disable_concurrent_reads after library reversion - see #5197")
+	}

 	return sftp.NewClientPipe(pr, pw, opts...)
 }

 // Get an SFTP connection from the pool, or open a new one
-func (f *Fs) getSftpConnection() (c *conn, err error) {
+func (f *Fs) getSftpConnection(ctx context.Context) (c *conn, err error) {
+	accounting.LimitTPS(ctx)
 	f.poolMu.Lock()
 	for len(f.pool) > 0 {
 		c = f.pool[0]
@@ -360,7 +443,7 @@ func (f *Fs) getSftpConnection() (c *conn, err error) {
 		return c, nil
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		c, err = f.sftpConnection()
+		c, err = f.sftpConnection(ctx)
 		if err != nil {
 			return true, err
 		}
@@ -404,13 +487,51 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
 	}
 	f.poolMu.Lock()
 	f.pool = append(f.pool, c)
+	if f.opt.IdleTimeout > 0 {
+		f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
+	}
 	f.poolMu.Unlock()
 }

+// Drain the pool of any connections
+func (f *Fs) drainPool(ctx context.Context) (err error) {
+	f.poolMu.Lock()
+	defer f.poolMu.Unlock()
+	if transfers := f.getTransfers(); transfers != 0 {
+		fs.Debugf(f, "Not closing %d unused connections as %d transfers in progress", len(f.pool), transfers)
+		if f.opt.IdleTimeout > 0 {
+			f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
+		}
+		return nil
+	}
+	if f.opt.IdleTimeout > 0 {
+		f.drain.Stop()
+	}
+	if len(f.pool) != 0 {
+		fs.Debugf(f, "closing %d unused connections", len(f.pool))
+	}
+	for i, c := range f.pool {
+		if cErr := c.closed(); cErr == nil {
+			cErr = c.close()
+			if cErr != nil {
+				err = cErr
+			}
+		}
+		f.pool[i] = nil
+	}
+	f.pool = nil
+	return err
+}
+
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// This will hold the Fs object. We need to create it here
+	// so we can refer to it in the SSH callback, but it's populated
+	// in NewFsWithConnection
+	f := &Fs{
+		ci: fs.GetConfig(ctx),
+	}
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
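
putSftpConnection nudges the drain timer every time a connection comes back, and drainPool fires only after idle_timeout passes with no such activity. A reduced sketch of that reset-on-activity idiom (types and durations here are illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

type connPool struct {
	mu    sync.Mutex
	conns []string
	drain *time.Timer
}

func newConnPool(idle time.Duration) *connPool {
	p := &connPool{}
	p.drain = time.AfterFunc(idle, p.drainPool) // fires only if idle long enough
	return p
}

func (p *connPool) put(c string, idle time.Duration) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.conns = append(p.conns, c)
	p.drain.Reset(idle) // activity: push the drain deadline back
}

func (p *connPool) drainPool() {
	p.mu.Lock()
	defer p.mu.Unlock()
	fmt.Printf("closing %d unused connections\n", len(p.conns))
	p.conns = nil
}

func main() {
	p := newConnPool(30 * time.Millisecond)
	p.put("conn1", 30*time.Millisecond)
	time.Sleep(60 * time.Millisecond) // stay idle past the timeout
}
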
@@ -423,12 +544,21 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if opt.Port == "" {
 		opt.Port = "22"
 	}

 	sshConfig := &ssh.ClientConfig{
 		User:            opt.User,
 		Auth:            []ssh.AuthMethod{},
 		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
-		Timeout:         fs.Config.ConnectTimeout,
-		ClientVersion:   "SSH-2.0-" + fs.Config.UserAgent,
+		Timeout:         f.ci.ConnectTimeout,
+		ClientVersion:   "SSH-2.0-" + f.ci.UserAgent,
 	}

+	if opt.KnownHostsFile != "" {
+		hostcallback, err := knownhosts.New(opt.KnownHostsFile)
+		if err != nil {
+			return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
+		}
+		sshConfig.HostKeyCallback = hostcallback
+	}
+
 	if opt.UseInsecureCipher {
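
When known_hosts_file is set, the backend swaps ssh.InsecureIgnoreHostKey for a callback built by golang.org/x/crypto/ssh/knownhosts, so connections to hosts with missing or changed keys fail. The wiring in isolation (the file path is an example):

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

func main() {
	// Build a HostKeyCallback from an OpenSSH known_hosts file.
	// Hosts whose key is absent or different are rejected.
	hostcallback, err := knownhosts.New("/home/user/.ssh/known_hosts")
	if err != nil {
		log.Fatalf("couldn't parse known_hosts_file: %v", err)
	}
	config := &ssh.ClientConfig{
		User:            "user",
		HostKeyCallback: hostcallback, // instead of ssh.InsecureIgnoreHostKey()
	}
	_ = config // pass to ssh.Dial in real use
}
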
@@ -438,6 +568,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}

 	keyFile := env.ShellExpand(opt.KeyFile)
+	pubkeyFile := env.ShellExpand(opt.PubKeyFile)
 	//keyPem := env.ShellExpand(opt.KeyPem)
 	// Add ssh agent-auth if no password or file or key PEM specified
 	if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
@@ -507,7 +638,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to parse private key file")
 		}
-		sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
+
+		// If a public key has been specified then use that
+		if pubkeyFile != "" {
+			certfile, err := ioutil.ReadFile(pubkeyFile)
+			if err != nil {
+				return nil, errors.Wrap(err, "unable to read cert file")
+			}
+
+			pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
+			if err != nil {
+				return nil, errors.Wrap(err, "unable to parse cert file")
+			}
+
+			// And the signer for this, which includes the private key signer
+			// This is what we'll pass to the ssh client.
+			// Normally the ssh client will use the public key built
+			// into the private key, but we need to tell it to use the user
+			// specified public key cert. This signer is specific to the
+			// cert and will include the private key signer. Now ssh
+			// knows everything it needs.
+			cert, ok := pk.(*ssh.Certificate)
+			if !ok {
+				return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
+			}
+			pubsigner, err := ssh.NewCertSigner(cert, signer)
+			if err != nil {
+				return nil, errors.Wrap(err, "error generating cert signer")
+			}
+			sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(pubsigner))
+		} else {
+			sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
+		}
 	}

 	// Auth from password if specified
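
The pubkey_file branch above parses an OpenSSH certificate and wraps it around the private-key signer with ssh.NewCertSigner, so the client offers the signed certificate instead of the bare public key. A runnable sketch of the same call sequence; it self-signs a throwaway key purely so the example needs no key files, whereas a real deployment loads a CA-signed certificate:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Generate a throwaway user key pair (stands in for key_file).
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromSigner(priv)
	if err != nil {
		log.Fatal(err)
	}

	// Build and sign a certificate for it. Normally ssh-keygen -s does
	// this with the CA key; here we self-sign just to have a valid cert.
	cert := &ssh.Certificate{
		Key:             signer.PublicKey(),
		CertType:        ssh.UserCert,
		ValidPrincipals: []string{"user"},
		ValidBefore:     ssh.CertTimeInfinity,
	}
	if err := cert.SignCert(rand.Reader, signer); err != nil {
		log.Fatal(err)
	}

	// The combined signer presents the certificate but signs with the
	// private key; this is what gets appended to sshConfig.Auth.
	pubsigner, err := ssh.NewCertSigner(cert, signer)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pubsigner.PublicKey().Type()) // ssh-ed25519-cert-v01@openssh.com
}
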
@@ -516,39 +678,81 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		if err != nil {
 			return nil, err
 		}
-		sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
+		sshConfig.Auth = append(sshConfig.Auth,
+			ssh.Password(clearpass),
+			ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
+				return f.keyboardInteractiveReponse(user, instruction, questions, echos, clearpass)
+			}),
+		)
 	}

-	// Ask for password if none was defined and we're allowed to
+	// Config for password if none was defined and we're allowed to
+	// We don't ask now; we ask if the ssh connection succeeds
 	if opt.Pass == "" && opt.AskPassword {
-		_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
-		clearpass := config.ReadPassword()
-		sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
+		sshConfig.Auth = append(sshConfig.Auth,
+			ssh.PasswordCallback(f.getPass),
+			ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
+				pass, _ := f.getPass()
+				return f.keyboardInteractiveReponse(user, instruction, questions, echos, pass)
+			}),
+		)
 	}

-	return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
+	return NewFsWithConnection(ctx, f, name, root, m, opt, sshConfig)
 }

+// Do the keyboard interactive challenge
+//
+// Just send the password back for all questions
+func (f *Fs) keyboardInteractiveReponse(user, instruction string, questions []string, echos []bool, pass string) ([]string, error) {
+	fs.Debugf(f, "keyboard interactive auth requested")
+	answers := make([]string, len(questions))
+	for i := range answers {
+		answers[i] = pass
+	}
+	return answers, nil
+}
+
+// If we're in password mode and ssh connection succeeds then this
+// callback is called. First time around we ask the user, and then
+// save it so on reconnection we give back the previous string.
+// This removes the ability to let the user correct a mistaken entry,
+// but means that reconnects are transparent.
+// We'll re-use config.Pass for this, 'cos we know it's not been
+// specified.
+func (f *Fs) getPass() (string, error) {
+	for f.savedpswd == "" {
+		_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
+		f.savedpswd = config.ReadPassword()
+	}
+	return f.savedpswd, nil
+}
+
 // NewFsWithConnection creates a new Fs object from the name and root and an ssh.ClientConfig. It connects to
 // the host specified in the ssh.ClientConfig
-func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
-	f := &Fs{
-		name:      name,
-		root:      root,
-		absRoot:   root,
-		opt:       *opt,
-		m:         m,
-		config:    sshConfig,
-		url:       "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
-		mkdirLock: newStringLock(),
-		pacer:     fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-	}
+func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
+	// Populate the Filesystem Object
+	f.name = name
+	f.root = root
+	f.absRoot = root
+	f.opt = *opt
+	f.m = m
+	f.config = sshConfig
+	f.url = "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root
+	f.mkdirLock = newStringLock()
+	f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
+	f.savedpswd = ""
+	// set the pool drainer timer going
+	if f.opt.IdleTimeout > 0 {
+		f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
+	}

 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 		SlowHash:                true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	// Make a connection and pool it to return errors early
-	c, err := f.getSftpConnection()
+	c, err := f.getSftpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "NewFs")
 	}
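
With ask_password set, prompting moves out of NewFs into an ssh.PasswordCallback, so the user is asked only when the server actually wants a password, and the saved answer is replayed on pool reconnects. The callback shape in isolation, with the interactive prompt replaced by a constant so the sketch runs unattended:

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

type fsStub struct{ savedpswd string }

// getPass asks once and then replays the saved password, which keeps
// pooled reconnections transparent to the user.
func (f *fsStub) getPass() (string, error) {
	for f.savedpswd == "" {
		// The real backend prompts on stderr via config.ReadPassword().
		f.savedpswd = "secret"
	}
	return f.savedpswd, nil
}

func main() {
	f := &fsStub{}
	auth := []ssh.AuthMethod{
		ssh.PasswordCallback(f.getPass), // invoked only when the server asks
		ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
			pass, _ := f.getPass()
			answers := make([]string, len(questions))
			for i := range answers {
				answers[i] = pass // send the password back for every question
			}
			return answers, nil
		}),
	}
	fmt.Println(len(auth), "auth methods configured")
}
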
@@ -616,7 +820,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 		fs:     f,
 		remote: remote,
 	}
-	err := o.stat()
+	err := o.stat(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -625,11 +829,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

 // dirExists returns true,nil if the directory exists, false, nil if
 // it doesn't or false, err
-func (f *Fs) dirExists(dir string) (bool, error) {
+func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
 	if dir == "" {
 		dir = "."
 	}
-	c, err := f.getSftpConnection()
+	c, err := f.getSftpConnection(ctx)
 	if err != nil {
 		return false, errors.Wrap(err, "dirExists")
 	}
@@ -658,7 +862,7 @@ func (f *Fs) dirExists(dir string) (bool, error) {
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	root := path.Join(f.absRoot, dir)
-	ok, err := f.dirExists(root)
+	ok, err := f.dirExists(ctx, root)
 	if err != nil {
 		return nil, errors.Wrap(err, "List failed")
 	}
@@ -669,7 +873,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	if sftpDir == "" {
 		sftpDir = "."
 	}
-	c, err := f.getSftpConnection()
+	c, err := f.getSftpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "List")
 	}
@@ -688,7 +892,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			continue
 		}
 		oldInfo := info
-		info, err = f.stat(remote)
+		info, err = f.stat(ctx, remote)
 		if err != nil {
 			if !os.IsNotExist(err) {
 				fs.Errorf(remote, "stat of non-regular file failed: %v", err)
@@ -713,7 +917,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

 // Put data from <in> into a new remote sftp file object described by <src.Remote()> and <src.ModTime(ctx)>
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	err := f.mkParentDir(src.Remote())
+	err := f.mkParentDir(ctx, src.Remote())
 	if err != nil {
 		return nil, errors.Wrap(err, "Put mkParentDir failed")
 	}
@@ -736,19 +940,19 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

 // mkParentDir makes the parent of remote if necessary and any
 // directories above that
-func (f *Fs) mkParentDir(remote string) error {
+func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
 	parent := path.Dir(remote)
-	return f.mkdir(path.Join(f.absRoot, parent))
+	return f.mkdir(ctx, path.Join(f.absRoot, parent))
 }

 // mkdir makes the directory and parents using native paths
-func (f *Fs) mkdir(dirPath string) error {
+func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
 	f.mkdirLock.Lock(dirPath)
 	defer f.mkdirLock.Unlock(dirPath)
 	if dirPath == "." || dirPath == "/" {
 		return nil
 	}
-	ok, err := f.dirExists(dirPath)
+	ok, err := f.dirExists(ctx, dirPath)
 	if err != nil {
 		return errors.Wrap(err, "mkdir dirExists failed")
 	}
@@ -756,11 +960,11 @@ func (f *Fs) mkdir(dirPath string) error {
 		return nil
 	}
 	parent := path.Dir(dirPath)
-	err = f.mkdir(parent)
+	err = f.mkdir(ctx, parent)
 	if err != nil {
 		return err
 	}
-	c, err := f.getSftpConnection()
+	c, err := f.getSftpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "mkdir")
 	}
@@ -775,7 +979,7 @@ func (f *Fs) mkdir(dirPath string) error {
 // Mkdir makes the root directory of the Fs object
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	root := path.Join(f.absRoot, dir)
-	return f.mkdir(root)
+	return f.mkdir(ctx, root)
 }

 // Rmdir removes the root directory of the Fs object
@@ -791,7 +995,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	}
 	// Remove the directory
 	root := path.Join(f.absRoot, dir)
-	c, err := f.getSftpConnection()
+	c, err := f.getSftpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "Rmdir")
 	}
@@ -807,11 +1011,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
-	err := f.mkParentDir(remote)
+	err := f.mkParentDir(ctx, remote)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move mkParentDir failed")
 	}
-	c, err := f.getSftpConnection()
+	c, err := f.getSftpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move")
 	}
@@ -831,7 +1035,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }

 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -848,7 +1052,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	dstPath := path.Join(f.absRoot, dstRemote)

 	// Check if destination exists
-	ok, err := f.dirExists(dstPath)
+	ok, err := f.dirExists(ctx, dstPath)
 	if err != nil {
 		return errors.Wrap(err, "DirMove dirExists dst failed")
 	}
@@ -857,13 +1061,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	}

 	// Make sure the parent directory exists
-	err = f.mkdir(path.Dir(dstPath))
+	err = f.mkdir(ctx, path.Dir(dstPath))
 	if err != nil {
 		return errors.Wrap(err, "DirMove mkParentDir dst failed")
 	}

 	// Do the move
-	c, err := f.getSftpConnection()
+	c, err := f.getSftpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "DirMove")
 	}
@@ -879,8 +1083,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 }

 // run runs cmd on the remote end returning standard output
-func (f *Fs) run(cmd string) ([]byte, error) {
-	c, err := f.getSftpConnection()
+func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
+	c, err := f.getSftpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "run: get SFTP connection")
 	}
@@ -888,7 +1092,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {

 	session, err := c.sshClient.NewSession()
 	if err != nil {
-		return nil, errors.Wrap(err, "run: get SFTP sessiion")
+		return nil, errors.Wrap(err, "run: get SFTP session")
 	}
 	defer func() {
 		_ = session.Close()
@@ -908,6 +1112,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {

 // Hashes returns the supported hash types of the filesystem
 func (f *Fs) Hashes() hash.Set {
+	ctx := context.TODO()
 	if f.opt.DisableHashCheck {
 		return hash.Set(hash.None)
 	}
@@ -926,7 +1131,7 @@ func (f *Fs) Hashes() hash.Set {
 		}
 		*changed = true
 		for _, command := range commands {
-			output, err := f.run(command)
+			output, err := f.run(ctx, command)
 			if err != nil {
 				continue
 			}
@@ -971,7 +1176,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	if len(escapedPath) == 0 {
 		escapedPath = "/"
 	}
-	stdout, err := f.run("df -k " + escapedPath)
+	stdout, err := f.run(ctx, "df -k "+escapedPath)
 	if err != nil {
 		return nil, errors.Wrap(err, "your remote may not support About")
 	}
@@ -990,6 +1195,12 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	return usage, nil
 }

+// Shutdown the backend, closing any background tasks and any
+// cached connections.
+func (f *Fs) Shutdown(ctx context.Context) error {
+	return f.drainPool(ctx)
+}
+
 // Fs is the filesystem this remote sftp file object is located within
 func (o *Object) Fs() fs.Info {
 	return o.fs
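
The new Shutdown method gives rclone a hook for draining the pool when a backend is discarded; fs.Shutdowner is one of rclone's optional interfaces, which callers discover by type assertion, roughly as in this sketch (the interface and names are mirrored locally for illustration):

package main

import (
	"context"
	"fmt"
)

// shutdowner mirrors the optional fs.Shutdowner interface.
type shutdowner interface {
	Shutdown(ctx context.Context) error
}

type backend struct{}

func (b *backend) Shutdown(ctx context.Context) error {
	fmt.Println("draining connection pool")
	return nil
}

// finalize shows how a caller probes for the optional interface.
func finalize(ctx context.Context, b interface{}) {
	if s, ok := b.(shutdowner); ok {
		_ = s.Shutdown(ctx) // only called if the backend implements it
	}
}

func main() {
	finalize(context.Background(), &backend{})
}
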
@@ -1034,7 +1245,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 		return "", hash.ErrUnsupported
 	}

-	c, err := o.fs.getSftpConnection()
+	c, err := o.fs.getSftpConnection(ctx)
 	if err != nil {
 		return "", errors.Wrap(err, "Hash get SFTP connection")
 	}
@@ -1087,7 +1298,7 @@ func shellEscape(str string) string {
 func parseHash(bytes []byte) string {
 	// For strings with backslash *sum writes a leading \
 	// https://unix.stackexchange.com/q/313733/94054
-	return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
+	return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator and convert to lowercase
 }

 // Parses the byte array output from the SSH session
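
parseHash above trims the leading backslash that md5sum and sha1sum emit for escaped filenames, keeps the digest column, and now lowercases it so servers that print uppercase digests still compare equal to rclone's lowercase hex. The transformation by itself:

package main

import (
	"fmt"
	"strings"
)

// parseHash mirrors the helper: strip the leading "\" that md5sum and
// sha1sum emit for escaped filenames, keep the hash column, lowercase it.
func parseHash(out []byte) string {
	return strings.ToLower(strings.Split(strings.TrimLeft(string(out), "\\"), " ")[0])
}

func main() {
	// A typical `md5sum file` output line; some systems print uppercase digests.
	out := []byte("\\D41D8CD98F00B204E9800998ECF8427E  back\\\\slash.txt\n")
	fmt.Println(parseHash(out)) // d41d8cd98f00b204e9800998ecf8427e
}
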
@@ -1142,8 +1353,8 @@ func (o *Object) setMetadata(info os.FileInfo) {
 }

 // statRemote stats the file or directory at the remote given
-func (f *Fs) stat(remote string) (info os.FileInfo, err error) {
-	c, err := f.getSftpConnection()
+func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
+	c, err := f.getSftpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "stat")
 	}
@@ -1154,8 +1365,8 @@ func (f *Fs) stat(remote string) (info os.FileInfo, err error) {
 }

 // stat updates the info in the Object
-func (o *Object) stat() error {
-	info, err := o.fs.stat(o.remote)
+func (o *Object) stat(ctx context.Context) error {
+	info, err := o.fs.stat(ctx, o.remote)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return fs.ErrorObjectNotFound
@@ -1173,43 +1384,48 @@ func (o *Object) stat() error {
 //
 // it also updates the info field
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	if o.fs.opt.SetModTime {
-		c, err := o.fs.getSftpConnection()
-		if err != nil {
-			return errors.Wrap(err, "SetModTime")
-		}
-		err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
-		o.fs.putSftpConnection(&c, err)
-		if err != nil {
-			return errors.Wrap(err, "SetModTime failed")
-		}
+	if !o.fs.opt.SetModTime {
+		return nil
 	}
-	err := o.stat()
+	c, err := o.fs.getSftpConnection(ctx)
+	if err != nil {
+		return errors.Wrap(err, "SetModTime")
+	}
+	err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
+	o.fs.putSftpConnection(&c, err)
+	if err != nil {
+		return errors.Wrap(err, "SetModTime failed")
+	}
+	err = o.stat(ctx)
 	if err != nil {
 		return errors.Wrap(err, "SetModTime stat failed")
 	}
 	return nil
 }

-// Storable returns whether the remote sftp file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
+// Storable returns whether the remote sftp file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
 func (o *Object) Storable() bool {
 	return o.mode.IsRegular()
 }

 // objectReader represents a file open for reading on the SFTP server
 type objectReader struct {
+	f          *Fs
 	sftpFile   *sftp.File
 	pipeReader *io.PipeReader
 	done       chan struct{}
 }

-func newObjectReader(sftpFile *sftp.File) *objectReader {
+func (f *Fs) newObjectReader(sftpFile *sftp.File) *objectReader {
 	pipeReader, pipeWriter := io.Pipe()
 	file := &objectReader{
+		f:          f,
 		sftpFile:   sftpFile,
 		pipeReader: pipeReader,
 		done:       make(chan struct{}),
 	}
+	// Show connection in use
+	f.addTransfer()

 	go func() {
 		// Use sftpFile.WriteTo to pump data so that it gets a
@@ -1239,6 +1455,8 @@ func (file *objectReader) Close() (err error) {
 	_ = file.pipeReader.Close()
 	// Wait for the background process to finish
 	<-file.done
+	// Show connection no longer in use
+	file.f.removeTransfer()
 	return err
 }

@@ -1257,7 +1475,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 			}
 		}
 	}
-	c, err := o.fs.getSftpConnection()
+	c, err := o.fs.getSftpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "Open")
 	}
@@ -1272,16 +1490,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 			return nil, errors.Wrap(err, "Open Seek failed")
 		}
 	}
-	in = readers.NewLimitedReadCloser(newObjectReader(sftpFile), limit)
+	in = readers.NewLimitedReadCloser(o.fs.newObjectReader(sftpFile), limit)
 	return in, nil
 }

 // Update a remote sftp file using the data <in> and ModTime from <src>
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	o.fs.addTransfer() // Show transfer in progress
+	defer o.fs.removeTransfer()
 	// Clear the hash cache since we are about to update the object
 	o.md5sum = nil
 	o.sha1sum = nil
-	c, err := o.fs.getSftpConnection()
+	c, err := o.fs.getSftpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "Update")
 	}
@@ -1292,7 +1512,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 	// remove the file if upload failed
 	remove := func() {
-		c, removeErr := o.fs.getSftpConnection()
+		c, removeErr := o.fs.getSftpConnection(ctx)
 		if removeErr != nil {
 			fs.Debugf(src, "Failed to open new SSH connection for delete: %v", removeErr)
 			return
@@ -1315,16 +1535,34 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		remove()
 		return errors.Wrap(err, "Update Close failed")
 	}

+	// Set the mod time - this stats the object if o.fs.opt.SetModTime == true
 	err = o.SetModTime(ctx, src.ModTime(ctx))
 	if err != nil {
 		return errors.Wrap(err, "Update SetModTime failed")
 	}
+
+	// Stat the file after the upload to read its stats back if o.fs.opt.SetModTime == false
+	if !o.fs.opt.SetModTime {
+		err = o.stat(ctx)
+		if err == fs.ErrorObjectNotFound {
+			// In the specific case of o.fs.opt.SetModTime == false
+			// if the object wasn't found then don't return an error
+			fs.Debugf(o, "Not found after upload with set_modtime=false so returning best guess")
+			o.modTime = src.ModTime(ctx)
+			o.size = src.Size()
+			o.mode = os.FileMode(0666) // regular file
+		} else if err != nil {
+			return errors.Wrap(err, "Update stat failed")
+		}
+	}
+
 	return nil
 }

 // Remove a remote sftp file object
 func (o *Object) Remove(ctx context.Context) error {
-	c, err := o.fs.getSftpConnection()
+	c, err := o.fs.getSftpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "Remove")
 	}
@@ -1340,5 +1578,6 @@ var (
 	_ fs.Mover      = &Fs{}
 	_ fs.DirMover   = &Fs{}
 	_ fs.Abouter    = &Fs{}
+	_ fs.Shutdowner = &Fs{}
 	_ fs.Object     = &Object{}
 )

@@ -95,7 +95,7 @@ type UploadSpecification struct {
 	ChunkURI       string `json:"ChunkUri"`       // Specifies the URI the client must send the file data to
 	FinishURI      string `json:"FinishUri"`      // If provided, specifies the final call the client must perform to finish the upload process
 	ProgressData   string `json:"ProgressData"`   // Allows the client to check progress of standard uploads
-	IsResume       bool   `json:"IsResume"`       // Specifies a Resumable upload is supproted.
+	IsResume       bool   `json:"IsResume"`       // Specifies a Resumable upload is supported.
 	ResumeIndex    int64  `json:"ResumeIndex"`    // Specifies the initial index for resuming, if IsResume is true.
 	ResumeOffset   int64  `json:"ResumeOffset"`   // Specifies the initial file offset by bytes, if IsResume is true
 	ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server