Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: fix-oauth-... vs 31df39d356 (924 commits)
The commit listing is an Author / SHA1 / Date table with 924 entries, but only the abbreviated SHA1 column survived extraction; the list runs from 31df39d356 (first entry) to 29fd894189 (last entry). Authors, dates and commit messages were not captured.
.github/workflows/build.yml (vendored): 92 changed lines
The captured diff view lost its +/- markers and line-number columns; hunk by hunk, the changes are:

- @@ -23,15 +23,18 @@ (jobs): the build job gains a job-level defaults: run: shell: bash block; the matrix job_name list drops 'go1.21' and 'go1.22' in favour of 'go1.24'; the linux entry moves from go: '>=1.23.0-rc.1' to go: '>=1.25.0-rc.1'.
- @@ -42,14 +45,14 @@ and @@ -58,14 +61,14 @@: the linux_386, mac_amd64, mac_arm64 and windows entries likewise move from go: '>=1.23.0-rc.1' to go: '>=1.25.0-rc.1'.
- @@ -75,20 +78,14 @@: other_os moves to go: '>=1.25.0-rc.1'; the separate go1.21 (go: '1.21') and go1.22 (go: '1.22') matrix entries are replaced by a single go1.24 entry (go: '1.24') that keeps quicktest: true and racequicktest: true.
- @@ -98,18 +95,17 @@: actions/checkout is bumped from v4 to v5 and actions/setup-go from v5 to v6.
- @@ -118,16 +114,15 @@: the "Install Libraries on Linux" step now runs sudo apt-get update before sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common.
- @@ -156,7 +151,6 @@, @@ -168,29 +162,24 @@ and @@ -198,14 +187,12 @@: the per-step shell: bash lines on the Print Go version, Build rclone, Rclone version, Run tests, Race test, Run librclone tests, Compile all architectures test and Deploy built binaries steps are removed, now covered by the new job-level default.
- @@ -224,19 +211,20 @@: the lint job moves to actions/checkout@v5 and actions/setup-go@v6, and its go-version from '>=1.23.0-rc.1' to '>=1.24.0-rc.1'.
- @@ -251,13 +239,13 @@ through @@ -281,7 +269,7 @@: the Linux, Windows, macOS, FreeBSD and OpenBSD code quality steps move from golangci/golangci-lint-action@v6 to @v9.
- @@ -294,6 +282,23 @@: a new "Check Markdown format" step is added after the govulncheck scan and before "Scan edits of autogenerated files", using DavidAnson/markdownlint-cli2-action@v20 with globs CONTRIBUTING.md, MAINTAINERS.md, README.md, RELEASE.md, CODE_OF_CONDUCT.md, librclone/README.md, backend/s3/README.md and docs/content/{_index,authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md.
- @@ -302,18 +307,17 @@: the android job moves to actions/checkout@v5 and actions/setup-go@v6, and its go-version from '>=1.23.0-rc.1' to '>=1.25.0-rc.1'.
- @@ -332,7 +336,6 @@, @@ -346,7 +349,6 @@, @@ -359,7 +361,6 @@ and @@ -372,7 +373,6 @@: single-line trims in the Android gomobile and cross-compile build steps.

A minimal sketch of the updated Go matrix and toolchain setup follows this list.
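To make the toolchain bump concrete, here is a minimal sketch (not the full rclone job, and trimmed to the keys discussed above) of how the updated matrix and the upgraded checkout/setup-go actions fit together; one entry follows the newest Go release candidate while the go1.24 entry pins a fixed toolchain for the quick and race tests:

```yaml
# Minimal sketch only; the real build.yml carries many more matrix entries and steps.
jobs:
  build:
    defaults:
      run:
        shell: bash             # job-level default added by this change
    strategy:
      fail-fast: false
      matrix:
        include:
          - job_name: linux
            os: ubuntu-latest
            go: '>=1.25.0-rc.1'   # track the newest release candidate
          - job_name: go1.24
            os: ubuntu-latest
            go: '1.24'            # pinned toolchain for quicktest/racequicktest
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Install Go
        uses: actions/setup-go@v6
        with:
          go-version: ${{ matrix.go }}
          check-latest: true
```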
@@ -1,77 +0,0 @@ Removed workflow (77 lines deleted; the page did not capture its file name): "Docker beta build". It ran on every push to master for rclone/rclone on ubuntu-latest: free disk space (drop the Android SDK and .NET runtime), actions/checkout@v4 with fetch-depth: 0, docker/login-action@v3 for Docker Hub (DOCKERHUB_USERNAME / DOCKERHUB_TOKEN) and for ghcr.io (github.actor with GITHUB_TOKEN), docker/metadata-action@v5 for ghcr.io/${{ github.repository }}, QEMU and Buildx setup, then a single docker/build-push-action@v6 build for linux/amd64, linux/386, linux/arm64, linux/arm/v7 and linux/arm/v6 pushing ghcr.io/rclone/rclone:beta and rclone/rclone:beta, with GitHub Actions cache (type=gha, scoped to the workflow) and provenance: false. Its role is taken over by the new build_publish_docker_image.yml workflow below.
.github/workflows/build_publish_docker_image.yml (vendored, new file): 294 lines
@@ -0,0 +1,294 @@ New workflow "Build & Push Docker Images" (294 lines added; the flattened listing is condensed to an outline here):

- Triggers: push to any branch or tag, plus workflow_dispatch with a manual boolean input (default true). The build-image job runs when triggered manually, or for rclone/rclone on anything except pull requests, with a 60 minute timeout.
- build-image is a fail-fast: false matrix over five platforms: linux/amd64 and linux/386 on ubuntu-24.04, and linux/arm64, linux/arm/v7 and linux/arm/v6 on ubuntu-24.04-arm.
- Its steps: free disk space (remove the Android SDK and .NET runtime); actions/checkout@v5 with fetch-depth: 0; set REPO_NAME (lower-cased github.repository) and PLATFORM (slashes replaced by dashes); a Python step that builds CACHE_NAME ("cache", with a slugified "-pr-<ref>" suffix on pull requests); read ImageOS via actions/github-script@v8; docker/metadata-action@v5 with manifest-level annotations, OCI labels, and tags of type sha, PR ref, branch ref, the semver patterns and a raw beta tag on the default branch; docker/setup-qemu-action@v3 and docker/setup-buildx-action@v3; actions/cache@v4 for the go-build-cache path, keyed on runner OS, ImageOS, CACHE_NAME, PLATFORM and the go.mod/go.sum hashes, injected into the build with reproducible-containers/buildkit-cache-dance@v3 (mapped to /root/.cache/go-build, extraction skipped on cache hit); login to ghcr.io with GITHUB_TOKEN; docker/build-push-action@v6 with provenance: false, no tags, registry-backed build cache (ghcr.io/<repo>:build-<cache>-<platform>, image-manifest=true, mode=max, zstd compression) and outputs that push by digest only; the digest is then written to /tmp/digests and uploaded as a digests-<platform> artifact (actions/upload-artifact@v5, 1 day retention, error if empty).
- merge-image (ubuntu-24.04, needs build-image): download and merge the digest artifacts (actions/download-artifact@v6, pattern digests-*); recompute REPO_NAME; docker/metadata-action@v5 with index-level annotations and the same label and tag rules for both Docker Hub (REPO_NAME) and ghcr.io; two Python steps turn the metadata JSON into TAGS (--tag arguments) and ANNOTATIONS (--annotation arguments); set up Buildx and log in to Docker Hub (DOCKERHUB_USERNAME / DOCKERHUB_TOKEN) and ghcr.io; docker buildx imagetools create combines the per-platform digests into one tagged manifest list; finally the multi-platform image is inspected on both registries and `rclone version` is run from it.

A condensed sketch of the digest-based push and merge steps follows this outline.
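The heart of the workflow is building each platform separately, pushing the result by digest only, and assembling a tagged manifest list afterwards; a condensed sketch of the two steps that carry this (taken from the listing above, with the surrounding job plumbing omitted):

```yaml
# In each per-platform build-image job: push an untagged image by digest.
- name: Build and Publish Image Digest
  id: build
  uses: docker/build-push-action@v6
  with:
    file: Dockerfile
    context: .
    provenance: false
    platforms: ${{ matrix.platform }}
    outputs: |
      type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true

# In the merge-image job: combine the collected digests into one manifest list.
- name: Create & Push Manifest List
  working-directory: /tmp/digests
  run: |
    docker buildx imagetools create \
      ${{ env.TAGS }} \
      ${{ env.ANNOTATIONS }} \
      $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
```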
.github/workflows/build_publish_docker_plugin.yml (vendored, new file): 49 lines
@@ -0,0 +1,49 @@ (new file, indentation reconstructed from the flattened listing)

---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-

name: Release Build for Docker Plugin

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build_docker_volume_plugin:
    if: inputs.manual || github.repository == 'rclone/rclone'
    name: Build docker plugin job
    runs-on: ubuntu-latest
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
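The plugin tags are derived with plain Bash parameter expansion inside the run block above; a small illustrative step (the v1.68.0 value is a made-up example, not taken from this diff) shows what the substitutions produce:

```yaml
# Illustration of the tag derivation used in the plugin build loop.
- name: Show derived plugin tags
  shell: bash
  run: |
    GITHUB_REF=refs/tags/v1.68.0          # example value for a release tag
    VER=${GITHUB_REF#refs/tags/}          # strips the prefix   -> v1.68.0
    PLUGIN_ARCH=arm/v7
    echo "${PLUGIN_ARCH/\//-}"            # slash becomes dash  -> arm-v7
    echo "${PLUGIN_ARCH/\//-}-${VER#v}"   # leading v stripped  -> arm-v7-1.68.0
```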
@@ -1,89 +0,0 @@ Removed workflow (89 lines deleted; the page did not capture its file name): "Docker release build", triggered on published releases. Its build job freed disk space, checked out with actions/checkout@v4, derived the patch, minor and major versions from the tag with the deprecated ::set-output command, set up QEMU and Buildx, logged in to Docker Hub (DOCKER_HUB_USER / DOCKER_HUB_PASSWORD) and pushed a multi-arch image (linux/amd64, linux/386, linux/arm64, linux/arm/v7, linux/arm/v6) via docker/build-push-action@v6 tagged rclone/rclone:latest plus the patch, minor and major version tags. A dependent build_docker_volume_plugin job then built and pushed the Docker volume plugin with the same make docker-plugin loop that now lives in the new build_publish_docker_plugin.yml workflow above. A sketch of the modern replacement for the ::set-output step follows.
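For comparison, the version-extraction steps in the removed workflow relied on the deprecated ::set-output workflow command; a current workflow writes to GITHUB_OUTPUT instead, the same pattern the updated build.yml lint job already uses for its runner parameters. A rough sketch of the equivalent step:

```yaml
# Modern replacement for the removed "::set-output" based step (sketch).
- name: Get actual patch version
  id: actual_patch_version
  run: echo "ACTUAL_PATCH_VERSION=$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')" >> "$GITHUB_OUTPUT"
```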
.golangci.yml: 261 changed lines
@@ -1,144 +1,151 @@ The .golangci.yml configuration is rewritten for the golangci-lint v2 format; the captured diff interleaves old and new lines without markers, so the change is given in outline:

- The new file declares version: "2" and, instead of the old disable-all: true plus enable list, sets linters.default: none and explicitly enables the linters in use (errcheck, gocritic, govet, ineffassign, misspell, revive, staticcheck and unconvert among them). The old separate gosimple and stylecheck entries disappear (their checks are part of staticcheck in v2), and goimports moves to a new formatters section.
- Per-linter configuration now lives under linters.settings: govet with enable-all: true except fieldalignment and shadow; staticcheck with checks set to all minus ST1000, ST1003, ST1016, ST1020, ST1021, ST1022 and the QF* quickfix checks; gocritic with disable-all: true and an explicit enabled-checks list (argOrder, assignOp, badCall, badCond, caseOrder, codegenComment, defaultCaseOrder, deprecatedComment, dupArg, dupBranchBody, dupCase, dupSubExpr, elseif, flagDeref, flagName, mapKey, newDeref, offBy1, regexpMust, ruleguard, sloppyLen, sloppyTypeAssert, switchTrue, typeSwitchVar, underef, unlambda, unslice, valSwap, wrapperFunc) plus ruleguard rules at ${base-path}/bin/rules.go; and an explicit revive rule list from blank-imports through var-naming, with empty-block, increment-decrement, redefines-builtin-id, superfluous-else, unreachable-code and unused-parameter left out.
- The trailing v1 sections captured here belong to the old layout: the issues block (exclude-use-default: false, unlimited max-issues-per-linter and max-same-issues, an exclusion for the SA1019 "github.com/rclone/rclone/cmd/serve/httplib" deprecation warning, and the EXC0012-EXC0015 includes) and the whole linters-settings block whose gocritic ruleguard rules pointed at "${configDir}/bin/rules.go". Without diff markers the capture does not show which of these survive in another form, but linters-settings is superseded by linters.settings above.
- run.timeout stays at 10m; its comment changes from the old "default is 1m" wording to the v2 "Default is 0 (disabled)" wording.

A minimal sketch of the new layout follows.
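A minimal sketch of the v2 layout the new configuration follows (trimmed to a few of the linters named above; not the complete rclone config):

```yaml
# golangci-lint v2 configuration sketch following the structure described above.
version: "2"
linters:
  default: none        # ignore the implicit default set
  enable:
    - errcheck
    - govet
    - revive
    - staticcheck
  settings:
    govet:
      enable-all: true
      disable:
        - fieldalignment
        - shadow
formatters:
  enable:
    - goimports
run:
  timeout: 10m
```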
.markdownlint.yml (new file): 72 lines
@@ -0,0 +1,72 @@
|
||||
default: true
|
||||
|
||||
# Use specific styles, to be consistent accross all documents.
|
||||
# Default is to accept any as long as it is consistent within the same document.
|
||||
heading-style: # MD003
|
||||
style: atx
|
||||
ul-style: # MD004
|
||||
style: dash
|
||||
hr-style: # MD035
|
||||
style: ---
|
||||
code-block-style: # MD046
|
||||
style: fenced
|
||||
code-fence-style: # MD048
|
||||
style: backtick
|
||||
emphasis-style: # MD049
|
||||
style: asterisk
|
||||
strong-style: # MD050
|
||||
style: asterisk
|
||||
|
||||
# Allow multiple headers with same text as long as they are not siblings.
|
||||
no-duplicate-heading: # MD024
|
||||
siblings_only: true
|
||||
|
||||
# Allow long lines in code blocks and tables.
|
||||
line-length: # MD013
|
||||
code_blocks: false
|
||||
tables: false
|
||||
|
||||
# The Markdown files used to generated docs with Hugo contain a top level
|
||||
# header, even though the YAML front matter has a title property (which is
|
||||
# used for the HTML document title only). Suppress Markdownlint warning:
|
||||
# Multiple top-level headings in the same document.
|
||||
single-title: # MD025
|
||||
level: 1
|
||||
front_matter_title:
|
||||
|
||||
# The HTML docs generated by Hugo from Markdown files may have slightly
|
||||
# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
|
||||
# leading dashes so "--config string" becomes "#config-string" while it is
|
||||
# "#--config-string" in GitHub preview. When writing links to headers in the
|
||||
# Markdown files we must use whatever works in the final HTML generated docs.
|
||||
# Suppress Markdownlint warning: Link fragments should be valid.
|
||||
link-fragments: false # MD051
|
||||
|
||||
# Restrict the languages and language identifiers to use for code blocks.
|
||||
# We only want those supported by both Hugo and GitHub. These are documented
|
||||
# here:
|
||||
# https://gohugo.io/content-management/syntax-highlighting/#languages
|
||||
# https://docs.github.com//get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks#syntax-highlighting
|
||||
# In addition, we only want to allow identifiers (aliases) that correspond to
|
||||
# the same language in Hugo and GitHub, and preferably also VSCode and other
|
||||
# commonly used tools, to avoid confusion. An example of this is that "shell"
|
||||
# is considered by some an identifier for shell scripts, i.e. an alias for
|
||||
# "sh", while others consider it an identifier for shell sessions, i.e. an
|
||||
# alias for "console". Although Hugo and GitHub in this case are consistent and
|
||||
# have chosen the former, using "sh" instead, and not allowing use of "shell",
|
||||
# avoids the confusion entirely.
|
||||
fenced-code-language: # MD040
|
||||
allowed_languages:
|
||||
- text
|
||||
- console
|
||||
- sh
|
||||
- bat
|
||||
- ini
|
||||
- json
|
||||
- yaml
|
||||
- go
|
||||
- python
|
||||
- c++
|
||||
- c#
|
||||
- java
|
||||
- powershell
|
||||
80
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,80 @@
|
||||
# Rclone Code of Conduct
|
||||
|
||||
Like the technical community as a whole, the Rclone team and community
|
||||
is made up of a mixture of professionals and volunteers from all over
|
||||
the world, working on every aspect of the mission - including
|
||||
mentorship, teaching, and connecting people.
|
||||
|
||||
Diversity is one of our huge strengths, but it can also lead to
|
||||
communication issues and unhappiness. To that end, we have a few
|
||||
ground rules that we ask people to adhere to. This code applies
|
||||
equally to founders, mentors and those seeking help and guidance.
|
||||
|
||||
This isn't an exhaustive list of things that you can't do. Rather,
|
||||
take it in the spirit in which it's intended - a guide to make it
|
||||
easier to enrich all of us and the technical communities in which we
|
||||
participate.
|
||||
|
||||
This code of conduct applies to all spaces managed by the Rclone
|
||||
project or Rclone Services Ltd. This includes the issue tracker, the
|
||||
forum, the GitHub site, the wiki, and any other online services or
|
||||
in-person events. In addition, violations of this code outside these
|
||||
spaces may affect a person's ability to participate within them.
|
||||
|
||||
- **Be friendly and patient.**
|
||||
- **Be welcoming.** We strive to be a community that welcomes and
|
||||
supports people of all backgrounds and identities. This includes,
|
||||
but is not limited to members of any race, ethnicity, culture,
|
||||
national origin, colour, immigration status, social and economic
|
||||
class, educational level, sex, sexual orientation, gender identity
|
||||
and expression, age, size, family status, political belief,
|
||||
religion, and mental and physical ability.
|
||||
- **Be considerate.** Your work will be used by other people, and you
|
||||
in turn will depend on the work of others. Any decision you take
|
||||
will affect users and colleagues, and you should take those
|
||||
consequences into account when making decisions. Remember that we're
|
||||
a world-wide community, so you might not be communicating in someone
|
||||
else's primary language.
|
||||
- **Be respectful.** Not all of us will agree all the time, but
|
||||
disagreement is no excuse for poor behavior and poor manners. We
|
||||
might all experience some frustration now and then, but we cannot
|
||||
allow that frustration to turn into a personal attack. It's
|
||||
important to remember that a community where people feel
|
||||
uncomfortable or threatened is not a productive one. Members of the
|
||||
Rclone community should be respectful when dealing with other
|
||||
members as well as with people outside the Rclone community.
|
||||
- **Be careful in the words that you choose.** We are a community of
|
||||
professionals, and we conduct ourselves professionally. Be kind to
|
||||
others. Do not insult or put down other participants. Harassment and
|
||||
other exclusionary behavior aren't acceptable. This includes, but is
|
||||
not limited to:
|
||||
- Violent threats or language directed against another person.
|
||||
- Discriminatory jokes and language.
|
||||
- Posting sexually explicit or violent material.
|
||||
- Posting (or threatening to post) other people's personally
|
||||
identifying information ("doxing").
|
||||
- Personal insults, especially those using racist or sexist terms.
|
||||
- Unwelcome sexual attention.
|
||||
- Advocating for, or encouraging, any of the above behavior.
|
||||
- Repeated harassment of others. In general, if someone asks you to
|
||||
stop, then stop.
|
||||
- **When we disagree, try to understand why.** Disagreements, both
|
||||
social and technical, happen all the time and Rclone is no
|
||||
exception. It is important that we resolve disagreements and
|
||||
differing views constructively. Remember that we're different. The
|
||||
strength of Rclone comes from its varied community, people from a
|
||||
wide range of backgrounds. Different people have different
|
||||
perspectives on issues. Being unable to understand why someone holds
|
||||
a viewpoint doesn't mean that they're wrong. Don't forget that it is
|
||||
human to err and blaming each other doesn't get us anywhere.
|
||||
Instead, focus on helping to resolve issues and learning from
|
||||
mistakes.
|
||||
|
||||
If you believe someone is violating the code of conduct, we ask that
|
||||
you report it by emailing [info@rclone.com](mailto:info@rclone.com).
|
||||
|
||||
Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).
|
||||
|
||||
## Questions?
|
||||
|
||||
If you have questions, please feel free to [contact us](mailto:info@rclone.com).
|
||||
562
CONTRIBUTING.md
@@ -15,61 +15,81 @@ with the [latest beta of rclone](https://beta.rclone.org/):
|
||||
- Rclone version (e.g. output from `rclone version`)
|
||||
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
|
||||
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
|
||||
- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
||||
- if the log contains secrets then edit the file with a text editor first to obscure them
|
||||
- A log of the command with the `-vv` flag (e.g. output from
|
||||
`rclone -vv copy /tmp remote:tmp`)
|
||||
- if the log contains secrets then edit the file with a text editor first to
|
||||
obscure them
|
||||
|
||||
## Submitting a new feature or bug fix
|
||||
|
||||
If you find a bug that you'd like to fix, or a new feature that you'd
|
||||
like to implement then please submit a pull request via GitHub.
|
||||
|
||||
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
|
||||
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues)
|
||||
first so it can be discussed.
|
||||
|
||||
To prepare your pull request first press the fork button on [rclone's GitHub
|
||||
page](https://github.com/rclone/rclone).
|
||||
|
||||
Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
|
||||
Then [install Git](https://git-scm.com/downloads) and set your public contribution
|
||||
[name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git)
|
||||
and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
|
||||
|
||||
Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
|
||||
Next open your terminal, change directory to your preferred folder and initialise
|
||||
your local rclone project:
|
||||
|
||||
git clone https://github.com/rclone/rclone.git
|
||||
cd rclone
|
||||
git remote rename origin upstream
|
||||
# if you have SSH keys setup in your GitHub account:
|
||||
git remote add origin git@github.com:YOURUSER/rclone.git
|
||||
# otherwise:
|
||||
git remote add origin https://github.com/YOURUSER/rclone.git
|
||||
```console
|
||||
git clone https://github.com/rclone/rclone.git
|
||||
cd rclone
|
||||
git remote rename origin upstream
|
||||
# if you have SSH keys setup in your GitHub account:
|
||||
git remote add origin git@github.com:YOURUSER/rclone.git
|
||||
# otherwise:
|
||||
git remote add origin https://github.com/YOURUSER/rclone.git
|
||||
```
|
||||
|
||||
Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
|
||||
Note that most of the terminal commands in the rest of this guide must be
|
||||
executed from the rclone folder created above.
|
||||
|
||||
Now [install Go](https://golang.org/doc/install) and verify your installation:
|
||||
|
||||
go version
|
||||
```console
|
||||
go version
|
||||
```
|
||||
|
||||
Great, you can now compile and execute your own version of rclone:
|
||||
|
||||
go build
|
||||
./rclone version
|
||||
```console
|
||||
go build
|
||||
./rclone version
|
||||
```
|
||||
|
||||
(Note that you can also replace `go build` with `make`, which will include a
|
||||
more accurate version number in the executable as well as enable you to specify
|
||||
more build options.) Finally, make a branch to add your new feature:
|
||||
|
||||
git checkout -b my-new-feature
|
||||
```console
|
||||
git checkout -b my-new-feature
|
||||
```
|
||||
|
||||
And get hacking.
|
||||
|
||||
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
|
||||
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins)
|
||||
and a quick view on the rclone [code organisation](#code-organisation).
|
||||
|
||||
When ready - test the affected functionality and run the unit tests for the code you changed
|
||||
When ready - test the affected functionality and run the unit tests for the
|
||||
code you changed
|
||||
|
||||
cd folder/with/changed/files
|
||||
go test -v
|
||||
```console
|
||||
cd folder/with/changed/files
|
||||
go test -v
|
||||
```
|
||||
|
||||
Note that you may need to make a test remote, e.g. `TestSwift` for some
|
||||
of the unit tests.
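If you need one, a minimal sketch of creating such a test remote non-interactively
(the parameters shown are illustrative; most backends need real credentials, so
running `rclone config` interactively may be easier):

```console
rclone config create TestSwift swift env_auth true
rclone listremotes   # should now include TestSwift:
```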
|
||||
|
||||
This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
|
||||
This is typically enough if you made a simple bug fix, otherwise please read
|
||||
the rclone [testing](#testing) section too.
|
||||
|
||||
Make sure you
|
||||
|
||||
@@ -79,14 +99,19 @@ Make sure you
|
||||
|
||||
When you are done with that push your changes to GitHub:
|
||||
|
||||
git push -u origin my-new-feature
|
||||
```console
|
||||
git push -u origin my-new-feature
|
||||
```
|
||||
|
||||
and open the GitHub website to [create your pull
|
||||
request](https://help.github.com/articles/creating-a-pull-request/).
|
||||
|
||||
Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
|
||||
Your changes will then get reviewed and you might get asked to fix some stuff.
|
||||
If so, then make the changes in the same branch, commit and push your updates to
|
||||
GitHub.
|
||||
|
||||
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
|
||||
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master)
|
||||
or [squash your commits](#squashing-your-commits).
|
||||
|
||||
## Using Git and GitHub
|
||||
|
||||
@@ -94,87 +119,118 @@ You may sometimes be asked to [base your changes on the latest master](#basing-y
|
||||
|
||||
Follow the guideline for [commit messages](#commit-messages) and then:
|
||||
|
||||
git checkout my-new-feature # To switch to your branch
|
||||
git status # To see the new and changed files
|
||||
git add FILENAME # To select FILENAME for the commit
|
||||
git status # To verify the changes to be committed
|
||||
git commit # To do the commit
|
||||
git log # To verify the commit. Use q to quit the log
|
||||
```console
|
||||
git checkout my-new-feature # To switch to your branch
|
||||
git status # To see the new and changed files
|
||||
git add FILENAME # To select FILENAME for the commit
|
||||
git status # To verify the changes to be committed
|
||||
git commit # To do the commit
|
||||
git log # To verify the commit. Use q to quit the log
|
||||
```
|
||||
|
||||
You can modify the message or changes in the latest commit using:
|
||||
|
||||
git commit --amend
|
||||
```console
|
||||
git commit --amend
|
||||
```
|
||||
|
||||
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||
If you amend commits that have been pushed to GitHub, then you will have to
|
||||
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||
|
||||
### Replacing your previously pushed commits
|
||||
|
||||
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
|
||||
Note that you are about to rewrite the GitHub history of your branch. It is good
|
||||
practice to involve your collaborators before modifying commits that have been
|
||||
pushed to GitHub.
|
||||
|
||||
Your previously pushed commits are replaced by:
|
||||
|
||||
git push --force origin my-new-feature
|
||||
```console
|
||||
git push --force origin my-new-feature
|
||||
```
|
||||
|
||||
### Basing your changes on the latest master
|
||||
|
||||
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
|
||||
To base your changes on the latest version of the
|
||||
[rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
|
||||
|
||||
git checkout master
|
||||
git fetch upstream
|
||||
git merge --ff-only
|
||||
git push origin --follow-tags # optional update of your fork in GitHub
|
||||
git checkout my-new-feature
|
||||
git rebase master
|
||||
```console
|
||||
git checkout master
|
||||
git fetch upstream
|
||||
git merge --ff-only
|
||||
git push origin --follow-tags # optional update of your fork in GitHub
|
||||
git checkout my-new-feature
|
||||
git rebase master
|
||||
```
|
||||
|
||||
If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||
If you rebase commits that have been pushed to GitHub, then you will have to
|
||||
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||
|
||||
### Squashing your commits ###
|
||||
### Squashing your commits
|
||||
|
||||
To combine your commits into one commit:
|
||||
|
||||
git log # To count the commits to squash, e.g. the last 2
|
||||
git reset --soft HEAD~2 # To undo the 2 latest commits
|
||||
git status # To check everything is as expected
|
||||
```console
|
||||
git log # To count the commits to squash, e.g. the last 2
|
||||
git reset --soft HEAD~2 # To undo the 2 latest commits
|
||||
git status # To check everything is as expected
|
||||
```
|
||||
|
||||
If everything is fine, then make the new combined commit:
|
||||
|
||||
git commit # To commit the undone commits as one
|
||||
```console
|
||||
git commit # To commit the undone commits as one
|
||||
```
|
||||
|
||||
otherwise, you may roll back using:
|
||||
|
||||
git reflog # To check that HEAD{1} is your previous state
|
||||
git reset --soft 'HEAD@{1}' # To roll back to your previous state
|
||||
```console
|
||||
git reflog # To check that HEAD@{1} is your previous state
|
||||
git reset --soft 'HEAD@{1}' # To roll back to your previous state
|
||||
```
|
||||
|
||||
If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||
If you squash commits that have been pushed to GitHub, then you will have to
|
||||
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||
|
||||
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
|
||||
Tip: You may like to use `git rebase -i master` if you are experienced or have a
|
||||
more complex situation.
|
||||
|
||||
### GitHub Continuous Integration
|
||||
|
||||
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
|
||||
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions)
|
||||
to build and test the project, which should be automatically available for your
|
||||
fork too from the `Actions` tab in your repository.
|
||||
|
||||
## Testing
|
||||
|
||||
### Code quality tests
|
||||
|
||||
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
|
||||
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then
|
||||
you can run the same tests as get run in the CI which can be very helpful.
|
||||
|
||||
You can run them with `make check` or with `golangci-lint run ./...`.
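For example, from the project root:

```console
make check
# or invoke the linter directly:
golangci-lint run ./...
```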
|
||||
|
||||
Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
|
||||
Using these tests ensures that the whole rclone codebase uses the same coding
|
||||
standards. These tests also check for easy mistakes to make (like forgetting
|
||||
to check an error return).
|
||||
|
||||
### Quick testing
|
||||
|
||||
rclone's tests are run from the go testing framework, so at the top
|
||||
level you can run this to run all the tests.
|
||||
|
||||
go test -v ./...
|
||||
```console
|
||||
go test -v ./...
|
||||
```
|
||||
|
||||
You can also use `make`, if supported by your platform
|
||||
|
||||
make quicktest
|
||||
```console
|
||||
make quicktest
|
||||
```
|
||||
|
||||
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
|
||||
The quicktest is [automatically run by GitHub](#github-continuous-integration)
|
||||
when you push your branch to GitHub.
|
||||
|
||||
### Backend testing
|
||||
|
||||
@@ -190,41 +246,50 @@ need to make a remote called `TestDrive`.
|
||||
You can then run the unit tests in the drive directory. These tests
|
||||
are skipped if `TestDrive:` isn't defined.
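A minimal sketch of setting that up (Google Drive remotes need OAuth, so the
remote itself is created interactively):

```console
rclone config       # create a remote named TestDrive of type drive
rclone listremotes  # should now include TestDrive:
```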
|
||||
|
||||
cd backend/drive
|
||||
go test -v
|
||||
```console
|
||||
cd backend/drive
|
||||
go test -v
|
||||
```
|
||||
|
||||
You can then run the integration tests which test all of rclone's
|
||||
operations. Normally these get run against the local file system,
|
||||
but they can be run against any of the remotes.
|
||||
|
||||
cd fs/sync
|
||||
go test -v -remote TestDrive:
|
||||
go test -v -remote TestDrive: -fast-list
|
||||
```console
|
||||
cd fs/sync
|
||||
go test -v -remote TestDrive:
|
||||
go test -v -remote TestDrive: -fast-list
|
||||
|
||||
cd fs/operations
|
||||
go test -v -remote TestDrive:
|
||||
cd fs/operations
|
||||
go test -v -remote TestDrive:
|
||||
```
|
||||
|
||||
If you want to use the integration test framework to run these tests
|
||||
all together with an HTML report and test retries then from the
|
||||
project root:
|
||||
|
||||
go install github.com/rclone/rclone/fstest/test_all
|
||||
test_all -backends drive
|
||||
```console
|
||||
go run ./fstest/test_all -backends drive
|
||||
```
|
||||
|
||||
### Full integration testing
|
||||
|
||||
If you want to run all the integration tests against all the remotes,
|
||||
then change into the project root and run
|
||||
|
||||
make check
|
||||
make test
|
||||
```console
|
||||
make check
|
||||
make test
|
||||
```
|
||||
|
||||
The commands may require some extra go packages which you can install with
|
||||
|
||||
make build_dep
|
||||
```console
|
||||
make build_dep
|
||||
```
|
||||
|
||||
The full integration tests are run daily on the integration test server. You can
|
||||
find the results at https://pub.rclone.org/integration-tests/
|
||||
find the results at <https://integration.rclone.org>
|
||||
|
||||
## Code Organisation
|
||||
|
||||
@@ -232,46 +297,48 @@ Rclone code is organised into a small number of top level directories
|
||||
with modules beneath.
|
||||
|
||||
- backend - the rclone backends for interfacing to cloud providers -
|
||||
- all - import this to load all the cloud providers
|
||||
- ...providers
|
||||
- all - import this to load all the cloud providers
|
||||
- ...providers
|
||||
- bin - scripts for use while building or maintaining rclone
|
||||
- cmd - the rclone commands
|
||||
- all - import this to load all the commands
|
||||
- ...commands
|
||||
- all - import this to load all the commands
|
||||
- ...commands
|
||||
- cmdtest - end-to-end tests of commands, flags, environment variables,...
|
||||
- docs - the documentation and website
|
||||
- content - adjust these docs only - everything else is autogenerated
|
||||
- command - these are auto-generated - edit the corresponding .go file
|
||||
- content - adjust these docs only, except those marked autogenerated
|
||||
or portions marked autogenerated where the corresponding .go file must be
|
||||
edited instead, and everything else is autogenerated
|
||||
- commands - these are auto-generated, edit the corresponding .go file
|
||||
- fs - main rclone definitions - minimal amount of code
|
||||
- accounting - bandwidth limiting and statistics
|
||||
- asyncreader - an io.Reader which reads ahead
|
||||
- config - manage the config file and flags
|
||||
- driveletter - detect if a name is a drive letter
|
||||
- filter - implements include/exclude filtering
|
||||
- fserrors - rclone specific error handling
|
||||
- fshttp - http handling for rclone
|
||||
- fspath - path handling for rclone
|
||||
- hash - defines rclone's hash types and functions
|
||||
- list - list a remote
|
||||
- log - logging facilities
|
||||
- march - iterates directories in lock step
|
||||
- object - in memory Fs objects
|
||||
- operations - primitives for sync, e.g. Copy, Move
|
||||
- sync - sync directories
|
||||
- walk - walk a directory
|
||||
- accounting - bandwidth limiting and statistics
|
||||
- asyncreader - an io.Reader which reads ahead
|
||||
- config - manage the config file and flags
|
||||
- driveletter - detect if a name is a drive letter
|
||||
- filter - implements include/exclude filtering
|
||||
- fserrors - rclone specific error handling
|
||||
- fshttp - http handling for rclone
|
||||
- fspath - path handling for rclone
|
||||
- hash - defines rclone's hash types and functions
|
||||
- list - list a remote
|
||||
- log - logging facilities
|
||||
- march - iterates directories in lock step
|
||||
- object - in memory Fs objects
|
||||
- operations - primitives for sync, e.g. Copy, Move
|
||||
- sync - sync directories
|
||||
- walk - walk a directory
|
||||
- fstest - provides integration test framework
|
||||
- fstests - integration tests for the backends
|
||||
- mockdir - mocks an fs.Directory
|
||||
- mockobject - mocks an fs.Object
|
||||
- test_all - Runs integration tests for everything
|
||||
- fstests - integration tests for the backends
|
||||
- mockdir - mocks an fs.Directory
|
||||
- mockobject - mocks an fs.Object
|
||||
- test_all - Runs integration tests for everything
|
||||
- graphics - the images used in the website, etc.
|
||||
- lib - libraries used by the backend
|
||||
- atexit - register functions to run when rclone exits
|
||||
- dircache - directory ID to name caching
|
||||
- oauthutil - helpers for using oauth
|
||||
- pacer - retries with backoff and paces operations
|
||||
- readers - a selection of useful io.Readers
|
||||
- rest - a thin abstraction over net/http for REST
|
||||
- atexit - register functions to run when rclone exits
|
||||
- dircache - directory ID to name caching
|
||||
- oauthutil - helpers for using oauth
|
||||
- pacer - retries with backoff and paces operations
|
||||
- readers - a selection of useful io.Readers
|
||||
- rest - a thin abstraction over net/http for REST
|
||||
- librclone - in memory interface to rclone's API for embedding rclone
|
||||
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
|
||||
|
||||
@@ -279,47 +346,109 @@ with modules beneath.
|
||||
|
||||
If you are adding a new feature then please update the documentation.
|
||||
|
||||
The documentation sources are generally in Markdown format, in conformance
|
||||
with the CommonMark specification and compatible with GitHub Flavored
|
||||
Markdown (GFM). The markdown format and style is checked as part of the lint
|
||||
operation that runs automatically on pull requests, to enforce standards and
|
||||
consistency. This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
|
||||
tool by David Anson, which can also be integrated into editors so you can
|
||||
perform the same checks while writing. It generally follows Ciro Santilli's
|
||||
[Markdown Style Guide](https://cirosantilli.com/markdown-style-guide), which
|
||||
is a good source if you want to know more.
|
||||
|
||||
HTML pages, served as the website <rclone.org>, are generated from the Markdown,
|
||||
using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
|
||||
a different algorithm is currently used for generating header anchors
|
||||
than what GitHub uses for its Markdown rendering. For example, in the HTML docs
|
||||
generated by Hugo any leading `-` characters are ignored, which means when
|
||||
linking to a header with text `--config string` we therefore need to use the
|
||||
link `#config-string` in our Markdown source, which will not work in GitHub's
|
||||
preview where `#--config-string` would be the correct link.
|
||||
|
||||
Most of the documentation is written directly in text files with extension
|
||||
`.md`, mainly within the folder `docs/content`. Note that several such files
|
||||
are autogenerated (e.g. the command documentation under `docs/content/commands`,
|
||||
and `docs/content/flags.md`), or contain autogenerated portions (e.g. the
|
||||
backend documentation). These are marked with an `autogenerated` comment.
|
||||
The sources of the autogenerated text are usually Markdown formatted text
|
||||
embedded as string values in the Go source code, so you need to locate these
|
||||
and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text
|
||||
files in the root of the repository are also autogenerated. The autogeneration
|
||||
of files, and the website, will be done during the release process. See the
|
||||
`make doc` and `make website` targets in the Makefile if you are interested in
|
||||
how. You don't need to run these when adding a feature.
|
||||
|
||||
If you add a new general flag (not for a backend), then document it in
|
||||
`docs/content/docs.md` - the flags there are supposed to be in
|
||||
alphabetical order.
|
||||
|
||||
If you add a new backend option/flag, then it should be documented in
|
||||
the source file in the `Help:` field.
|
||||
the source file in the `Help:` field:
|
||||
|
||||
- Start with the most important information about the option,
|
||||
as a single sentence on a single line.
|
||||
- This text will be used for the command-line flag help.
|
||||
- It will be combined with other information, such as any default value,
|
||||
and the result will look odd if not written as a single sentence.
|
||||
- It should end with a period/full stop character, which will be shown
|
||||
in docs but automatically removed when producing the flag help.
|
||||
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
|
||||
as a single sentence on a single line.
|
||||
- This text will be used for the command-line flag help.
|
||||
- It will be combined with other information, such as any default value,
|
||||
and the result will look odd if not written as a single sentence.
|
||||
- It should end with a period/full stop character, which will be shown
|
||||
in docs but automatically removed when producing the flag help.
|
||||
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
|
||||
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
|
||||
- Like with docs generated from Markdown, a single line break is ignored
|
||||
and two line breaks creates a new paragraph.
|
||||
- This text will be shown to the user in `rclone config`
|
||||
and in the docs (where it will be added by `make backenddocs`,
|
||||
normally run some time before next release).
|
||||
- Like with docs generated from Markdown, a single line break is ignored
|
||||
and two line breaks creates a new paragraph.
|
||||
- This text will be shown to the user in `rclone config`
|
||||
and in the docs (where it will be added by `make backenddocs`,
|
||||
normally run some time before next release).
|
||||
- To create options of enumeration type use the `Examples:` field.
|
||||
- Each example value have their own `Help:` field, but they are treated
|
||||
a bit different than the main option help text. They will be shown
|
||||
as an unordered list, therefore a single line break is enough to
|
||||
create a new list item. Also, for enumeration texts like name of
|
||||
countries, it looks better without an ending period/full stop character.
|
||||
- Each example value has its own `Help:` field, but they are treated
|
||||
a bit differently than the main option help text. They will be shown
|
||||
as an unordered list, therefore a single line break is enough to
|
||||
create a new list item. Also, for enumeration texts like name of
|
||||
countries, it looks better without an ending period/full stop character.
|
||||
- You can run `make backenddocs` to verify the resulting Markdown.
|
||||
- This will update the autogenerated sections of the backend docs Markdown
|
||||
files under `docs/content`.
|
||||
- It requires you to have [Python](https://www.python.org) installed.
|
||||
- The `backenddocs` make target runs the Python script `bin/make_backend_docs.py`,
|
||||
and you can also run this directly, optionally with the name of a backend
|
||||
as argument to only update the docs for a specific backend.
|
||||
- **Do not** commit the updated Markdown files. This operation is run as part of
|
||||
the release process. Since any manual changes in the autogenerated sections
|
||||
of the Markdown files will then be lost, we have a pull request check that
|
||||
reports an error for any changes within the autogenerated sections. Should you
|
||||
have done manual changes outside of the autogenerated sections they must be
|
||||
committed, of course.
|
||||
- You can run `make serve` to verify the resulting website.
|
||||
- This will build the website and serve it locally, so you can open it in
|
||||
your web browser and verify that the end result looks OK. Check specifically
|
||||
any added links, also in light of the note above regarding different algorithms
|
||||
for generated header anchors.
|
||||
- It requires you to have the [Hugo](https://gohugo.io) tool available.
|
||||
- The `serve` make target depends on the `website` target, which runs the
|
||||
`hugo` command from the `docs` directory to build the website, and then
|
||||
it serves the website locally with an embedded web server using a command
|
||||
`hugo server --logLevel info -w --disableFastRender --ignoreCache`, so you
|
||||
can run similar Hugo commands directly as well.
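Putting the `make backenddocs` and `make serve` checks from the list above
together, a minimal sketch (using `drive` as an example backend name):

```console
make backenddocs   # or: bin/make_backend_docs.py drive
make serve         # build the website and serve it locally
```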
|
||||
|
||||
The only documentation you need to edit are the `docs/content/*.md`
|
||||
files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
|
||||
from those during the release process. See the `make doc` and `make
|
||||
website` targets in the Makefile if you are interested in how. You
|
||||
don't need to run these when adding a feature.
|
||||
When writing documentation for an entirely new backend,
|
||||
see [backend documentation](#backend-documentation).
|
||||
|
||||
Documentation for rclone sub commands is with their code, e.g.
|
||||
`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
|
||||
line, without a period/full stop character at the end, as it will be
|
||||
combined unmodified with other information (such as any default value).
|
||||
If you are updating documentation for a command, you must do that in the
|
||||
command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single
|
||||
sentence on a single line, without a period/full stop character at the end,
|
||||
as it will be combined unmodified with other information (such as any default
|
||||
value).
|
||||
|
||||
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
|
||||
for small changes in the docs which makes it very easy.
|
||||
Note that you can use
|
||||
[GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
|
||||
for small changes in the docs which makes it very easy. Just remember the
|
||||
caveat when linking to header anchors, noted above, which means that GitHub's
|
||||
Markdown preview may not be an entirely reliable verification of the results.
|
||||
|
||||
After your changes have been merged, you can verify them on
|
||||
[tip.rclone.org](https://tip.rclone.org). This site is updated daily with the
|
||||
current state of the master branch at 07:00 UTC. The changes will be on the main
|
||||
[rclone.org](https://rclone.org) site once they have been included in a release.
|
||||
|
||||
## Making a release
|
||||
|
||||
@@ -350,13 +479,13 @@ change will get linked into the issue.
|
||||
|
||||
Here is an example of a short commit message:
|
||||
|
||||
```
|
||||
```text
|
||||
drive: add team drive support - fixes #885
|
||||
```
|
||||
|
||||
And here is an example of a longer one:
|
||||
|
||||
```
|
||||
```text
|
||||
mount: fix hang on errored upload
|
||||
|
||||
In certain circumstances, if an upload failed then the mount could hang
|
||||
@@ -379,7 +508,9 @@ To add a dependency `github.com/ncw/new_dependency` see the
|
||||
instructions below. These will fetch the dependency and add it to
|
||||
`go.mod` and `go.sum`.
|
||||
|
||||
go get github.com/ncw/new_dependency
|
||||
```console
|
||||
go get github.com/ncw/new_dependency
|
||||
```
|
||||
|
||||
You can add constraints on that package when doing `go get` (see the
|
||||
go docs linked above), but don't unless you really need to.
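For example, to pin the new dependency to a specific version while adding it
(the version shown is purely illustrative):

```console
go get github.com/ncw/new_dependency@v1.2.3
```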
|
||||
@@ -391,7 +522,9 @@ and `go.sum` in the same commit as your other changes.
|
||||
|
||||
If you need to update a dependency then run
|
||||
|
||||
go get golang.org/x/crypto
|
||||
```console
|
||||
go get golang.org/x/crypto
|
||||
```
|
||||
|
||||
Check in a single commit as above.
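For example (a minimal sketch):

```console
git add go.mod go.sum   # together with your related code changes
git commit
```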
|
||||
|
||||
@@ -434,25 +567,38 @@ remote or an fs.
|
||||
### Getting going
|
||||
|
||||
- Create `backend/remote/remote.go` (copy this from a similar remote)
|
||||
- box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
|
||||
- b2 is a good one to start from if you have a bucket-based remote
|
||||
- box is a good one to start from if you have a directory-based remote (and
|
||||
shows how to use the directory cache)
|
||||
- b2 is a good one to start from if you have a bucket-based remote
|
||||
- Add your remote to the imports in `backend/all/all.go`
|
||||
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
|
||||
- Try to implement as many optional methods as possible as it makes the remote more usable.
|
||||
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
|
||||
- `rclone purge -v TestRemote:rclone-info`
|
||||
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
||||
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
|
||||
- open `remote.csv` in a spreadsheet and examine
|
||||
- HTTP based remotes are easiest to maintain if they use rclone's
|
||||
[lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but
|
||||
if there is a really good Go SDK from the provider then use that instead.
|
||||
- Try to implement as many optional methods as possible as it makes the remote
|
||||
more usable.
|
||||
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to
|
||||
make sure we can encode any path name and `rclone info` to help determine the
|
||||
encodings needed
|
||||
- `rclone purge -v TestRemote:rclone-info`
|
||||
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
||||
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
|
||||
- open `remote.csv` in a spreadsheet and examine
|
||||
|
||||
### Guidelines for a speedy merge
|
||||
|
||||
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
|
||||
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
|
||||
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
|
||||
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
|
||||
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest)
|
||||
if you are implementing a REST like backend and parsing XML/JSON in the backend.
|
||||
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp)
|
||||
if your backend is HTTP based - this adds features like `--dump bodies`,
|
||||
`--tpslimit`, `--user-agent` without you having to code anything!
|
||||
- **Do** follow your example backend exactly - use the same code order, function
|
||||
names, layout, structure. **Don't** move stuff around and **Don't** delete the
|
||||
comments.
|
||||
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few
|
||||
backends like that - don't follow them!)
|
||||
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
|
||||
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
|
||||
- **Remember** we have >50 backends to maintain so keeping them as similar as
|
||||
possible to each other is a high priority!
|
||||
|
||||
### Unit tests
|
||||
|
||||
@@ -463,19 +609,19 @@ remote or an fs.
|
||||
### Integration tests
|
||||
|
||||
- Add your backend to `fstest/test_all/config.yaml`
|
||||
- Once you've done that then you can use the integration test framework from the project root:
|
||||
- go install ./...
|
||||
- test_all -backends remote
|
||||
- Once you've done that then you can use the integration test framework from
|
||||
the project root:
|
||||
- `go run ./fstest/test_all -backends remote`
|
||||
|
||||
Or if you want to run the integration tests manually:
|
||||
|
||||
- Make sure integration tests pass with
|
||||
- `cd fs/operations`
|
||||
- `go test -v -remote TestRemote:`
|
||||
- `cd fs/sync`
|
||||
- `go test -v -remote TestRemote:`
|
||||
- `cd fs/operations`
|
||||
- `go test -v -remote TestRemote:`
|
||||
- `cd fs/sync`
|
||||
- `go test -v -remote TestRemote:`
|
||||
- If your remote defines `ListR` check with this also
|
||||
- `go test -v -remote TestRemote: -fast-list`
|
||||
- `go test -v -remote TestRemote: -fast-list`
|
||||
|
||||
See the [testing](#testing) section for more information on integration tests.
|
||||
|
||||
@@ -487,10 +633,13 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
|
||||
`Google Drive`) but with the local file system last.
|
||||
|
||||
- `README.md` - main GitHub page
|
||||
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
|
||||
- make sure this has the `autogenerated options` comments in (see your reference backend docs)
|
||||
- update them in your backend with `bin/make_backend_docs.py remote`
|
||||
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
|
||||
- `docs/content/remote.md` - main docs page (note the backend options are
|
||||
automatically added to this file with `make backenddocs`)
|
||||
- make sure this has the `autogenerated options` comments in (see your
|
||||
reference backend docs)
|
||||
- update them in your backend with `bin/make_backend_docs.py remote`
|
||||
- `docs/content/overview.md` - overview docs - add an entry into the Features
|
||||
table and the Optional Features table.
|
||||
- `docs/content/docs.md` - list of remotes in config section
|
||||
- `docs/content/_index.md` - front page of rclone.org
|
||||
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
|
||||
@@ -501,74 +650,55 @@ in the web browser and the links (internal and external) all work.
|
||||
|
||||
## Adding a new s3 provider
|
||||
|
||||
It is quite easy to add a new S3 provider to rclone.
|
||||
|
||||
You'll need to modify the following files
|
||||
|
||||
- `backend/s3/s3.go`
|
||||
- Add the provider to `providerOption` at the top of the file
|
||||
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
|
||||
- Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
|
||||
- Add the provider to the `setQuirks` function - see the documentation there.
|
||||
- `docs/content/s3.md`
|
||||
- Add the provider at the top of the page.
|
||||
- Add a section about the provider linked from there.
|
||||
- Add a transcript of a trial `rclone config` session
|
||||
- Edit the transcript to remove things which might change in subsequent versions
|
||||
- **Do not** alter or add to the autogenerated parts of `s3.md`
|
||||
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
|
||||
- `README.md` - this is the home page in github
|
||||
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
|
||||
- `docs/content/_index.md` - this is the home page of rclone.org
|
||||
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
|
||||
|
||||
When adding the provider, endpoints, quirks, docs etc keep them in
|
||||
alphabetical order by `Provider` name, but with `AWS` first and
|
||||
`Other` last.
|
||||
|
||||
Once you've written the docs, run `make serve` and check they look OK
|
||||
in the web browser and the links (internal and external) all work.
|
||||
|
||||
Once you've written the code, test `rclone config` works to your
|
||||
satisfaction, and check the integration tests work `go test -v -remote
|
||||
NewS3Provider:`. You may need to adjust the quirks to get them to
|
||||
pass. Some providers just can't pass the tests with control characters
|
||||
in the names so if these fail and the provider doesn't support
|
||||
`urlEncodeListings` in the quirks then ignore them. Note that the
|
||||
`SetTier` test may also fail on non AWS providers.
|
||||
|
||||
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
|
||||
[Please see the guide in the S3 backend directory](backend/s3/README.md).
|
||||
|
||||
## Writing a plugin
|
||||
|
||||
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
|
||||
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
|
||||
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
|
||||
New features (backends, commands) can also be added "out-of-tree", through Go
|
||||
plugins. Changes will be kept in a dynamically loaded file instead of being
|
||||
compiled into the main binary. This is useful if you can't merge your changes
|
||||
upstream or don't want to maintain a fork of rclone.
|
||||
|
||||
### Usage
|
||||
|
||||
- Naming
|
||||
- Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
|
||||
- `KIND` should be one of `backend`, `command` or `bundle`.
|
||||
- Example: A plugin with backend support for PiFS would be called
|
||||
`librcloneplugin_backend_pifs.so`.
|
||||
- Loading
|
||||
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
|
||||
- Supported on rclone v1.50 or greater.
|
||||
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
|
||||
- If this variable doesn't exist, plugin support is disabled.
|
||||
- Plugins must be compiled against the exact version of rclone to work.
|
||||
(The rclone used during building the plugin must be the same as the source of rclone)
|
||||
- Naming
|
||||
- Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
|
||||
- `KIND` should be one of `backend`, `command` or `bundle`.
|
||||
- Example: A plugin with backend support for PiFS would be called
|
||||
`librcloneplugin_backend_pifs.so`.
|
||||
- Loading
|
||||
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
|
||||
- Supported on rclone v1.50 or greater.
|
||||
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
|
||||
- If this variable doesn't exist, plugin support is disabled.
|
||||
- Plugins must be compiled against the exact version of rclone to work.
|
||||
(The rclone used during building the plugin must be the same as the source
|
||||
of rclone)
|
||||
|
||||
### Building
|
||||
|
||||
To turn your existing additions into a Go plugin, move them to an external repository
|
||||
and change the top-level package name to `main`.
|
||||
|
||||
Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
|
||||
Check `rclone --version` and make sure that the plugin's rclone dependency and
|
||||
host Go version match.
|
||||
|
||||
Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
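For example, a minimal sketch for a hypothetical backend plugin, following the
naming pattern and `$RCLONE_PLUGIN_PATH` convention described above:

```console
go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .
export RCLONE_PLUGIN_PATH=$PWD
rclone version   # rclone loads plugins from $RCLONE_PLUGIN_PATH at startup
```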
|
||||
|
||||
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
|
||||
|
||||
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
|
||||
## Keeping a backend or command out of tree
|
||||
|
||||
Rclone was designed to be modular so it is very easy to keep a backend
|
||||
or a command out of the main rclone source tree.
|
||||
|
||||
So for example if you had a backend which accessed your proprietary
|
||||
systems or a command which was specialised for your needs you could
|
||||
add them out of tree.
|
||||
|
||||
This may be easier than using a plugin and is supported on all
|
||||
platforms not just macOS and Linux.
|
||||
|
||||
This is explained further in <https://github.com/rclone/rclone_out_of_tree_example>
|
||||
which has an example of an out of tree backend `ram` (which is a
|
||||
renamed version of the `memory` backend).
|
||||
|
||||
44
Dockerfile
@@ -1,19 +1,47 @@
|
||||
FROM golang:alpine AS builder
|
||||
|
||||
COPY . /go/src/github.com/rclone/rclone/
|
||||
ARG CGO_ENABLED=0
|
||||
|
||||
WORKDIR /go/src/github.com/rclone/rclone/
|
||||
|
||||
RUN apk add --no-cache make bash gawk git
|
||||
RUN \
|
||||
CGO_ENABLED=0 \
|
||||
make
|
||||
RUN ./rclone version
|
||||
RUN echo "**** Set Go Environment Variables ****" && \
|
||||
go env -w GOCACHE=/root/.cache/go-build
|
||||
|
||||
RUN echo "**** Install Dependencies ****" && \
|
||||
apk add --no-cache \
|
||||
make \
|
||||
bash \
|
||||
gawk \
|
||||
git
|
||||
|
||||
COPY go.mod .
|
||||
COPY go.sum .
|
||||
|
||||
RUN echo "**** Download Go Dependencies ****" && \
|
||||
go mod download -x
|
||||
|
||||
RUN echo "**** Verify Go Dependencies ****" && \
|
||||
go mod verify
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
|
||||
echo "**** Build Binary ****" && \
|
||||
make
|
||||
|
||||
RUN echo "**** Print Version Binary ****" && \
|
||||
./rclone version
|
||||
|
||||
# Begin final image
|
||||
FROM alpine:latest
|
||||
|
||||
RUN apk --no-cache add ca-certificates fuse3 tzdata && \
|
||||
echo "user_allow_other" >> /etc/fuse.conf
|
||||
RUN echo "**** Install Dependencies ****" && \
|
||||
apk add --no-cache \
|
||||
ca-certificates \
|
||||
fuse3 \
|
||||
tzdata && \
|
||||
echo "Enable user_allow_other in fuse" && \
|
||||
echo "user_allow_other" >> /etc/fuse.conf
|
||||
|
||||
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
|
||||
|
||||
|
||||
118
MAINTAINERS.md
@@ -1,4 +1,4 @@
|
||||
# Maintainers guide for rclone #
|
||||
# Maintainers guide for rclone
|
||||
|
||||
Current active maintainers of rclone are:
|
||||
|
||||
@@ -24,80 +24,108 @@ Current active maintainers of rclone are:
|
||||
| Dan McArdle | @dmcardle | gitannex |
|
||||
| Sam Harrison | @childish-sambino | filescom |
|
||||
|
||||
**This is a work in progress Draft**
|
||||
## This is a work in progress draft
|
||||
|
||||
This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
|
||||
This is a guide for how to be an rclone maintainer. This is mostly a write-up
|
||||
of what I (@ncw) attempt to do.
|
||||
|
||||
## Triaging Tickets ##
|
||||
## Triaging Tickets
|
||||
|
||||
When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket so tickets may remain without labels or milestone for a while.
|
||||
When a ticket comes in it should be triaged. This means it should be classified
|
||||
by adding labels and placed into a milestone. Quite a lot of tickets need a bit
|
||||
of back and forth to determine whether it is a valid ticket so tickets may
|
||||
remain without labels or milestone for a while.
|
||||
|
||||
Rclone uses the labels like this:
|
||||
|
||||
* `bug` - a definitely verified bug
|
||||
* `can't reproduce` - a problem which we can't reproduce
|
||||
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
|
||||
* `duplicate` - normally close these and ask the user to subscribe to the original
|
||||
* `enhancement: new remote` - a new rclone backend
|
||||
* `enhancement` - a new feature
|
||||
* `FUSE` - to do with `rclone mount` command
|
||||
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
|
||||
* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
|
||||
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
|
||||
* `maintenance` - internal enhancement, code re-organisation, etc.
|
||||
* `Needs Go 1.XX` - waiting for that version of Go to be released
|
||||
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
|
||||
* `Remote: XXX` - which rclone backend this affects
|
||||
* `thinking` - not decided on the course of action yet
|
||||
- `bug` - a definitely verified bug
|
||||
- `can't reproduce` - a problem which we can't reproduce
|
||||
- `doc fix` - a bug in the documentation - if users need help understanding the
|
||||
docs add this label
|
||||
- `duplicate` - normally close these and ask the user to subscribe to the original
|
||||
- `enhancement: new remote` - a new rclone backend
|
||||
- `enhancement` - a new feature
|
||||
- `FUSE` - to do with `rclone mount` command
|
||||
- `good first issue` - mark these if you find a small self-contained issue -
|
||||
these get shown to new visitors to the project
|
||||
- `help wanted` - mark these if you find a self-contained issue - these get
|
||||
shown to new visitors to the project
|
||||
- `IMPORTANT` - note to maintainers not to forget to fix this for the release
|
||||
- `maintenance` - internal enhancement, code re-organisation, etc.
|
||||
- `Needs Go 1.XX` - waiting for that version of Go to be released
|
||||
- `question` - not a `bug` or `enhancement` - direct to the forum for next time
|
||||
- `Remote: XXX` - which rclone backend this affects
|
||||
- `thinking` - not decided on the course of action yet
|
||||
|
||||
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
|
||||
If it turns out to be a bug or an enhancement it should be tagged as such, with
|
||||
the appropriate other tags. Don't forget the "good first issue" tag to give new
|
||||
contributors something easy to do to get going.
|
||||
|
||||
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
|
||||
When a ticket is tagged it should be added to a milestone, either the next
|
||||
release, the one after, Soon or Help Wanted. Bugs can be added to the
|
||||
"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
|
||||
something (e.g. the next go release).
|
||||
|
||||
The milestones have these meanings:
|
||||
|
||||
* v1.XX - stuff we would like to fit into this release
|
||||
* v1.XX+1 - stuff we are leaving until the next release
|
||||
* Soon - stuff we think is a good idea - waiting to be scheduled for a release
|
||||
* Help wanted - blue sky stuff that might get moved up, or someone could help with
|
||||
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
|
||||
- v1.XX - stuff we would like to fit into this release
|
||||
- v1.XX+1 - stuff we are leaving until the next release
|
||||
- Soon - stuff we think is a good idea - waiting to be scheduled for a release
|
||||
- Help wanted - blue sky stuff that might get moved up, or someone could help with
|
||||
- Known bugs - bugs waiting on external factors or we aren't going to fix for
|
||||
the moment
|
||||
|
||||
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
|
||||
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile)
|
||||
are good candidates for ones that have slipped between the gaps and need
|
||||
following up.
|
||||
|
||||
## Closing Tickets

Close tickets as soon as you can - make sure they are tagged with a release.
Post a link to a beta in the ticket with the fix in, asking for feedback.

## Pull requests

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well nowadays so you can
squash and rebase or rebase pull requests. rclone doesn't use merge commits.
Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run
`bin/update-authors.py` to update the authors file then `git push`.
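
A minimal sketch of those post-merge steps (this assumes `bin/update-authors.py`
leaves its changes uncommitted, which may not be the case):

```console
git checkout master
git pull
bin/update-authors.py
git commit -a -v -m "Add new authors"   # only needed if the script changed anything
git push
```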

Sometimes pull requests need to be left open for a while - this is especially
true of contributions of new backends, which take a long time to get right.

## Merges

If you are merging a branch locally then do `git merge --ff-only branch-name` to
avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
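
For example, merging a contributor branch locally without a merge commit looks
something like this (`branch-name` is a placeholder):

```console
git checkout branch-name
git rebase master            # only needed if it doesn't merge cleanly
git checkout master
git merge --ff-only branch-name
git push
```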

## Release cycle

Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
if there is something big to merge that didn't stabilize properly or for
personal reasons.

High impact regressions should be fixed before the next release.

Near the start of the release cycle, the dependencies should be updated with
`make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big to let
things settle down.

Follow the instructions in RELEASE.md for making the release. Note that the
testing part is the most time-consuming, often needing several rounds of test
and fix depending on exactly how many new features rclone has gained.

## Mailing list

There is now an invite-only mailing list for rclone developers `rclone-dev` on
Google Groups.

## TODO

I should probably make a <dev@rclone.org> to register with cloud providers.

MANUAL.html (generated) - 50034 changed lines - diff suppressed because it is too large

MANUAL.txt (generated) - 10974 changed lines - diff suppressed because it is too large

Makefile - 17 changed lines

@@ -100,6 +100,7 @@ compiletest:

check: rclone
    @echo "-- START CODE QUALITY REPORT -------------------------------"
    @golangci-lint run $(LINTTAGS) ./...
    @bin/markdown-lint
    @echo "-- END CODE QUALITY REPORT ---------------------------------"

# Get the build dependencies

@@ -113,21 +114,21 @@ release_dep_linux:

# Update dependencies
showupdates:
    @echo "*** Direct dependencies that could be updated ***"
    @GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
    @go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null

# Update direct dependencies only
updatedirect:
    GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
    GO111MODULE=on go mod tidy
    go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
    go mod tidy

# Update direct and indirect dependencies and test dependencies
update:
    GO111MODULE=on go get -d -u -t ./...
    GO111MODULE=on go mod tidy
    go get -u -t ./...
    go mod tidy

# Tidy the module dependencies
tidy:
    GO111MODULE=on go mod tidy
    go mod tidy

doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs

@@ -144,9 +145,11 @@ MANUAL.txt: MANUAL.md
    pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
    go generate ./lib/transform
    -@rmdir -p '$$HOME/.config/rclone'
    XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
    @[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
    go run bin/make_bisync_docs.go ./docs/content/

backenddocs: rclone bin/make_backend_docs.py
    -@rmdir -p '$$HOME/.config/rclone'

@@ -243,7 +246,7 @@ fetch_binaries:
    rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/

serve: website
    cd docs && hugo server --logLevel info -w --disableFastRender
    cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache

tag: retag doc
    bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
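
The `check` target in the Makefile hunk above now runs `bin/markdown-lint`
alongside golangci-lint. Assuming those tools are installed locally, the common
invocations of the targets shown here are simply:

```console
make check    # golangci-lint plus the markdown lint step
make update   # update direct and indirect dependencies and test dependencies
make doc      # regenerate rclone.1, MANUAL.html, MANUAL.txt and the command docs
```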

README.md - 279 changed lines

@@ -1,22 +1,6 @@

<div align="center">
<sup>Special thanks to our sponsor:</sup>
<br>
<br>
<a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
<div>
<img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
</div>
<b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
<div>
<sup>Visit warp.dev to learn more.</sup>
</div>
</a>
<br>
<hr>
</div>
<br>

<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
<!-- markdownlint-disable-next-line no-inline-html -->
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)

[Website](https://rclone.org) |

@@ -34,97 +18,111 @@

# Rclone

Rclone *("rsync for cloud storage")* is a command-line program to sync files and
directories to and from different cloud storage providers.

## Storage providers

- 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
||||
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
||||
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
||||
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
||||
- Box [:page_facing_up:](https://rclone.org/box/)
|
||||
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
|
||||
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
|
||||
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
|
||||
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
|
||||
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
|
||||
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
||||
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
|
||||
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
||||
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
|
||||
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
|
||||
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
|
||||
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
|
||||
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
|
||||
- FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||
- GoFile [:page_facing_up:](https://rclone.org/gofile/)
|
||||
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
||||
- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
|
||||
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
|
||||
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||
- HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||
- Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
||||
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
|
||||
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
|
||||
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
||||
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
|
||||
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
|
||||
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
|
||||
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
|
||||
- Linkbox [:page_facing_up:](https://rclone.org/linkbox)
|
||||
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
|
||||
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
|
||||
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
||||
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||
- MEGA [:page_facing_up:](https://rclone.org/mega/)
|
||||
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
|
||||
- Memory [:page_facing_up:](https://rclone.org/memory/)
|
||||
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
||||
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
|
||||
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
|
||||
- Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
||||
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
||||
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
|
||||
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
|
||||
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
|
||||
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
|
||||
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
|
||||
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
|
||||
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
||||
- pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
||||
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
||||
- PikPak [:page_facing_up:](https://rclone.org/pikpak/)
|
||||
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
|
||||
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
||||
- put.io [:page_facing_up:](https://rclone.org/putio/)
|
||||
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
|
||||
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
||||
- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
|
||||
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
|
||||
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
||||
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
|
||||
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||
- Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
|
||||
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
|
||||
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
|
||||
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
|
||||
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
|
||||
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||
- Storj [:page_facing_up:](https://rclone.org/storj/)
|
||||
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
|
||||
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
|
||||
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||
- WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
||||
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
|
||||
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
|
||||
- The local filesystem [:page_facing_up:](https://rclone.org/local/)
|
||||
|
||||
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

@@ -132,50 +130,55 @@ Please see [the full list of all storage providers and their features](https://r

These backends adapt or modify other storage providers

- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
- Archive: read archive files [:page_facing_up:](https://rclone.org/archive/)
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)

## Features

- MD5/SHA-1 hashes checked at all times for file integrity
- Timestamps preserved on files
- Partial syncs supported on a whole file basis
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed
  files
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory
  identical
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync
  bidirectionally
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash
  equality
- Can sync to and from network, e.g. two different cloud accounts
- Optional large file chunking ([Chunker](https://rclone.org/chunker/))
- Optional transparent compression ([Compress](https://rclone.org/compress/))
- Optional encryption ([Crypt](https://rclone.org/crypt/))
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
- Multi-threaded downloads to local disk
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files
  over HTTP/WebDAV/FTP/SFTP/DLNA
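
The copy, sync and check modes listed above map directly onto rclone
subcommands; a minimal example, where `remote:` stands in for any configured
remote:

```console
rclone copy /path/to/local remote:backup     # copy new/changed files only
rclone sync /path/to/local remote:backup     # make the destination identical
rclone check /path/to/local remote:backup    # verify hashes match
```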

## Installation & documentation

Please see the [rclone website](https://rclone.org/) for:

- [Installation](https://rclone.org/install/)
- [Documentation & configuration](https://rclone.org/docs/)
- [Changelog](https://rclone.org/changelog/)
- [FAQ](https://rclone.org/faq/)
- [Storage providers](https://rclone.org/overview/)
- [Forum](https://forum.rclone.org/)
- ...and more

## Downloads

- <https://rclone.org/downloads/>

## License

This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).

RELEASE.md - 170 changed lines

@@ -4,63 +4,73 @@ This file describes how to make the various kinds of releases

## Extra required software for making a release

- [gh the github cli](https://github.com/cli/cli) for uploading packages
- pandoc for making the html and man pages

## Making a release

- git checkout master # see below for stable branch
- git pull # IMPORTANT
- git status - make sure everything is checked in
- Check GitHub actions build for master is Green
- make test # see integration test server or run locally
- make tag
- edit docs/content/changelog.md # make sure to remove duplicate logs from point
  releases
- make tidy
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin
- \# Wait for the GitHub builds to complete then...
- make fetch_binaries
- make tarball
- make vendorball
- make sign_upload
- make check_sign
- make upload
- make upload_website
- make upload_github
- make startdev # make startstable for stable branch
- \# announce with forum post, twitter post, patreon post

## Update dependencies

Early in the next release cycle update the dependencies.

- Review any pinned packages in go.mod and remove if possible
- `make updatedirect`
- `make GOTAGS=cmount`
- `make compiletest`
- Fix anything which doesn't compile at this point and commit changes here
- `git commit -a -v -m "build: update all dependencies"`

If the `make updatedirect` upgrades the version of go in the `go.mod`

```text
go 1.22.0
```

then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.

If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.

```console
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
```

If the `go mod tidy` fails use the output from it to remove the
package which can't be upgraded from `/tmp/potential-upgrades` when
done

```console
git co go.mod go.sum
```

@@ -70,12 +80,12 @@ Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used above - in that case
ignore it as it is too time-consuming to fix.

- `make update`
- `make GOTAGS=cmount`
- `make compiletest`
- roll back any updates which didn't compile
- `git commit -a -v --amend`
- **NB** watch out for this changing the default go version in `go.mod`

Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with

@@ -86,11 +96,25 @@ build.

Once it compiles locally, push it on a test branch and commit fixes
until the tests pass.

### Major versions

The above procedure will not upgrade major versions, so v2 to v3.
However this tool can show which major versions might need to be
upgraded:

```console
go run github.com/icholy/gomajor@latest list -major
```

Expect API breakage when updating major versions.

## Tidy beta

At some point after the release run

```console
bin/tidy-beta v1.55
```

where the version number is that of a couple of releases ago, to remove old
beta binaries.

@@ -100,54 +124,64 @@ If rclone needs a point release due to some horrendous bug:

Set vars

- BASE_TAG=v1.XX # e.g. v1.52
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
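
For instance, for a hypothetical v1.52.1 point release the variables would be
set like this (the values are illustrative only):

```console
BASE_TAG=v1.52
NEW_TAG=${BASE_TAG}.1
echo $BASE_TAG $NEW_TAG   # prints: v1.52 v1.52.1
```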

First make the release branch. If this is a second point release then
this will be done already.

- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
- make startstable

Now

- git co ${BASE_TAG}-stable
- git cherry-pick any fixes
- make startstable
- Do the steps as above
- git co master
- `#` cherry pick the changes to the changelog - check the diff to make sure it
  is correct
- git checkout ${BASE_TAG}-stable docs/content/changelog.md
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
- git push

## Sponsor logos

If updating the website note that the sponsor logos have been moved out of the
main repository.

You will need to checkout `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos>
which is a private repo containing artwork from sponsors.

## Update the website between releases

Create an update website branch based off the last release

```console
git co -b update-website
```

If the branch already exists, double check there are no commits that need saving.

Now reset the branch to the last release

```console
git reset --hard v1.64.0
```

Create the changes, check them in, test with `make serve` then

```console
make upload_test_website
```

Check out <https://test.rclone.org> and when happy

```console
make upload_website
```

Cherry pick any changes back to master and the stable branch if it is active.

@@ -155,14 +189,14 @@ Cherry pick any changes back to master and the stable branch if it is active.

To do a basic build of rclone's docker image to debug builds locally:

```console
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```

To test the multi-platform build

```console
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```

@@ -170,6 +204,6 @@ To make a full build then set the tags correctly and add `--push`

Note that you can't only build one architecture - you need to build them all.

```console
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```

@@ -4,19 +4,23 @@ package all

import (
    // Active file systems
    _ "github.com/rclone/rclone/backend/alias"
    _ "github.com/rclone/rclone/backend/archive"
    _ "github.com/rclone/rclone/backend/azureblob"
    _ "github.com/rclone/rclone/backend/azurefiles"
    _ "github.com/rclone/rclone/backend/b2"
    _ "github.com/rclone/rclone/backend/box"
    _ "github.com/rclone/rclone/backend/cache"
    _ "github.com/rclone/rclone/backend/chunker"
    _ "github.com/rclone/rclone/backend/cloudinary"
    _ "github.com/rclone/rclone/backend/combine"
    _ "github.com/rclone/rclone/backend/compress"
    _ "github.com/rclone/rclone/backend/crypt"
    _ "github.com/rclone/rclone/backend/doi"
    _ "github.com/rclone/rclone/backend/drive"
    _ "github.com/rclone/rclone/backend/dropbox"
    _ "github.com/rclone/rclone/backend/fichier"
    _ "github.com/rclone/rclone/backend/filefabric"
    _ "github.com/rclone/rclone/backend/filelu"
    _ "github.com/rclone/rclone/backend/filescom"
    _ "github.com/rclone/rclone/backend/ftp"
    _ "github.com/rclone/rclone/backend/gofile"
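
Each backend registers itself from an `init` function when it is blank-imported
as above. A minimal sketch of that pattern, modelled on the `archive` backend's
`init` shown further down (the `mybackend` name and the `NewFs` stub are
illustrative only):

```go
// Package mybackend is a hypothetical example of backend self-registration.
package mybackend

import (
    "context"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
)

// NewFs would construct the backend; here it is only a stub.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    return nil, fs.ErrorNotImplemented
}

func init() {
    // Registering makes the backend visible to rclone once the package
    // is blank-imported from backend/all.
    fs.Register(&fs.RegInfo{
        Name:        "mybackend",
        Description: "Example backend",
        NewFs:       NewFs,
    })
}
```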

backend/archive/archive.go - new file, 679 lines

@@ -0,0 +1,679 @@
|
||||
//go:build !plan9
|
||||
|
||||
// Package archive implements a backend to access archive files in a remote
|
||||
package archive
|
||||
|
||||
// FIXME factor common code between backends out - eg VFS initialization
|
||||
|
||||
// FIXME can we generalize the VFS handle caching and use it in zip backend
|
||||
|
||||
// Factor more stuff out if possible
|
||||
|
||||
// Odd stats which are probably coming from the VFS
|
||||
// * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s
|
||||
|
||||
// FIXME this will perform poorly for unpacking as the VFS Reader is bad
|
||||
// at multiple streams - need cache mode setting?
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
// Import all the required archivers here
|
||||
_ "github.com/rclone/rclone/backend/archive/squashfs"
|
||||
_ "github.com/rclone/rclone/backend/archive/zip"
|
||||
|
||||
"github.com/rclone/rclone/backend/archive/archiver"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "archive",
|
||||
Description: "Read archives",
|
||||
NewFs: NewFs,
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
Help: `Any metadata supported by the underlying remote is read and written.`,
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
Help: `Remote to wrap to read archives from.
|
||||
|
||||
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
|
||||
"myremote:bucket" or "myremote:".
|
||||
|
||||
If this is left empty, then the archive backend will use the root as
|
||||
the remote.
|
||||
|
||||
This means that you can use :archive:remote:path and it will be
|
||||
equivalent to setting remote="remote:path".
|
||||
`,
|
||||
Required: false,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Remote string `config:"remote"`
|
||||
}
|
||||
|
||||
// Fs represents a archive of upstreams
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this Fs
|
||||
root string // the path we are working on
|
||||
f fs.Fs // remote we are wrapping
|
||||
wrapper fs.Fs // fs that wraps us
|
||||
|
||||
mu sync.Mutex // protects the below
|
||||
archives map[string]*archive // the archives we have, by path
|
||||
}
|
||||
|
||||
// A single open archive
|
||||
type archive struct {
|
||||
archiver archiver.Archiver // archiver responsible
|
||||
remote string // path to the archive
|
||||
prefix string // prefix to add on to listings
|
||||
root string // root of the archive to remove from listings
|
||||
mu sync.Mutex // protects the following variables
|
||||
f fs.Fs // the archive Fs, may be nil
|
||||
}
|
||||
|
||||
// If remote is an archive then return it otherwise return nil
|
||||
func findArchive(remote string) *archive {
|
||||
// FIXME use something faster than linear search?
|
||||
for _, archiver := range archiver.Archivers {
|
||||
if strings.HasSuffix(remote, archiver.Extension) {
|
||||
return &archive{
|
||||
archiver: archiver,
|
||||
remote: remote,
|
||||
prefix: remote,
|
||||
root: "",
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Find an archive buried in remote
|
||||
func subArchive(remote string) *archive {
|
||||
archive := findArchive(remote)
|
||||
if archive != nil {
|
||||
return archive
|
||||
}
|
||||
parent := path.Dir(remote)
|
||||
if parent == "/" || parent == "." {
|
||||
return nil
|
||||
}
|
||||
return subArchive(parent)
|
||||
}
|
||||
|
||||
// If remote is an archive then return it otherwise return nil
|
||||
func (f *Fs) findArchive(remote string) (archive *archive) {
|
||||
archive = findArchive(remote)
|
||||
if archive != nil {
|
||||
f.mu.Lock()
|
||||
f.archives[remote] = archive
|
||||
f.mu.Unlock()
|
||||
}
|
||||
return archive
|
||||
}
|
||||
|
||||
// Instantiate archive if it hasn't been instantiated yet
|
||||
//
|
||||
// This is done lazily so that we can list a directory full of
|
||||
// archives without opening them all.
|
||||
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
if a.f != nil {
|
||||
return a.f, nil
|
||||
}
|
||||
newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root)
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err)
|
||||
}
|
||||
a.f = newFs
|
||||
return a.f, nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path.
|
||||
//
|
||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
|
||||
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remote := opt.Remote
|
||||
origRoot := root
|
||||
|
||||
// If remote is empty, use the root instead
|
||||
if remote == "" {
|
||||
remote = root
|
||||
root = ""
|
||||
}
|
||||
isDirectory := strings.HasSuffix(remote, "/")
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
if remote == "" {
|
||||
remote = "/"
|
||||
}
|
||||
if strings.HasPrefix(remote, name+":") {
|
||||
return nil, errors.New("can't point archive remote at itself - check the value of the upstreams setting")
|
||||
}
|
||||
|
||||
_ = isDirectory
|
||||
|
||||
foundArchive := subArchive(remote)
|
||||
if foundArchive != nil {
|
||||
fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote)
|
||||
// Archive path
|
||||
foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/")
|
||||
// Path to the archive
|
||||
archiveRemote := remote[:len(foundArchive.remote)]
|
||||
// Remote is archive leaf name
|
||||
foundArchive.remote = path.Base(archiveRemote)
|
||||
foundArchive.prefix = ""
|
||||
// Point remote to archive file
|
||||
remote = archiveRemote
|
||||
}
|
||||
|
||||
// Make sure to remove trailing . referring to the current dir
|
||||
if path.Base(root) == "." {
|
||||
root = strings.TrimSuffix(root, ".")
|
||||
}
|
||||
remotePath := fspath.JoinRootPath(remote, root)
|
||||
wrappedFs, err := cache.Get(ctx, remotePath)
|
||||
if err != fs.ErrorIsFile && err != nil {
|
||||
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
//root: path.Join(remotePath, root),
|
||||
root: origRoot,
|
||||
opt: *opt,
|
||||
f: wrappedFs,
|
||||
archives: make(map[string]*archive),
|
||||
}
|
||||
cache.PinUntilFinalized(f.f, f)
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
|
||||
if foundArchive != nil {
|
||||
fs.Debugf(f, "Root is an archive")
|
||||
if err != fs.ErrorIsFile {
|
||||
return nil, fmt.Errorf("expecting to find a file at %q", remote)
|
||||
}
|
||||
return foundArchive.init(ctx, f.f)
|
||||
}
|
||||
// Correct root if definitely pointing to a file
|
||||
if err == fs.ErrorIsFile {
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." || f.root == "/" {
|
||||
f.root = ""
|
||||
}
|
||||
}
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("archive root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return f.f.Rmdir(ctx, dir)
|
||||
}
|
||||
|
||||
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return f.f.Hashes()
|
||||
}
|
||||
|
||||
// Mkdir makes the root directory of the Fs object
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return f.f.Mkdir(ctx, dir)
|
||||
}
|
||||
|
||||
// Purge all files in the directory
|
||||
//
|
||||
// Implement this if you have a way of deleting all the files
|
||||
// quicker than just running Remove() on the result of List()
|
||||
//
|
||||
// Return an error if it doesn't exist
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
do := f.f.Features().Purge
|
||||
if do == nil {
|
||||
return fs.ErrorCantPurge
|
||||
}
|
||||
return do(ctx, dir)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
do := f.f.Features().Copy
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
// FIXME
|
||||
// o, ok := src.(*Object)
|
||||
// if !ok {
|
||||
// return nil, fs.ErrorCantCopy
|
||||
// }
|
||||
return do(ctx, src, remote)
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
do := f.f.Features().Move
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
// FIXME
|
||||
// o, ok := src.(*Object)
|
||||
// if !ok {
|
||||
// return nil, fs.ErrorCantMove
|
||||
// }
|
||||
return do(ctx, src, remote)
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
do := f.f.Features().DirMove
|
||||
if do == nil {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
return do(ctx, srcFs.f, srcRemote, dstRemote)
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path
|
||||
// that has had changes. If the implementation
|
||||
// uses polling, it should adhere to the given interval.
|
||||
// At least one value will be written to the channel,
|
||||
// specifying the initial value and updated values might
|
||||
// follow. A 0 Duration should pause the polling.
|
||||
// The ChangeNotify implementation must empty the channel
|
||||
// regularly. When the channel gets closed, the implementation
|
||||
// should stop polling and release resources.
|
||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
|
||||
do := f.f.Features().ChangeNotify
|
||||
if do == nil {
|
||||
return
|
||||
}
|
||||
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
|
||||
notifyFunc(path, entryType)
|
||||
}
|
||||
do(ctx, wrappedNotifyFunc, ch)
|
||||
}
|
||||
|
||||
// DirCacheFlush resets the directory cache - used in testing
|
||||
// as an optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
do := f.f.Features().DirCacheFlush
|
||||
if do != nil {
|
||||
do()
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
|
||||
var o fs.Object
|
||||
var err error
|
||||
if stream {
|
||||
o, err = f.f.Features().PutStream(ctx, in, src, options...)
|
||||
} else {
|
||||
o, err = f.f.Put(ctx, in, src, options...)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
return f.put(ctx, in, src, false, options...)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
return f.put(ctx, in, src, true, options...)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
do := f.f.Features().About
|
||||
if do == nil {
|
||||
return nil, errors.New("not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// Find the Fs for the directory
|
||||
func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
subFs = f.f
|
||||
|
||||
// FIXME should do this with a better datastructure like a prefix tree
|
||||
// FIXME want to find the longest first otherwise nesting won't work
|
||||
dirSlash := dir + "/"
|
||||
for archiverRemote, archive := range f.archives {
|
||||
subRemote := archiverRemote + "/"
|
||||
if strings.HasPrefix(dirSlash, subRemote) {
|
||||
subFs, err = archive.init(ctx, f.f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return subFs, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
|
||||
|
||||
subFs, err := f.findFs(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
entries, err = subFs.List(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i, entry := range entries {
|
||||
// Can only unarchive files
|
||||
if o, ok := entry.(fs.Object); ok {
|
||||
remote := o.Remote()
|
||||
archive := f.findArchive(remote)
|
||||
if archive != nil {
|
||||
// Overwrite entry with directory
|
||||
entries[i] = fs.NewDir(remote, o.ModTime(ctx))
|
||||
}
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// NewObject creates a new remote archive file object
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
|
||||
dir := path.Dir(remote)
|
||||
if dir == "/" || dir == "." {
|
||||
dir = ""
|
||||
}
|
||||
|
||||
subFs, err := f.findFs(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o, err := subFs.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Precision is the greatest precision of all the archivers
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// Shutdown the backend, closing any background tasks and any
|
||||
// cached connections.
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
if do := f.f.Features().Shutdown; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
||||
do := f.f.Features().PublicLink
|
||||
if do == nil {
|
||||
return "", errors.New("PublicLink not supported")
|
||||
}
|
||||
return do(ctx, remote, expire, unlink)
|
||||
}
|
||||
|
||||
// PutUnchecked in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
//
|
||||
// May create duplicates or return errors if src already
|
||||
// exists.
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
do := f.f.Features().PutUnchecked
|
||||
if do == nil {
|
||||
return nil, errors.New("can't PutUnchecked")
|
||||
}
|
||||
o, err := do(ctx, in, src, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// MergeDirs merges the contents of all the directories passed
|
||||
// in into the first one and rmdirs the other directories.
|
||||
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||
if len(dirs) == 0 {
|
||||
return nil
|
||||
}
|
||||
do := f.f.Features().MergeDirs
|
||||
if do == nil {
|
||||
return errors.New("MergeDirs not supported")
|
||||
}
|
||||
return do(ctx, dirs)
|
||||
}
|
||||
|
||||
// CleanUp the trash in the Fs
|
||||
//
|
||||
// Implement this if you have a way of emptying the trash or
|
||||
// otherwise cleaning up old versions of files.
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
do := f.f.Features().CleanUp
|
||||
if do == nil {
|
||||
return errors.New("not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// OpenWriterAt opens with a handle for random access writes
|
||||
//
|
||||
// Pass in the remote desired and the size if known.
|
||||
//
|
||||
// It truncates any existing object
|
||||
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
|
||||
do := f.f.Features().OpenWriterAt
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx, remote, size)
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
func (f *Fs) UnWrap() fs.Fs {
|
||||
return f.f
|
||||
}
|
||||
|
||||
// WrapFs returns the Fs that is wrapping this Fs
|
||||
func (f *Fs) WrapFs() fs.Fs {
|
||||
return f.wrapper
|
||||
}
|
||||
|
||||
// SetWrapper sets the Fs that is wrapping this Fs
|
||||
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
||||
f.wrapper = wrapper
|
||||
}
|
||||
|
||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
//
|
||||
// Pass in the remote and the src object
|
||||
// You can also use options to hint at the desired chunk size
|
||||
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
||||
do := f.f.Features().OpenChunkWriter
|
||||
if do == nil {
|
||||
return info, nil, fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx, remote, src, options...)
|
||||
}
|
||||
|
||||
// UserInfo returns info about the connected user
|
||||
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
|
||||
do := f.f.Features().UserInfo
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// Disconnect the current user
|
||||
func (f *Fs) Disconnect(ctx context.Context) error {
|
||||
do := f.f.Features().Disconnect
|
||||
if do == nil {
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.MergeDirser = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.OpenWriterAter = (*Fs)(nil)
|
||||
_ fs.OpenChunkWriter = (*Fs)(nil)
|
||||
_ fs.UserInfoer = (*Fs)(nil)
|
||||
_ fs.Disconnecter = (*Fs)(nil)
|
||||
// FIXME _ fs.FullObject = (*Object)(nil)
|
||||
)
|
||||
backend/archive/archive_internal_test.go (new file, 221 lines)
@@ -0,0 +1,221 @@
|
||||
//go:build !plan9
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// FIXME need to test Open with seek
|
||||
|
||||
// run - run a shell command
|
||||
func run(t *testing.T, args ...string) {
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
fs.Debugf(nil, "run args = %v", args)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf(`
|
||||
----------------------------
|
||||
Failed to run %v: %v
|
||||
Command output was:
|
||||
%s
|
||||
----------------------------
|
||||
`, args, err, out)
|
||||
}
|
||||
}
|
||||
|
||||
// check the dst and src are identical
|
||||
func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
fs.Debugf(nil, "check %q vs %q", dstArchive, src)
|
||||
Farchive, err := cache.Get(ctx, dstArchive)
|
||||
if err != fs.ErrorIsFile {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
Fsrc, err := cache.Get(ctx, src)
|
||||
if err != fs.ErrorIsFile {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
var matches bytes.Buffer
|
||||
opt := operations.CheckOpt{
|
||||
Fdst: Farchive,
|
||||
Fsrc: Fsrc,
|
||||
Match: &matches,
|
||||
}
|
||||
|
||||
for _, action := range []string{"Check", "Download"} {
|
||||
t.Run(action, func(t *testing.T) {
|
||||
matches.Reset()
|
||||
if action == "Download" {
|
||||
assert.NoError(t, operations.CheckDownload(ctx, &opt))
|
||||
} else {
|
||||
assert.NoError(t, operations.Check(ctx, &opt))
|
||||
}
|
||||
if expectedCount > 0 {
|
||||
assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n"))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("NewObject", func(t *testing.T) {
|
||||
// Check we can run NewObject on all files and read them
|
||||
assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) {
|
||||
if t.Failed() {
|
||||
return
|
||||
}
|
||||
remote := srcObj.Remote()
|
||||
archiveObj, err := Farchive.NewObject(ctx, remote)
|
||||
require.NoError(t, err, remote)
|
||||
assert.Equal(t, remote, archiveObj.Remote(), remote)
|
||||
|
||||
// Test that the contents are the same
|
||||
archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1)
|
||||
srcBuf := fstests.ReadObject(ctx, t, srcObj, -1)
|
||||
assert.Equal(t, srcBuf, archiveBuf)
|
||||
|
||||
if len(srcBuf) < 81 {
|
||||
return
|
||||
}
|
||||
|
||||
// Tests that Open works with SeekOption
|
||||
assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek")
|
||||
|
||||
// Tests that Open works with RangeOption
|
||||
for _, test := range []struct {
|
||||
ro fs.RangeOption
|
||||
wantStart, wantEnd int
|
||||
}{
|
||||
{fs.RangeOption{Start: 5, End: 15}, 5, 16},
|
||||
{fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)},
|
||||
{fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)},
|
||||
{fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes
|
||||
// {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it
|
||||
} {
|
||||
got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro)
|
||||
foundAt := strings.Index(srcBuf, got)
|
||||
help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
|
||||
assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help)
|
||||
}
|
||||
|
||||
// Test that the modtimes are correct
|
||||
fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision())
|
||||
|
||||
// Test that the sizes are correct
|
||||
assert.Equal(t, srcObj.Size(), archiveObj.Size())
|
||||
|
||||
// Test that Strings are OK
|
||||
assert.Equal(t, srcObj.String(), archiveObj.String())
|
||||
}))
|
||||
})
|
||||
|
||||
// t.Logf("Fdst ------------- %v", Fdst)
|
||||
// operations.List(ctx, Fdst, os.Stdout)
|
||||
// t.Logf("Fsrc ------------- %v", Fsrc)
|
||||
// operations.List(ctx, Fsrc, os.Stdout)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// test creating and reading back some archives
|
||||
//
|
||||
// Note that this uses rclone and zip as external binaries.
|
||||
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) {
|
||||
ctx := context.Background()
|
||||
checkFiles := 1000
|
||||
|
||||
// create random test input files
|
||||
inputRoot := t.TempDir()
|
||||
input := filepath.Join(inputRoot, archiveName)
|
||||
require.NoError(t, os.Mkdir(input, 0777))
|
||||
run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input)
|
||||
|
||||
// Create the archive
|
||||
output := t.TempDir()
|
||||
zipFile := path.Join(output, archiveName)
|
||||
archiveFn(t, zipFile, input)
|
||||
|
||||
// Check the archive itself
|
||||
checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles)
|
||||
|
||||
// Now check a subdirectory
|
||||
fis, err := os.ReadDir(input)
|
||||
require.NoError(t, err)
|
||||
subDir := "NOT FOUND"
|
||||
aFile := "NOT FOUND"
|
||||
for _, fi := range fis {
|
||||
if fi.IsDir() {
|
||||
subDir = fi.Name()
|
||||
} else {
|
||||
aFile = fi.Name()
|
||||
}
|
||||
}
|
||||
checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0)
|
||||
|
||||
// Now check a single file
|
||||
fiCtx, fi := filter.AddConfig(ctx)
|
||||
require.NoError(t, fi.AddRule("+ "+aFile))
|
||||
require.NoError(t, fi.AddRule("- *"))
|
||||
checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0)
|
||||
|
||||
// Now check the level above
|
||||
checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles)
|
||||
// run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName)
|
||||
}
|
||||
|
||||
// Make sure we have the executable named
|
||||
func skipIfNoExe(t *testing.T, exeName string) {
|
||||
_, err := exec.LookPath(exeName)
|
||||
if err != nil {
|
||||
t.Skipf("%s executable not installed", exeName)
|
||||
}
|
||||
}
|
||||
|
||||
// Test creating and reading back some archives
|
||||
//
|
||||
// Note that this uses rclone and zip as external binaries.
|
||||
func TestArchiveZip(t *testing.T) {
|
||||
fstest.Initialise()
|
||||
skipIfNoExe(t, "zip")
|
||||
skipIfNoExe(t, "rclone")
|
||||
testArchive(t, "test.zip", func(t *testing.T, output, input string) {
|
||||
oldcwd, err := os.Getwd()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, os.Chdir(input))
|
||||
defer func() {
|
||||
require.NoError(t, os.Chdir(oldcwd))
|
||||
}()
|
||||
run(t, "zip", "-9r", output, ".")
|
||||
})
|
||||
}
|
||||
|
||||
// Test creating and reading back some archives
|
||||
//
|
||||
// Note that this uses rclone and squashfs as external binaries.
|
||||
func TestArchiveSquashfs(t *testing.T) {
|
||||
fstest.Initialise()
|
||||
skipIfNoExe(t, "mksquashfs")
|
||||
skipIfNoExe(t, "rclone")
|
||||
testArchive(t, "test.sqfs", func(t *testing.T, output, input string) {
|
||||
run(t, "mksquashfs", input, output)
|
||||
})
|
||||
}
|
||||
backend/archive/archive_test.go (new file, 67 lines)
@@ -0,0 +1,67 @@
|
||||
//go:build !plan9
|
||||
|
||||
// Test Archive filesystem interface
|
||||
package archive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
_ "github.com/rclone/rclone/backend/memory"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
var (
|
||||
unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"}
|
||||
// In these tests we receive objects from the underlying remote which don't implement these methods
|
||||
unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"}
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
if *fstest.RemoteName == "" {
|
||||
t.Skip("Skipping as -remote not set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
func TestLocal(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
remote := t.TempDir()
|
||||
name := "TestArchiveLocal"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "archive"},
|
||||
{Name: name, Key: "remote", Value: remote},
|
||||
},
|
||||
QuickTestOK: true,
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
func TestMemory(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
remote := ":memory:"
|
||||
name := "TestArchiveMemory"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "archive"},
|
||||
{Name: name, Key: "remote", Value: remote},
|
||||
},
|
||||
QuickTestOK: true,
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
backend/archive/archive_unsupported.go (new file, 7 lines)
@@ -0,0 +1,7 @@
// Build for archive for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9

// Package archive implements a backend to access archive files in a remote
package archive
|
||||
backend/archive/archiver/archiver.go (new file, 24 lines)
@@ -0,0 +1,24 @@
// Package archiver registers all the archivers
package archiver

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// Archiver describes an archive package
type Archiver struct {
	// New constructs an Fs from the (wrappedFs, remote) with the objects
	// prefix with prefix and rooted at root
	New func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
	Extension string
}

// Archivers is a slice of all registered archivers
var Archivers []Archiver

// Register adds the archivers provided to the list of known archivers
func Register(as ...Archiver) {
	Archivers = append(Archivers, as...)
}
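
For orientation, here is a minimal sketch of how a new archiver would plug into this registry, using a hypothetical tar package as the example; it is illustrative only, and the real registrations appear in the squashfs and zip packages below.

// Hypothetical sketch - not part of this change.
// A tar archiver would register itself from its init function.
package tar

import (
	"context"

	"github.com/rclone/rclone/backend/archive/archiver"
	"github.com/rclone/rclone/fs"
)

func init() {
	archiver.Register(archiver.Archiver{
		New:       New,    // constructor called when a ".tar" file is found
		Extension: ".tar", // hypothetical extension handled by this archiver
	})
}

// New would build an fs.Fs presenting the contents of the tar archive at
// remote. This stub only shows the required signature.
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
	return nil, fs.ErrorNotImplemented
}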
|
||||
backend/archive/base/base.go (new file, 233 lines)
@@ -0,0 +1,233 @@
|
||||
// Package base is a base archive Fs
|
||||
package base
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
)
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
type Fs struct {
|
||||
f fs.Fs
|
||||
wrapper fs.Fs
|
||||
name string
|
||||
features *fs.Features // optional features
|
||||
vfs *vfs.VFS
|
||||
node vfs.Node // archive object
|
||||
remote string // remote of the archive object
|
||||
prefix string // position for objects
|
||||
prefixSlash string // position for objects with a slash on
|
||||
root string // position to read from within the archive
|
||||
}
|
||||
|
||||
var errNotImplemented = errors.New("internal error: method not implemented in archiver")
|
||||
|
||||
// New constructs an Fs from the (wrappedFs, remote) with the objects
|
||||
// prefix with prefix and rooted at root
|
||||
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) {
|
||||
// FIXME vfs cache?
|
||||
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
|
||||
fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
|
||||
VFS := vfs.New(wrappedFs, nil)
|
||||
node, err := VFS.Stat(remote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
f: wrappedFs,
|
||||
name: path.Join(fs.ConfigString(wrappedFs), remote),
|
||||
vfs: VFS,
|
||||
node: node,
|
||||
remote: remote,
|
||||
root: root,
|
||||
prefix: prefix,
|
||||
prefixSlash: prefix + "/",
|
||||
}
|
||||
|
||||
// FIXME
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
//
|
||||
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: false,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: false, // MimeTypes not supported by archives
|
||||
WriteMimeType: false,
|
||||
BucketBased: false,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// String returns a description of the FS
|
||||
func (f *Fs) String() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return nil, errNotImplemented
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
||||
return nil, errNotImplemented
|
||||
}
|
||||
|
||||
// Precision of the ModTimes in this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
||||
return nil, vfs.EROFS
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.None)
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
func (f *Fs) UnWrap() fs.Fs {
|
||||
return f.f
|
||||
}
|
||||
|
||||
// WrapFs returns the Fs that is wrapping this Fs
|
||||
func (f *Fs) WrapFs() fs.Fs {
|
||||
return f.wrapper
|
||||
}
|
||||
|
||||
// SetWrapper sets the Fs that is wrapping this Fs
|
||||
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
||||
f.wrapper = wrapper
|
||||
}
|
||||
|
||||
// Object describes an object to be read from the raw zip file
|
||||
type Object struct {
|
||||
f *Fs
|
||||
remote string
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.f
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Remote()
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
return -1
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Storable returns a boolean indicating if this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
return nil, errNotImplemented
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.UnWrapper = (*Fs)(nil)
|
||||
_ fs.Wrapper = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
backend/archive/squashfs/cache.go (new file, 165 lines)
@@ -0,0 +1,165 @@
|
||||
package squashfs
|
||||
|
||||
// Could just be using bare object Open with RangeRequest which
|
||||
// would transfer the minimum amount of data but may be slower.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/diskfs/go-diskfs/backend"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
)
|
||||
|
||||
// Cache file handles for accessing the file
|
||||
type cache struct {
|
||||
node vfs.Node
|
||||
fhsMu sync.Mutex
|
||||
fhs []cacheHandle
|
||||
}
|
||||
|
||||
// A cached file handle
|
||||
type cacheHandle struct {
|
||||
offset int64
|
||||
fh vfs.Handle
|
||||
}
|
||||
|
||||
// Make a new cache
|
||||
func newCache(node vfs.Node) *cache {
|
||||
return &cache{
|
||||
node: node,
|
||||
}
|
||||
}
|
||||
|
||||
// Get a vfs.Handle from the pool or open one
|
||||
//
|
||||
// This tries to find an open file handle which doesn't require seeking.
|
||||
func (c *cache) open(off int64) (fh vfs.Handle, err error) {
|
||||
c.fhsMu.Lock()
|
||||
defer c.fhsMu.Unlock()
|
||||
|
||||
if len(c.fhs) > 0 {
|
||||
// Look for exact match first
|
||||
for i, cfh := range c.fhs {
|
||||
if cfh.offset == off {
|
||||
// fs.Debugf(nil, "CACHE MATCH")
|
||||
c.fhs = append(c.fhs[:i], c.fhs[i+1:]...)
|
||||
return cfh.fh, nil
|
||||
|
||||
}
|
||||
}
|
||||
// fs.Debugf(nil, "CACHE MISS")
|
||||
// Just take the first one if not found
|
||||
cfh := c.fhs[0]
|
||||
c.fhs = c.fhs[1:]
|
||||
return cfh.fh, nil
|
||||
}
|
||||
|
||||
fh, err = c.node.Open(os.O_RDONLY)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open squashfs archive: %w", err)
|
||||
}
|
||||
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
// Close a vfs.Handle or return it to the pool
|
||||
//
|
||||
// off should be the offset the file handle would read from without seeking
|
||||
func (c *cache) close(fh vfs.Handle, off int64) {
|
||||
c.fhsMu.Lock()
|
||||
defer c.fhsMu.Unlock()
|
||||
|
||||
c.fhs = append(c.fhs, cacheHandle{
|
||||
offset: off,
|
||||
fh: fh,
|
||||
})
|
||||
}
|
||||
|
||||
// ReadAt reads len(p) bytes into p starting at offset off in the underlying
|
||||
// input source. It returns the number of bytes read (0 <= n <= len(p)) and any
|
||||
// error encountered.
|
||||
//
|
||||
// When ReadAt returns n < len(p), it returns a non-nil error explaining why
|
||||
// more bytes were not returned. In this respect, ReadAt is stricter than Read.
|
||||
//
|
||||
// Even if ReadAt returns n < len(p), it may use all of p as scratch
|
||||
// space during the call. If some data is available but not len(p) bytes,
|
||||
// ReadAt blocks until either all the data is available or an error occurs.
|
||||
// In this respect ReadAt is different from Read.
|
||||
//
|
||||
// If the n = len(p) bytes returned by ReadAt are at the end of the input
|
||||
// source, ReadAt may return either err == EOF or err == nil.
|
||||
//
|
||||
// If ReadAt is reading from an input source with a seek offset, ReadAt should
|
||||
// not affect nor be affected by the underlying seek offset.
|
||||
//
|
||||
// Clients of ReadAt can execute parallel ReadAt calls on the same input
|
||||
// source.
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
func (c *cache) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
fh, err := c.open(off)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
defer func() {
|
||||
c.close(fh, off+int64(len(p)))
|
||||
}()
|
||||
// fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh)
|
||||
return fh.ReadAt(p, off)
|
||||
}
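
In effect the cache keeps a small pool of open VFS handles keyed by the offset each one would read next: sequential ReadAt calls get back the handle they just used, so the underlying handle rarely needs to seek, while concurrent readers simply open extra handles which are returned to the pool by close.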
|
||||
|
||||
var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method")
|
||||
|
||||
// WriteAt method dummy stub to satisfy interface
|
||||
func (c *cache) WriteAt(p []byte, off int64) (n int, err error) {
|
||||
return 0, errCacheNotImplemented
|
||||
}
|
||||
|
||||
// Seek method dummy stub to satisfy interface
|
||||
func (c *cache) Seek(offset int64, whence int) (int64, error) {
|
||||
return 0, errCacheNotImplemented
|
||||
}
|
||||
|
||||
// Read method dummy stub to satisfy interface
|
||||
func (c *cache) Read(p []byte) (n int, err error) {
|
||||
return 0, errCacheNotImplemented
|
||||
}
|
||||
|
||||
func (c *cache) Stat() (fs.FileInfo, error) {
|
||||
return nil, errCacheNotImplemented
|
||||
}
|
||||
|
||||
// Close the file
|
||||
func (c *cache) Close() (err error) {
|
||||
c.fhsMu.Lock()
|
||||
defer c.fhsMu.Unlock()
|
||||
|
||||
// Close any open file handles
|
||||
for i := range c.fhs {
|
||||
fh := &c.fhs[i]
|
||||
newErr := fh.fh.Close()
|
||||
if err == nil {
|
||||
err = newErr
|
||||
}
|
||||
}
|
||||
c.fhs = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// Sys returns OS-specific file for ioctl calls via fd
|
||||
func (c *cache) Sys() (*os.File, error) {
|
||||
return nil, errCacheNotImplemented
|
||||
}
|
||||
|
||||
// Writable returns file for read-write operations
|
||||
func (c *cache) Writable() (backend.WritableFile, error) {
|
||||
return nil, errCacheNotImplemented
|
||||
}
|
||||
|
||||
// check interfaces
|
||||
var _ backend.Storage = (*cache)(nil)
|
||||
backend/archive/squashfs/squashfs.go (new file, 446 lines)
@@ -0,0 +1,446 @@
|
||||
// Package squashfs implements a squashfs archiver for the archive backend
|
||||
package squashfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/diskfs/go-diskfs/filesystem/squashfs"
|
||||
"github.com/rclone/rclone/backend/archive/archiver"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfscommon"
|
||||
)
|
||||
|
||||
func init() {
|
||||
archiver.Register(archiver.Archiver{
|
||||
New: New,
|
||||
Extension: ".sqfs",
|
||||
})
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
type Fs struct {
|
||||
f fs.Fs
|
||||
wrapper fs.Fs
|
||||
name string
|
||||
features *fs.Features // optional features
|
||||
vfs *vfs.VFS
|
||||
sqfs *squashfs.FileSystem // interface to the squashfs
|
||||
c *cache
|
||||
node vfs.Node // squashfs file object - set if reading
|
||||
remote string // remote of the squashfs file object
|
||||
prefix string // position for objects
|
||||
prefixSlash string // position for objects with a slash on
|
||||
root string // position to read from within the archive
|
||||
}
|
||||
|
||||
// New constructs an Fs from the (wrappedFs, remote) with the objects
|
||||
// prefix with prefix and rooted at root
|
||||
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
|
||||
// FIXME vfs cache?
|
||||
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
|
||||
fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
|
||||
vfsOpt := vfscommon.Opt
|
||||
vfsOpt.ReadWait = 0
|
||||
VFS := vfs.New(wrappedFs, &vfsOpt)
|
||||
node, err := VFS.Stat(remote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
|
||||
}
|
||||
|
||||
c := newCache(node)
|
||||
|
||||
// FIXME blocksize
|
||||
sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read squashfs: %w", err)
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
f: wrappedFs,
|
||||
name: path.Join(fs.ConfigString(wrappedFs), remote),
|
||||
vfs: VFS,
|
||||
node: node,
|
||||
sqfs: sqfs,
|
||||
c: c,
|
||||
remote: remote,
|
||||
root: strings.Trim(root, "/"),
|
||||
prefix: prefix,
|
||||
prefixSlash: prefix + "/",
|
||||
}
|
||||
if prefix == "" {
|
||||
f.prefixSlash = ""
|
||||
}
|
||||
|
||||
singleObject := false
|
||||
|
||||
// Find the directory the root points to
|
||||
if f.root != "" && !strings.HasSuffix(root, "/") {
|
||||
native, err := f.toNative("")
|
||||
if err == nil {
|
||||
native = strings.TrimRight(native, "/")
|
||||
_, err := f.newObjectNative(native)
|
||||
if err == nil {
|
||||
// If it pointed to a file, find the directory above
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." || f.root == "/" {
|
||||
f.root = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
//
|
||||
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: false,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: false, // MimeTypes not supported with squashfs
|
||||
WriteMimeType: false,
|
||||
BucketBased: false,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
|
||||
if singleObject {
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// String returns a description of the FS
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("Squashfs %q", f.name)
|
||||
}
|
||||
|
||||
// This turns a remote into a native path in the squashfs starting with a /
|
||||
func (f *Fs) toNative(remote string) (string, error) {
|
||||
native := strings.Trim(remote, "/")
|
||||
if f.prefix == "" {
|
||||
native = "/" + native
|
||||
} else if native == f.prefix {
|
||||
native = "/"
|
||||
} else if !strings.HasPrefix(native, f.prefixSlash) {
|
||||
return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash)
|
||||
} else {
|
||||
native = native[len(f.prefix):]
|
||||
}
|
||||
if f.root != "" {
|
||||
native = "/" + f.root + native
|
||||
}
|
||||
return native, nil
|
||||
}
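
To make the mapping concrete (values illustrative): with prefix "file.sqfs" and an empty root, the remote "file.sqfs/dir/a.txt" maps to the native path "/dir/a.txt"; with root "sub" the same remote maps to "/sub/dir/a.txt". fromNative below performs the reverse translation when building remotes from directory listings.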
|
||||
|
||||
// Turn a (nativeDir, leaf) into a remote
|
||||
func (f *Fs) fromNative(nativeDir string, leaf string) string {
|
||||
// fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root)
|
||||
dir := nativeDir
|
||||
if f.root != "" {
|
||||
dir = strings.TrimPrefix(dir, "/"+f.root)
|
||||
}
|
||||
remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/")
|
||||
// fs.Debugf(nil, "dir = %q, remote=%q", dir, remote)
|
||||
return remote
|
||||
}
|
||||
|
||||
// Convert a FileInfo into an Object from native dir
|
||||
func (f *Fs) objectFromFileInfo(nativeDir string, item squashfs.FileStat) *Object {
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: f.fromNative(nativeDir, item.Name()),
|
||||
size: item.Size(),
|
||||
modTime: item.ModTime(),
|
||||
item: item,
|
||||
}
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
||||
|
||||
nativeDir, err := f.toNative(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
items, err := f.sqfs.ReadDir(nativeDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err)
|
||||
}
|
||||
|
||||
entries = make(fs.DirEntries, 0, len(items))
|
||||
for _, fi := range items {
|
||||
item, ok := fi.(squashfs.FileStat)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
|
||||
}
|
||||
// fs.Debugf(item.Name(), "entry = %#v", item)
|
||||
var entry fs.DirEntry
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading item %q: %q", item.Name(), err)
|
||||
}
|
||||
if item.IsDir() {
|
||||
var remote = f.fromNative(nativeDir, item.Name())
|
||||
entry = fs.NewDir(remote, item.ModTime())
|
||||
} else {
|
||||
if item.Mode().IsRegular() {
|
||||
entry = f.objectFromFileInfo(nativeDir, item)
|
||||
} else {
|
||||
fs.Debugf(item.Name(), "FIXME Not regular file - skipping")
|
||||
continue
|
||||
}
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
|
||||
// fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// newObjectNative finds the object at the native path passed in
|
||||
func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) {
|
||||
// get the path and filename
|
||||
dir, leaf := path.Split(nativePath)
|
||||
dir = strings.TrimRight(dir, "/")
|
||||
leaf = strings.Trim(leaf, "/")
|
||||
|
||||
// FIXME need to detect directory not found
|
||||
fis, err := f.sqfs.ReadDir(dir)
|
||||
if err != nil {
|
||||
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
for _, fi := range fis {
|
||||
if fi.Name() == leaf {
|
||||
if fi.IsDir() {
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
item, ok := fi.(squashfs.FileStat)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
|
||||
}
|
||||
o = f.objectFromFileInfo(dir, item)
|
||||
break
|
||||
}
|
||||
}
|
||||
if o == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
||||
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
|
||||
|
||||
nativePath, err := f.toNative(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.newObjectNative(nativePath)
|
||||
}
|
||||
|
||||
// Precision of the ModTimes in this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
||||
return nil, vfs.EROFS
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.None)
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
func (f *Fs) UnWrap() fs.Fs {
|
||||
return f.f
|
||||
}
|
||||
|
||||
// WrapFs returns the Fs that is wrapping this Fs
|
||||
func (f *Fs) WrapFs() fs.Fs {
|
||||
return f.wrapper
|
||||
}
|
||||
|
||||
// SetWrapper sets the Fs that is wrapping this Fs
|
||||
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
||||
f.wrapper = wrapper
|
||||
}
|
||||
|
||||
// Object describes an object to be read from the raw squashfs file
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
size int64
|
||||
modTime time.Time
|
||||
item squashfs.FileStat
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Remote()
|
||||
}
|
||||
|
||||
// Turn a squashfs path into a full path for the parent Fs
|
||||
// func (o *Object) path(remote string) string {
|
||||
// return path.Join(o.fs.prefix, remote)
|
||||
// }
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Storable returns a boolean indicating if this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.Size())
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
remote, err := o.fs.toNative(o.remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs.Debugf(o, "Opening %q", remote)
|
||||
//fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY)
|
||||
fh, err := o.item.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// discard data from start as necessary
|
||||
if offset > 0 {
|
||||
_, err = fh.Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// If limited then don't return everything
|
||||
if limit >= 0 {
|
||||
fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options)
|
||||
return readers.NewLimitedReadCloser(fh, limit), nil
|
||||
}
|
||||
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.UnWrapper = (*Fs)(nil)
|
||||
_ fs.Wrapper = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
backend/archive/zip/zip.go (new file, 385 lines)
@@ -0,0 +1,385 @@
|
||||
// Package zip implements a zip archiver for the archive backend
|
||||
package zip
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/archive/archiver"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/dirtree"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfscommon"
|
||||
)
|
||||
|
||||
func init() {
|
||||
archiver.Register(archiver.Archiver{
|
||||
New: New,
|
||||
Extension: ".zip",
|
||||
})
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
type Fs struct {
|
||||
f fs.Fs
|
||||
wrapper fs.Fs
|
||||
name string
|
||||
features *fs.Features // optional features
|
||||
vfs *vfs.VFS
|
||||
node vfs.Node // zip file object - set if reading
|
||||
remote string // remote of the zip file object
|
||||
prefix string // position for objects
|
||||
prefixSlash string // position for objects with a slash on
|
||||
root string // position to read from within the archive
|
||||
dt dirtree.DirTree // read from zipfile
|
||||
}
|
||||
|
||||
// New constructs an Fs from the (wrappedFs, remote) with the objects
|
||||
// prefix with prefix and rooted at root
|
||||
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
|
||||
// FIXME vfs cache?
|
||||
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
|
||||
fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
|
||||
vfsOpt := vfscommon.Opt
|
||||
vfsOpt.ReadWait = 0
|
||||
VFS := vfs.New(wrappedFs, &vfsOpt)
|
||||
node, err := VFS.Stat(remote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
f: wrappedFs,
|
||||
name: path.Join(fs.ConfigString(wrappedFs), remote),
|
||||
vfs: VFS,
|
||||
node: node,
|
||||
remote: remote,
|
||||
root: root,
|
||||
prefix: prefix,
|
||||
prefixSlash: prefix + "/",
|
||||
}
|
||||
|
||||
// Read the contents of the zip file
|
||||
singleObject, err := f.readZip()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open zip file: %w", err)
|
||||
}
|
||||
|
||||
// FIXME
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
//
|
||||
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: false,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: false, // MimeTypes not supported with zip
|
||||
WriteMimeType: false,
|
||||
BucketBased: false,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
|
||||
if singleObject {
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// String returns a description of the FS
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("Zip %q", f.name)
|
||||
}
|
||||
|
||||
// readZip the zip file into f
|
||||
//
|
||||
// Returns singleObject=true if f.root points to a file
|
||||
func (f *Fs) readZip() (singleObject bool, err error) {
|
||||
if f.node == nil {
|
||||
return singleObject, fs.ErrorDirNotFound
|
||||
}
|
||||
size := f.node.Size()
|
||||
if size < 0 {
|
||||
return singleObject, errors.New("can't read from zip file with unknown size")
|
||||
}
|
||||
r, err := f.node.Open(os.O_RDONLY)
|
||||
if err != nil {
|
||||
return singleObject, fmt.Errorf("failed to open zip file: %w", err)
|
||||
}
|
||||
zr, err := zip.NewReader(r, size)
|
||||
if err != nil {
|
||||
return singleObject, fmt.Errorf("failed to read zip file: %w", err)
|
||||
}
|
||||
dt := dirtree.New()
|
||||
for _, file := range zr.File {
|
||||
remote := strings.Trim(path.Clean(file.Name), "/")
|
||||
if remote == "." {
|
||||
remote = ""
|
||||
}
|
||||
remote = path.Join(f.prefix, remote)
|
||||
if f.root != "" {
|
||||
// Ignore all files outside the root
|
||||
if !strings.HasPrefix(remote, f.root) {
|
||||
continue
|
||||
}
|
||||
if remote == f.root {
|
||||
remote = ""
|
||||
} else {
|
||||
remote = strings.TrimPrefix(remote, f.root+"/")
|
||||
}
|
||||
}
|
||||
if strings.HasSuffix(file.Name, "/") {
|
||||
dir := fs.NewDir(remote, file.Modified)
|
||||
dt.AddDir(dir)
|
||||
} else {
|
||||
if remote == "" {
|
||||
remote = path.Base(f.root)
|
||||
singleObject = true
|
||||
dt = dirtree.New()
|
||||
}
|
||||
o := &Object{
|
||||
f: f,
|
||||
remote: remote,
|
||||
fh: &file.FileHeader,
|
||||
file: file,
|
||||
}
|
||||
dt.Add(o)
|
||||
if singleObject {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
dt.CheckParents("")
|
||||
dt.Sort()
|
||||
f.dt = dt
|
||||
//fs.Debugf(nil, "dt = %v", dt)
|
||||
return singleObject, nil
|
||||
}
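
One subtlety worth noting: when f.root points at a single file inside the zip, readZip renames that entry to its base name, makes it the only entry in the tree and reports singleObject=true, which causes New above to return fs.ErrorIsFile as rclone expects when an Fs is rooted at a file.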
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
||||
// _, err = f.strip(dir)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
entries, ok := f.dt[dir]
|
||||
if !ok {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
||||
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
|
||||
if f.dt == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
_, entry := f.dt.Find(remote)
|
||||
if entry == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
o, ok := entry.(*Object)
|
||||
if !ok {
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Precision of the ModTimes in this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
||||
return nil, vfs.EROFS
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.CRC32)
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
func (f *Fs) UnWrap() fs.Fs {
|
||||
return f.f
|
||||
}
|
||||
|
||||
// WrapFs returns the Fs that is wrapping this Fs
|
||||
func (f *Fs) WrapFs() fs.Fs {
|
||||
return f.wrapper
|
||||
}
|
||||
|
||||
// SetWrapper sets the Fs that is wrapping this Fs
|
||||
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
||||
f.wrapper = wrapper
|
||||
}
|
||||
|
||||
// Object describes an object to be read from the raw zip file
|
||||
type Object struct {
|
||||
f *Fs
|
||||
remote string
|
||||
fh *zip.FileHeader
|
||||
file *zip.File
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.f
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Remote()
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
return int64(o.fh.UncompressedSize64)
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.fh.Modified
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Storable returns a boolean indicating if this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||
if ht == hash.CRC32 {
|
||||
// FIXME return empty CRC if writing
|
||||
if o.f.dt == nil {
|
||||
return "", nil
|
||||
}
|
||||
return fmt.Sprintf("%08x", o.fh.CRC32), nil
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.Size())
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rc, err = o.file.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// discard data from start as necessary
|
||||
if offset > 0 {
|
||||
_, err = io.CopyN(io.Discard, rc, offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// If limited then don't return everything
|
||||
if limit >= 0 {
|
||||
return readers.NewLimitedReadCloser(rc, limit), nil
|
||||
}
|
||||
|
||||
return rc, nil
|
||||
}
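
Because a zip entry's decompressed stream is not seekable, SeekOption and RangeOption are honoured here by discarding the first offset bytes with io.CopyN and then limiting what is returned, so a ranged read still decompresses everything up to the requested offset.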
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.UnWrapper = (*Fs)(nil)
|
||||
_ fs.Wrapper = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
File diff suppressed because it is too large
@@ -3,16 +3,149 @@
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
// Check first feature flags are set on this
|
||||
// remote
|
||||
func TestBlockIDCreator(t *testing.T) {
|
||||
// Check creation and random number
|
||||
bic, err := newBlockIDCreator()
|
||||
require.NoError(t, err)
|
||||
bic2, err := newBlockIDCreator()
|
||||
require.NoError(t, err)
|
||||
assert.NotEqual(t, bic.random, bic2.random)
|
||||
assert.NotEqual(t, bic.random, [8]byte{})
|
||||
|
||||
// Set random to known value for tests
|
||||
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
|
||||
chunkNumber := uint64(0xFEDCBA9876543210)
|
||||
|
||||
// Check creation of ID
|
||||
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
|
||||
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
|
||||
got := bic.newBlockID(chunkNumber)
|
||||
assert.Equal(t, want, got)
|
||||
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
|
||||
|
||||
// Test checkID is working
|
||||
assert.NoError(t, bic.checkID(chunkNumber, got))
|
||||
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
|
||||
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
|
||||
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
|
||||
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
|
||||
}
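For reference, here is a minimal standalone sketch (not the backend's actual newBlockID implementation) of the ID layout the test above asserts: an 8-byte big-endian chunk number followed by the creator's 8 random bytes, encoded with standard base64. The makeBlockID helper and the main function are illustrative only.

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// makeBlockID builds a 16-byte block ID from the chunk number and the
// per-creator random bytes, then base64 encodes it (24 characters).
func makeBlockID(chunkNumber uint64, random [8]byte) string {
	var buf [16]byte
	binary.BigEndian.PutUint64(buf[:8], chunkNumber)
	copy(buf[8:], random[:])
	return base64.StdEncoding.EncodeToString(buf[:])
}

func main() {
	// Reproduces the expected value asserted in TestBlockIDCreator.
	fmt.Println(makeBlockID(0xFEDCBA9876543210, [8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
	// Output: /ty6mHZUMhABAgMEBQYHCA==
}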
|
||||
|
||||
func (f *Fs) testFeatures(t *testing.T) {
|
||||
// Check first feature flags are set on this remote
|
||||
enabled := f.Features().SetTier
|
||||
assert.True(t, enabled)
|
||||
enabled = f.Features().GetTier
|
||||
assert.True(t, enabled)
|
||||
}
|
||||
|
||||
type ReadSeekCloser struct {
|
||||
*strings.Reader
|
||||
}
|
||||
|
||||
func (r *ReadSeekCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stage a block at remote but don't commit it
|
||||
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
|
||||
var (
|
||||
containerName, blobPath = f.split(remote)
|
||||
containerClient = f.cntSVC(containerName)
|
||||
blobClient = containerClient.NewBlockBlobClient(blobPath)
|
||||
data = "uncommitted data"
|
||||
blockID = "1"
|
||||
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
|
||||
)
|
||||
r := &ReadSeekCloser{strings.NewReader(data)}
|
||||
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the block is staged but not committed
|
||||
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
|
||||
require.NoError(t, err)
|
||||
found := false
|
||||
for _, block := range blockList.UncommittedBlocks {
|
||||
if *block.Name == blockIDBase64 {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.True(t, found, "Block ID not found in uncommitted blocks")
|
||||
}
|
||||
|
||||
// This tests uploading a blob where it has uncommitted blocks with a different ID size.
|
||||
//
|
||||
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
|
||||
//
|
||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
|
||||
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
|
||||
var (
|
||||
ctx = context.Background()
|
||||
remote = "testBlob"
|
||||
)
|
||||
|
||||
// Multipart copy the blob please
|
||||
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
|
||||
f.opt.UseCopyBlob = false
|
||||
f.opt.CopyCutoff = f.opt.ChunkSize
|
||||
defer func() {
|
||||
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
|
||||
}()
|
||||
|
||||
// Create a blob with uncommitted blocks
|
||||
f.stageBlockWithoutCommit(ctx, t, remote)
|
||||
|
||||
// Now attempt to overwrite the block with a different sized block ID to provoke this error
|
||||
|
||||
// Check the object does not exist
|
||||
_, err := f.NewObject(ctx, remote)
|
||||
require.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
|
||||
// Upload a multipart file over the block with uncommitted chunks of a different ID size
|
||||
size := 4*int(f.opt.ChunkSize) - 1
|
||||
contents := random.String(size)
|
||||
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
|
||||
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
|
||||
// Check size
|
||||
assert.Equal(t, int64(size), o.Size())
|
||||
|
||||
// Create a new blob with uncommitted blocks
|
||||
newRemote := "testBlob2"
|
||||
f.stageBlockWithoutCommit(ctx, t, newRemote)
|
||||
|
||||
// Copy over that block
|
||||
dst, err := f.Copy(ctx, o, newRemote)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check basics
|
||||
assert.Equal(t, int64(size), dst.Size())
|
||||
assert.Equal(t, newRemote, dst.Remote())
|
||||
|
||||
// Check contents
|
||||
gotContents := fstests.ReadObject(ctx, t, dst, -1)
|
||||
assert.Equal(t, contents, gotContents)
|
||||
|
||||
// Remove the object
|
||||
require.NoError(t, dst.Remove(ctx))
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("Features", f.testFeatures)
|
||||
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
|
||||
}
|
||||
|
||||
@@ -15,13 +15,17 @@ import (
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
name := "TestAzureBlob"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAzureBlob:",
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: defaultChunkSize,
|
||||
},
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "use_copy_blob", Value: "false"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -40,6 +44,7 @@ func TestIntegration2(t *testing.T) {
|
||||
},
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "directory_markers", Value: "true"},
|
||||
{Name: name, Key: "use_copy_blob", Value: "false"},
|
||||
},
|
||||
})
|
||||
}
|
||||
@@ -48,8 +53,13 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setCopyCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetCopyCutoffer = (*Fs)(nil)
|
||||
)
|
||||
|
||||
func TestValidateAccessTier(t *testing.T) {
|
||||
|
||||
@@ -56,6 +56,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
@@ -237,6 +238,30 @@ msi_client_id, or msi_mi_res_id parameters.`,
|
||||
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "disable_instance_discovery",
|
||||
Help: `Skip requesting Microsoft Entra instance metadata
|
||||
This should be set true only by applications authenticating in
|
||||
disconnected clouds, or private clouds such as Azure Stack.
|
||||
It determines whether rclone requests Microsoft Entra instance
|
||||
metadata from ` + "`https://login.microsoft.com/`" + ` before
|
||||
authenticating.
|
||||
Setting this to true will skip this request, making you responsible
|
||||
for ensuring the configured authority is valid and trustworthy.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_az",
|
||||
Help: `Use Azure CLI tool az for authentication
|
||||
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
|
||||
as the sole means of authentication.
|
||||
Setting this can be useful if you wish to use the az CLI on a host with
|
||||
a System Managed Identity that you do not want to use.
|
||||
Don't set env_auth at the same time.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
||||
@@ -319,10 +344,12 @@ type Options struct {
|
||||
Username string `config:"username"`
|
||||
Password string `config:"password"`
|
||||
ServicePrincipalFile string `config:"service_principal_file"`
|
||||
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
|
||||
UseMSI bool `config:"use_msi"`
|
||||
MSIObjectID string `config:"msi_object_id"`
|
||||
MSIClientID string `config:"msi_client_id"`
|
||||
MSIResourceID string `config:"msi_mi_res_id"`
|
||||
UseAZ bool `config:"use_az"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`
|
||||
@@ -393,8 +420,10 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
|
||||
policyClientOptions := policy.ClientOptions{
|
||||
Transport: newTransporter(ctx),
|
||||
}
|
||||
backup := service.ShareTokenIntentBackup
|
||||
clientOpt := service.ClientOptions{
|
||||
ClientOptions: policyClientOptions,
|
||||
ClientOptions: policyClientOptions,
|
||||
FileRequestIntent: &backup,
|
||||
}
|
||||
|
||||
// Here we auth by setting one of cred, sharedKeyCred or f.client
|
||||
@@ -412,7 +441,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
|
||||
}
|
||||
// Read credentials from the environment
|
||||
options := azidentity.DefaultAzureCredentialOptions{
|
||||
ClientOptions: policyClientOptions,
|
||||
ClientOptions: policyClientOptions,
|
||||
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
|
||||
}
|
||||
cred, err = azidentity.NewDefaultAzureCredential(&options)
|
||||
if err != nil {
|
||||
@@ -423,6 +453,13 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
|
||||
}
|
||||
case opt.UseAZ:
|
||||
options := azidentity.AzureCLICredentialOptions{}
|
||||
cred, err = azidentity.NewAzureCLICredential(&options)
|
||||
fmt.Println(cred)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
|
||||
}
|
||||
case opt.SASURL != "":
|
||||
client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
|
||||
if err != nil {
|
||||
@@ -480,6 +517,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
|
||||
}
|
||||
case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
|
||||
// User with username and password
|
||||
//nolint:staticcheck // this is deprecated due to Azure policy
|
||||
options := azidentity.UsernamePasswordCredentialOptions{
|
||||
ClientOptions: policyClientOptions,
|
||||
}
|
||||
@@ -513,7 +551,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
|
||||
case opt.UseMSI:
|
||||
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
|
||||
// Validate and ensure exactly one is set. (To do: better validation.)
|
||||
var b2i = map[bool]int{false: 0, true: 1}
|
||||
b2i := map[bool]int{false: 0, true: 1}
|
||||
set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
|
||||
if set > 1 {
|
||||
return nil, errors.New("more than one user-assigned identity ID is set")
|
||||
@@ -532,6 +570,37 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
|
||||
}
|
||||
case opt.ClientID != "" && opt.Tenant != "" && opt.MSIClientID != "":
|
||||
// Workload Identity based authentication
|
||||
var options azidentity.ManagedIdentityCredentialOptions
|
||||
options.ID = azidentity.ClientID(opt.MSIClientID)
|
||||
|
||||
msiCred, err := azidentity.NewManagedIdentityCredential(&options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
|
||||
}
|
||||
|
||||
getClientAssertions := func(context.Context) (string, error) {
|
||||
token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
|
||||
Scopes: []string{"api://AzureADTokenExchange"},
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to acquire MSI token: %w", err)
|
||||
}
|
||||
|
||||
return token.Token, nil
|
||||
}
|
||||
|
||||
assertOpts := &azidentity.ClientAssertionCredentialOptions{}
|
||||
cred, err = azidentity.NewClientAssertionCredential(
|
||||
opt.Tenant,
|
||||
opt.ClientID,
|
||||
getClientAssertions,
|
||||
assertOpts)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire client assertion token: %w", err)
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("no authentication method configured")
|
||||
}
|
||||
@@ -775,18 +844,35 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
var entries fs.DirEntries
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||
list := list.NewHelper(callback)
|
||||
subDirClient := f.dirClient(dir)
|
||||
|
||||
// Checking whether directory exists
|
||||
_, err := subDirClient.GetProperties(ctx, nil)
|
||||
if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) {
|
||||
return entries, fs.ErrorDirNotFound
|
||||
return fs.ErrorDirNotFound
|
||||
} else if err != nil {
|
||||
return entries, err
|
||||
return err
|
||||
}
|
||||
|
||||
var opt = &directory.ListFilesAndDirectoriesOptions{
|
||||
opt := &directory.ListFilesAndDirectoriesOptions{
|
||||
Include: directory.ListFilesInclude{
|
||||
Timestamps: true,
|
||||
},
|
||||
@@ -795,7 +881,7 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
for pager.More() {
|
||||
resp, err := pager.NextPage(ctx)
|
||||
if err != nil {
|
||||
return entries, err
|
||||
return err
|
||||
}
|
||||
for _, directory := range resp.Segment.Directories {
|
||||
// Name *string `xml:"Name"`
|
||||
@@ -821,7 +907,10 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
if directory.Properties.ContentLength != nil {
|
||||
entry.SetSize(*directory.Properties.ContentLength)
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
err = list.Add(entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, file := range resp.Segment.Files {
|
||||
leaf := f.opt.Enc.ToStandardPath(*file.Name)
|
||||
@@ -835,10 +924,13 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
if file.Properties.LastWriteTime != nil {
|
||||
entry.modTime = *file.Properties.LastWriteTime
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
err = list.Add(entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -885,7 +977,7 @@ func (o *Object) setMetadata(resp *file.GetPropertiesResponse) {
|
||||
}
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
// getMetadata gets the metadata if it hasn't already been fetched
|
||||
func (o *Object) getMetadata(ctx context.Context) error {
|
||||
resp, err := o.fileClient().GetProperties(ctx, nil)
|
||||
if err != nil {
|
||||
@@ -897,7 +989,7 @@ func (o *Object) getMetadata(ctx context.Context) error {
|
||||
|
||||
// Hash returns the MD5 of an object returning a lowercase hex string
|
||||
//
|
||||
// May make a network request becaue the [fs.List] method does not
|
||||
// May make a network request because the [fs.List] method does not
|
||||
// return MD5 hashes for DirEntry
|
||||
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
|
||||
if ty != hash.MD5 {
|
||||
@@ -945,6 +1037,10 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
|
||||
SMBProperties: &file.SMBProperties{
|
||||
LastWriteTime: &t,
|
||||
},
|
||||
HTTPHeaders: &file.HTTPHeaders{
|
||||
ContentMD5: o.md5,
|
||||
ContentType: &o.contentType,
|
||||
},
|
||||
}
|
||||
_, err := o.fileClient().SetHTTPHeaders(ctx, &opt)
|
||||
if err != nil {
|
||||
@@ -1241,10 +1337,29 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
srcURL := srcObj.fileClient().URL()
|
||||
fc := f.fileClient(remote)
|
||||
_, err = fc.StartCopyFromURL(ctx, srcURL, &opt)
|
||||
startCopy, err := fc.StartCopyFromURL(ctx, srcURL, &opt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy failed: %w", err)
|
||||
}
|
||||
|
||||
// Poll for completion if necessary
|
||||
//
|
||||
// The for loop is never executed for same storage account copies.
|
||||
copyStatus := startCopy.CopyStatus
|
||||
var properties file.GetPropertiesResponse
|
||||
pollTime := 100 * time.Millisecond
|
||||
|
||||
for copyStatus != nil && string(*copyStatus) == string(file.CopyStatusTypePending) {
|
||||
time.Sleep(pollTime)
|
||||
|
||||
properties, err = fc.GetProperties(ctx, &file.GetPropertiesOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copyStatus = properties.CopyStatus
|
||||
pollTime = min(2*pollTime, time.Second)
|
||||
}
|
||||
|
||||
dstObj, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy: NewObject failed: %w", err)
|
||||
@@ -1359,6 +1474,7 @@ var (
|
||||
_ fs.DirMover = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.OpenWriterAter = &Fs{}
|
||||
_ fs.ListPer = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
)
|
||||
|
||||
@@ -61,7 +61,7 @@ const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
|
||||
|
||||
func randomString(charCount int) string {
|
||||
strBldr := strings.Builder{}
|
||||
for i := 0; i < charCount; i++ {
|
||||
for range charCount {
|
||||
randPos := rand.Int63n(52)
|
||||
strBldr.WriteByte(chars[randPos])
|
||||
}
|
||||
|
||||
@@ -42,9 +42,18 @@ type Bucket struct {
|
||||
|
||||
// LifecycleRule is a single lifecycle rule
|
||||
type LifecycleRule struct {
|
||||
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
|
||||
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
|
||||
FileNamePrefix string `json:"fileNamePrefix"`
|
||||
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
|
||||
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
|
||||
DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
|
||||
FileNamePrefix string `json:"fileNamePrefix"`
|
||||
}
|
||||
|
||||
// ServerSideEncryption is a configuration object for B2 Server-Side Encryption
|
||||
type ServerSideEncryption struct {
|
||||
Mode string `json:"mode"`
|
||||
Algorithm string `json:"algorithm"` // Encryption algorithm to use
|
||||
CustomerKey string `json:"customerKey"` // User provided Base64 encoded key that is used by the server to encrypt files
|
||||
CustomerKeyMd5 string `json:"customerKeyMd5"` // An MD5 hash of the decoded key
|
||||
}
|
||||
|
||||
// Timestamp is a UTC time when this file was uploaded. It is a base
|
||||
@@ -129,10 +138,10 @@ type AuthorizeAccountResponse struct {
|
||||
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
|
||||
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
|
||||
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
|
||||
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
|
||||
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
|
||||
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
|
||||
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
|
||||
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
|
||||
NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
|
||||
} `json:"allowed"`
|
||||
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
|
||||
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
|
||||
@@ -260,21 +269,22 @@ type GetFileInfoRequest struct {
|
||||
//
|
||||
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
|
||||
type StartLargeFileRequest struct {
|
||||
BucketID string `json:"bucketId"` //The ID of the bucket that the file will go in.
|
||||
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
|
||||
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
|
||||
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
|
||||
BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in.
|
||||
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
|
||||
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
|
||||
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
|
||||
ServerSideEncryption *ServerSideEncryption `json:"serverSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption
|
||||
}
|
||||
|
||||
// StartLargeFileResponse is the response to StartLargeFileRequest
|
||||
type StartLargeFileResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp,omitempty"` // This is a UTC time when this file was uploaded.
|
||||
}
|
||||
|
||||
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
|
||||
@@ -324,21 +334,25 @@ type CancelLargeFileResponse struct {
|
||||
|
||||
// CopyFileRequest is as passed to b2_copy_file
|
||||
type CopyFileRequest struct {
|
||||
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
|
||||
Name string `json:"fileName"` // The name of the new file being created.
|
||||
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
|
||||
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
|
||||
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
|
||||
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
|
||||
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
|
||||
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
|
||||
Name string `json:"fileName"` // The name of the new file being created.
|
||||
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
|
||||
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
|
||||
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
|
||||
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
|
||||
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
|
||||
SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
|
||||
DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
|
||||
}
|
||||
|
||||
// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
|
||||
type CopyPartRequest struct {
|
||||
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
|
||||
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
|
||||
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
|
||||
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
|
||||
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
|
||||
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
|
||||
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
|
||||
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
|
||||
SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
|
||||
DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
|
||||
}
|
||||
|
||||
// UpdateBucketRequest describes a request to modify a B2 bucket
|
||||
|
||||
backend/b2/b2.go
@@ -8,7 +8,9 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -16,6 +18,7 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -30,7 +33,8 @@ import (
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/multipart"
|
||||
@@ -51,6 +55,9 @@ const (
|
||||
nameHeader = "X-Bz-File-Name"
|
||||
timestampHeader = "X-Bz-Upload-Timestamp"
|
||||
retryAfterHeader = "Retry-After"
|
||||
sseAlgorithmHeader = "X-Bz-Server-Side-Encryption-Customer-Algorithm"
|
||||
sseKeyHeader = "X-Bz-Server-Side-Encryption-Customer-Key"
|
||||
sseMd5Header = "X-Bz-Server-Side-Encryption-Customer-Key-Md5"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 5 * time.Minute
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
@@ -250,6 +257,51 @@ See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket
|
||||
Default: (encoder.Display |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}, {
|
||||
Name: "sse_customer_algorithm",
|
||||
Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in B2.",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}, {
|
||||
Value: "AES256",
|
||||
Help: "Advanced Encryption Standard (256 bits key length)",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_customer_key",
|
||||
Help: `To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data
|
||||
|
||||
Alternatively you can provide --sse-customer-key-base64.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sse_customer_key_base64",
|
||||
Help: `To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data
|
||||
|
||||
Alternatively you can provide --sse-customer-key.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sse_customer_key_md5",
|
||||
Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
|
||||
|
||||
If you leave it blank, this is calculated automatically from the sse_customer_key provided.
|
||||
`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -272,6 +324,10 @@ type Options struct {
|
||||
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
|
||||
Lifecycle int `config:"lifecycle"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
|
||||
SSECustomerKey string `config:"sse_customer_key"`
|
||||
SSECustomerKeyBase64 string `config:"sse_customer_key_base64"`
|
||||
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
|
||||
}
|
||||
|
||||
// Fs represents a remote b2 server
|
||||
@@ -299,14 +355,13 @@ type Fs struct {
|
||||
|
||||
// Object describes a b2 object
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
id string // b2 id of the file
|
||||
modTime time.Time // The modified time of the object if known
|
||||
sha1 string // SHA-1 hash if known
|
||||
size int64 // Size of the object
|
||||
mimeType string // Content-Type of the object
|
||||
meta map[string]string // The object metadata if known - may be nil - with lower case keys
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
id string // b2 id of the file
|
||||
modTime time.Time // The modified time of the object if known
|
||||
sha1 string // SHA-1 hash if known
|
||||
size int64 // Size of the object
|
||||
mimeType string // Content-Type of the object
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -503,6 +558,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.Endpoint == "" {
|
||||
opt.Endpoint = defaultEndpoint
|
||||
}
|
||||
if opt.SSECustomerKey != "" && opt.SSECustomerKeyBase64 != "" {
|
||||
return nil, errors.New("b2: can't use both sse_customer_key and sse_customer_key_base64 at the same time")
|
||||
} else if opt.SSECustomerKeyBase64 != "" {
|
||||
// Decode the Base64-encoded key and store it in the SSECustomerKey field
|
||||
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("b2: Could not decode sse_customer_key_base64: %w", err)
|
||||
}
|
||||
opt.SSECustomerKey = string(decoded)
|
||||
} else {
|
||||
// Encode the raw key as Base64
|
||||
opt.SSECustomerKeyBase64 = base64.StdEncoding.EncodeToString([]byte(opt.SSECustomerKey))
|
||||
}
|
||||
if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
|
||||
// Calculate CustomerKeyMd5 if not supplied
|
||||
md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
|
||||
opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
@@ -589,12 +662,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
|
||||
|
||||
// hasPermission returns if the current AuthorizationToken has the selected permission
|
||||
func (f *Fs) hasPermission(permission string) bool {
|
||||
for _, capability := range f.info.Allowed.Capabilities {
|
||||
if capability == permission {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return slices.Contains(f.info.Allowed.Capabilities, permission)
|
||||
}
|
||||
|
||||
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
|
||||
@@ -851,7 +919,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
|
||||
}
|
||||
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
|
||||
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
|
||||
last := ""
|
||||
err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
|
||||
@@ -859,16 +927,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
|
||||
return err
|
||||
}
|
||||
if entry != nil {
|
||||
entries = append(entries, entry)
|
||||
return callback(entry)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
// bucket must be present if listing succeeded
|
||||
f.cache.MarkOK(bucket)
|
||||
return entries, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// listBuckets returns all the buckets to out
|
||||
@@ -894,14 +962,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||
list := list.NewHelper(callback)
|
||||
bucket, directory := f.split(dir)
|
||||
if bucket == "" {
|
||||
if directory != "" {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
return fs.ErrorListBucketRequired
|
||||
}
|
||||
entries, err := f.listBuckets(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
err = list.Add(entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return f.listBuckets(ctx)
|
||||
}
|
||||
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
@@ -922,7 +1022,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
// of listing recursively that doing a directory traversal.
|
||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
bucket, directory := f.split(dir)
|
||||
list := walk.NewListRHelper(callback)
|
||||
list := list.NewHelper(callback)
|
||||
listR := func(bucket, directory, prefix string, addBucket bool) error {
|
||||
last := ""
|
||||
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
|
||||
@@ -1275,7 +1375,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
|
||||
toBeDeleted := make(chan *api.File, f.ci.Transfers)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(f.ci.Transfers)
|
||||
for i := 0; i < f.ci.Transfers; i++ {
|
||||
for range f.ci.Transfers {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for object := range toBeDeleted {
|
||||
@@ -1318,16 +1418,22 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
|
||||
// Check current version of the file
|
||||
if deleteHidden && object.Action == "hide" {
|
||||
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
|
||||
toBeDeleted <- object
|
||||
if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
|
||||
toBeDeleted <- object
|
||||
}
|
||||
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
|
||||
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
|
||||
toBeDeleted <- object
|
||||
if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
|
||||
toBeDeleted <- object
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(remote, "Deleting (id %q)", object.ID)
|
||||
toBeDeleted <- object
|
||||
if !operations.SkipDestructive(ctx, object.Name, "delete") {
|
||||
toBeDeleted <- object
|
||||
}
|
||||
}
|
||||
last = remote
|
||||
tr.Done(ctx, nil)
|
||||
@@ -1401,6 +1507,16 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
|
||||
Name: f.opt.Enc.FromStandardPath(dstPath),
|
||||
DestBucketID: destBucketID,
|
||||
}
|
||||
if f.opt.SSECustomerKey != "" && f.opt.SSECustomerKeyMD5 != "" {
|
||||
serverSideEncryptionConfig := api.ServerSideEncryption{
|
||||
Mode: "SSE-C",
|
||||
Algorithm: f.opt.SSECustomerAlgorithm,
|
||||
CustomerKey: f.opt.SSECustomerKeyBase64,
|
||||
CustomerKeyMd5: f.opt.SSECustomerKeyMD5,
|
||||
}
|
||||
request.SourceServerSideEncryption = &serverSideEncryptionConfig
|
||||
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
|
||||
}
|
||||
if newInfo == nil {
|
||||
request.MetadataDirective = "COPY"
|
||||
} else {
|
||||
@@ -1598,9 +1714,6 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// For now, just set "mtime" in metadata
|
||||
o.meta = make(map[string]string, 1)
|
||||
o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1674,6 +1787,21 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
|
||||
return o.getMetaDataListing(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// If using versionAt we need to list to find the correct version.
|
||||
if o.fs.opt.VersionAt.IsSet() {
|
||||
info, err := o.getMetaDataListing(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if info.Action == "hide" {
|
||||
// Return object not found error if the current version is deleted.
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
_, info, err = o.getOrHead(ctx, "HEAD", nil)
|
||||
return info, err
|
||||
}
|
||||
@@ -1820,9 +1948,10 @@ var _ io.ReadCloser = &openFile{}
|
||||
|
||||
func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: method,
|
||||
Options: options,
|
||||
NoResponse: method == "HEAD",
|
||||
Method: method,
|
||||
Options: options,
|
||||
NoResponse: method == "HEAD",
|
||||
ExtraHeaders: map[string]string{},
|
||||
}
|
||||
|
||||
// Use downloadUrl from backblaze if downloadUrl is not set
|
||||
@@ -1840,6 +1969,11 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
|
||||
bucket, bucketPath := o.split()
|
||||
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
|
||||
}
|
||||
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
|
||||
opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
|
||||
opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
|
||||
opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
@@ -1880,20 +2014,18 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
|
||||
Info: Info,
|
||||
}
|
||||
|
||||
// Embryonic metadata support - just mtime
|
||||
o.meta = make(map[string]string, 1)
|
||||
modTime, err := parseTimeStringHelper(info.Info[timeKey])
|
||||
if err == nil {
|
||||
o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
|
||||
}
|
||||
|
||||
// When reading files from B2 via cloudflare using
|
||||
// --b2-download-url cloudflare strips the Content-Length
|
||||
// headers (presumably so it can inject stuff) so use the old
|
||||
// length read from the listing.
|
||||
// Additionally, the official examples return S3 headers
|
||||
// instead of native, i.e. no file ID, use ones from listing.
|
||||
if info.Size < 0 {
|
||||
info.Size = o.size
|
||||
}
|
||||
if info.ID == "" {
|
||||
info.ID = o.id
|
||||
}
|
||||
return resp, info, nil
|
||||
}
|
||||
|
||||
@@ -1943,7 +2075,7 @@ func init() {
|
||||
// urlEncode encodes in with % encoding
|
||||
func urlEncode(in string) string {
|
||||
var out bytes.Buffer
|
||||
for i := 0; i < len(in); i++ {
|
||||
for i := range len(in) {
|
||||
c := in[i]
|
||||
if noNeedToEncode[c] {
|
||||
_ = out.WriteByte(c)
|
||||
@@ -2106,6 +2238,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
},
|
||||
ContentLength: &size,
|
||||
}
|
||||
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
|
||||
opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
|
||||
opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
|
||||
opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
|
||||
}
|
||||
var response api.FileInfo
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
@@ -2180,13 +2317,17 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
return info, nil, err
|
||||
}
|
||||
|
||||
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
|
||||
if err != nil {
|
||||
return info, nil, err
|
||||
}
|
||||
|
||||
info = fs.ChunkWriterInfo{
|
||||
ChunkSize: int64(f.opt.ChunkSize),
|
||||
ChunkSize: up.chunkSize,
|
||||
Concurrency: o.fs.opt.UploadConcurrency,
|
||||
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
||||
}
|
||||
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
|
||||
return info, up, err
|
||||
return info, up, nil
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
@@ -2216,31 +2357,36 @@ func (o *Object) ID() string {
|
||||
|
||||
var lifecycleHelp = fs.CommandHelp{
|
||||
Name: "lifecycle",
|
||||
Short: "Read or set the lifecycle for a bucket",
|
||||
Short: "Read or set the lifecycle for a bucket.",
|
||||
Long: `This command can be used to read or set the lifecycle for a bucket.
|
||||
|
||||
Usage Examples:
|
||||
|
||||
To show the current lifecycle rules:
|
||||
|
||||
rclone backend lifecycle b2:bucket
|
||||
` + "```console" + `
|
||||
rclone backend lifecycle b2:bucket
|
||||
` + "```" + `
|
||||
|
||||
This will dump something like this showing the lifecycle rules.
|
||||
|
||||
[
|
||||
{
|
||||
"daysFromHidingToDeleting": 1,
|
||||
"daysFromUploadingToHiding": null,
|
||||
"fileNamePrefix": ""
|
||||
}
|
||||
]
|
||||
` + "```json" + `
|
||||
[
|
||||
{
|
||||
"daysFromHidingToDeleting": 1,
|
||||
"daysFromUploadingToHiding": null,
|
||||
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
|
||||
"fileNamePrefix": ""
|
||||
}
|
||||
]
|
||||
` + "```" + `
|
||||
|
||||
If there are no lifecycle rules (the default) then it will just return [].
|
||||
If there are no lifecycle rules (the default) then it will just return ` + "`[]`" + `.
|
||||
|
||||
To reset the current lifecycle rules:
|
||||
|
||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
|
||||
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
|
||||
` + "```console" + `
|
||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
|
||||
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
|
||||
` + "```" + `
|
||||
|
||||
This will run and then print the new lifecycle rules as above.
|
||||
|
||||
@@ -2252,17 +2398,21 @@ the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
|
||||
the config also which will mean deletions won't cause versions but
|
||||
overwrites will still cause versions to be made.
|
||||
|
||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
|
||||
` + "```console" + `
|
||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
|
||||
` + "```" + `
|
||||
|
||||
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
|
||||
`,
|
||||
See: <https://www.backblaze.com/docs/cloud-storage-lifecycle-rules>`,
|
||||
Opts: map[string]string{
|
||||
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
|
||||
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
|
||||
"daysFromHidingToDeleting": `After a file has been hidden for this many days
|
||||
it is deleted. 0 is off.`,
|
||||
"daysFromUploadingToHiding": `This many days after uploading a file is hidden.`,
|
||||
"daysFromStartingToCancelingUnfinishedLargeFiles": `Cancels any unfinished
|
||||
large file versions after this many days.`,
|
||||
},
|
||||
}
|
||||
|
||||
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
||||
var newRule api.LifecycleRule
|
||||
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
|
||||
days, err := strconv.Atoi(daysStr)
|
||||
@@ -2278,14 +2428,23 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
|
||||
}
|
||||
newRule.DaysFromUploadingToHiding = &days
|
||||
}
|
||||
if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
|
||||
days, err := strconv.Atoi(daysStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
|
||||
}
|
||||
newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
|
||||
}
|
||||
bucketName, _ := f.split("")
|
||||
if bucketName == "" {
|
||||
return nil, errors.New("bucket required")
|
||||
|
||||
}
|
||||
|
||||
skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")
|
||||
|
||||
var bucket *api.Bucket
|
||||
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
|
||||
if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
|
||||
bucketID, err := f.getBucketID(ctx, bucketName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -2332,17 +2491,18 @@ max-age, which defaults to 24 hours.
|
||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||
it would do.
|
||||
|
||||
rclone backend cleanup b2:bucket/path/to/object
|
||||
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
|
||||
` + "```console" + `
|
||||
rclone backend cleanup b2:bucket/path/to/object
|
||||
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
|
||||
` + "```" + `
|
||||
|
||||
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
||||
`,
|
||||
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`,
|
||||
Opts: map[string]string{
|
||||
"max-age": "Max age of upload to delete",
|
||||
"max-age": "Max age of upload to delete.",
|
||||
},
|
||||
}
|
||||
|
||||
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
||||
maxAge := defaultMaxAge
|
||||
if opt["max-age"] != "" {
|
||||
maxAge, err = fs.ParseDuration(opt["max-age"])
|
||||
@@ -2361,11 +2521,12 @@ var cleanupHiddenHelp = fs.CommandHelp{
|
||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||
it would do.
|
||||
|
||||
rclone backend cleanup-hidden b2:bucket/path/to/dir
|
||||
`,
|
||||
` + "```console" + `
|
||||
rclone backend cleanup-hidden b2:bucket/path/to/dir
|
||||
` + "```",
|
||||
}
|
||||
|
||||
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
||||
return nil, f.cleanUp(ctx, true, false, 0)
|
||||
}
|
||||
|
||||
@@ -2384,7 +2545,7 @@ var commandHelp = []fs.CommandHelp{
|
||||
// The result should be capable of being JSON encoded
|
||||
// If it is a string or a []string it will be shown to the user
|
||||
// otherwise it will be JSON encoded and shown to the user like that
|
||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
||||
switch name {
|
||||
case "lifecycle":
|
||||
return f.lifecycleCommand(ctx, name, arg, opt)
|
||||
@@ -2405,6 +2566,7 @@ var (
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.ListPer = &Fs{}
|
||||
_ fs.PublicLinker = &Fs{}
|
||||
_ fs.OpenChunkWriter = &Fs{}
|
||||
_ fs.Commander = &Fs{}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -13,6 +14,7 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
@@ -256,12 +258,6 @@ func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string
|
||||
assert.Equal(t, v, got, k)
|
||||
}
|
||||
|
||||
// mtime
|
||||
for k, v := range metadata {
|
||||
got := o.meta[k]
|
||||
assert.Equal(t, v, got, k)
|
||||
}
|
||||
|
||||
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
|
||||
|
||||
// Modification time from the x-bz-info-src_last_modified_millis header
|
||||
@@ -450,37 +446,174 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
|
||||
t.Run("List", func(t *testing.T) {
|
||||
fstest.CheckListing(t, f, test.want)
|
||||
})
|
||||
// b2 NewObject doesn't work with VersionAt
|
||||
//t.Run("NewObject", func(t *testing.T) {
|
||||
// gotObj, gotErr := f.NewObject(ctx, fileName)
|
||||
// assert.Equal(t, test.wantErr, gotErr)
|
||||
// if gotErr == nil {
|
||||
// assert.Equal(t, test.wantSize, gotObj.Size())
|
||||
// }
|
||||
//})
|
||||
|
||||
t.Run("NewObject", func(t *testing.T) {
|
||||
gotObj, gotErr := f.NewObject(ctx, fileName)
|
||||
assert.Equal(t, test.wantErr, gotErr)
|
||||
if gotErr == nil {
|
||||
assert.Equal(t, test.wantSize, gotObj.Size())
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Cleanup", func(t *testing.T) {
|
||||
require.NoError(t, f.cleanUp(ctx, true, false, 0))
|
||||
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
|
||||
fstest.CheckListing(t, f, items)
|
||||
// Set --b2-versions for this test
|
||||
f.opt.Versions = true
|
||||
defer func() {
|
||||
f.opt.Versions = false
|
||||
}()
|
||||
fstest.CheckListing(t, f, items)
|
||||
t.Run("DryRun", func(t *testing.T) {
|
||||
f.opt.Versions = true
|
||||
defer func() {
|
||||
f.opt.Versions = false
|
||||
}()
|
||||
// Listing should be unchanged after dry run
|
||||
before := listAllFiles(ctx, t, f, dirName)
|
||||
ctx, ci := fs.AddConfig(ctx)
|
||||
ci.DryRun = true
|
||||
require.NoError(t, f.cleanUp(ctx, true, false, 0))
|
||||
after := listAllFiles(ctx, t, f, dirName)
|
||||
assert.Equal(t, before, after)
|
||||
})
|
||||
|
||||
t.Run("RealThing", func(t *testing.T) {
|
||||
f.opt.Versions = true
|
||||
defer func() {
|
||||
f.opt.Versions = false
|
||||
}()
|
||||
// Listing should reflect current state after cleanup
|
||||
require.NoError(t, f.cleanUp(ctx, true, false, 0))
|
||||
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
|
||||
fstest.CheckListing(t, f, items)
|
||||
})
|
||||
})
|
||||
|
||||
// Purge gets tested later
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// B2CleanupHidden tests cleaning up hidden files
|
||||
t.Run("CleanupUnfinished", func(t *testing.T) {
|
||||
dirName := "unfinished"
|
||||
fileCount := 5
|
||||
expectedFiles := []string{}
|
||||
for i := 1; i < fileCount; i++ {
|
||||
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
|
||||
expectedFiles = append(expectedFiles, fileName)
|
||||
obj := &Object{
|
||||
fs: f,
|
||||
remote: fileName,
|
||||
}
|
||||
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
|
||||
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
checkListing(ctx, t, f, dirName, expectedFiles)
|
||||
|
||||
t.Run("DryRun", func(t *testing.T) {
|
||||
// Listing should not change after dry run
|
||||
ctx, ci := fs.AddConfig(ctx)
|
||||
ci.DryRun = true
|
||||
require.NoError(t, f.cleanUp(ctx, false, true, 0))
|
||||
checkListing(ctx, t, f, dirName, expectedFiles)
|
||||
})
|
||||
|
||||
t.Run("RealThing", func(t *testing.T) {
|
||||
// Listing should be empty after real cleanup
|
||||
require.NoError(t, f.cleanUp(ctx, false, true, 0))
|
||||
checkListing(ctx, t, f, dirName, []string{})
|
||||
})
|
||||
})
|
||||
}
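For orientation, the unfinished-upload cleanup exercised by this test is also reachable through the standard optional-interface plumbing; a minimal sketch, assuming only the fs.CleanUpper assertion shown earlier in this compare:

	// Sketch: trigger cleanup of old unfinished large uploads via the generic feature hook.
	if do := f.Features().CleanUp; do != nil {
		if err := do(ctx); err != nil {
			fs.Errorf(f, "cleanup failed: %v", err)
		}
	}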
|
||||
|
||||
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
|
||||
bucket, directory := f.split(dirName)
|
||||
foundFiles := []string{}
|
||||
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
|
||||
if !isDirectory {
|
||||
foundFiles = append(foundFiles, object.Name)
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
sort.Strings(foundFiles)
|
||||
return foundFiles
|
||||
}
|
||||
|
||||
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
|
||||
foundFiles := listAllFiles(ctx, t, f, dirName)
|
||||
sort.Strings(expectedFiles)
|
||||
assert.Equal(t, expectedFiles, foundFiles)
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
opt := map[string]string{}
|
||||
|
||||
t.Run("InitState", func(t *testing.T) {
|
||||
// There should be no lifecycle rules at the outset
|
||||
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
||||
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(lifecycleRules))
|
||||
})
|
||||
|
||||
t.Run("DryRun", func(t *testing.T) {
|
||||
// There should still be no lifecycle rules after each dry run operation
|
||||
ctx, ci := fs.AddConfig(ctx)
|
||||
ci.DryRun = true
|
||||
|
||||
opt["daysFromHidingToDeleting"] = "30"
|
||||
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
||||
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(lifecycleRules))
|
||||
|
||||
delete(opt, "daysFromHidingToDeleting")
|
||||
opt["daysFromUploadingToHiding"] = "40"
|
||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(lifecycleRules))
|
||||
|
||||
opt["daysFromHidingToDeleting"] = "30"
|
||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(lifecycleRules))
|
||||
})
|
||||
|
||||
t.Run("RealThing", func(t *testing.T) {
|
||||
opt["daysFromHidingToDeleting"] = "30"
|
||||
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
||||
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(lifecycleRules))
|
||||
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
|
||||
|
||||
delete(opt, "daysFromHidingToDeleting")
|
||||
opt["daysFromUploadingToHiding"] = "40"
|
||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(lifecycleRules))
|
||||
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
|
||||
|
||||
opt["daysFromHidingToDeleting"] = "30"
|
||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(lifecycleRules))
|
||||
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
|
||||
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
|
||||
})
|
||||
}
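Since the Fs also asserts fs.Commander earlier in this compare, the same rules can plausibly be set through the generic Command entry point; a hedged sketch (dispatch of the "lifecycle" name to lifecycleCommand is assumed, option names are taken from the test above):

	// Sketch: set and read back lifecycle rules via the backend command interface.
	out, err := f.Command(ctx, "lifecycle", nil, map[string]string{
		"daysFromHidingToDeleting": "30",
	})
	if err == nil {
		rules := out.([]api.LifecycleRule)
		fmt.Println(len(rules), *rules[0].DaysFromHidingToDeleting) // 1 30
	}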
|
||||
|
||||
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("Metadata", f.InternalTestMetadata)
|
||||
t.Run("Versions", f.InternalTestVersions)
|
||||
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
|
||||
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
@@ -144,6 +144,14 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
||||
request.ContentType = newInfo.ContentType
|
||||
request.Info = newInfo.Info
|
||||
}
|
||||
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
|
||||
request.ServerSideEncryption = &api.ServerSideEncryption{
|
||||
Mode: "SSE-C",
|
||||
Algorithm: o.fs.opt.SSECustomerAlgorithm,
|
||||
CustomerKey: o.fs.opt.SSECustomerKeyBase64,
|
||||
CustomerKeyMd5: o.fs.opt.SSECustomerKeyMD5,
|
||||
}
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_start_large_file",
|
||||
@@ -295,6 +303,12 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
||||
ContentLength: &sizeWithHash,
|
||||
}
|
||||
|
||||
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
|
||||
opts.ExtraHeaders[sseAlgorithmHeader] = up.o.fs.opt.SSECustomerAlgorithm
|
||||
opts.ExtraHeaders[sseKeyHeader] = up.o.fs.opt.SSECustomerKeyBase64
|
||||
opts.ExtraHeaders[sseMd5Header] = up.o.fs.opt.SSECustomerKeyMD5
|
||||
}
|
||||
|
||||
var response api.UploadPartResponse
|
||||
|
||||
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
|
||||
@@ -334,6 +348,17 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
|
||||
PartNumber: int64(part + 1),
|
||||
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
|
||||
}
|
||||
|
||||
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
|
||||
serverSideEncryptionConfig := api.ServerSideEncryption{
|
||||
Mode: "SSE-C",
|
||||
Algorithm: up.o.fs.opt.SSECustomerAlgorithm,
|
||||
CustomerKey: up.o.fs.opt.SSECustomerKeyBase64,
|
||||
CustomerKeyMd5: up.o.fs.opt.SSECustomerKeyMD5,
|
||||
}
|
||||
request.SourceServerSideEncryption = &serverSideEncryptionConfig
|
||||
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
|
||||
}
|
||||
var response api.UploadPartResponse
|
||||
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||
retry, err := up.f.shouldRetry(ctx, resp, err)
|
||||
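The same three SSE-C values recur in the start, write-chunk and copy-chunk paths above; a minimal sketch of a helper that could centralise the header variant (the helper itself is not part of the diff, while the header constants and option fields are):

	// Sketch only: apply the SSE-C headers used by WriteChunk to any request headers map.
	func addSSECHeaders(headers map[string]string, opt *Options) {
		if opt.SSECustomerKey == "" || opt.SSECustomerKeyMD5 == "" {
			return // SSE-C not configured
		}
		headers[sseAlgorithmHeader] = opt.SSECustomerAlgorithm
		headers[sseKeyHeader] = opt.SSECustomerKeyBase64
		headers[sseMd5Header] = opt.SSECustomerKeyMD5
	}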
@@ -478,17 +503,14 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) {
|
||||
remaining = up.size
|
||||
)
|
||||
g.SetLimit(up.f.opt.UploadConcurrency)
|
||||
for part := 0; part < up.parts; part++ {
|
||||
for part := range up.parts {
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in copying all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
break
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= up.chunkSize {
|
||||
reqSize = up.chunkSize
|
||||
}
|
||||
reqSize := min(remaining, up.chunkSize)
|
||||
|
||||
part := part // for the closure
|
||||
g.Go(func() (err error) {
|
||||
|
||||
@@ -125,10 +125,21 @@ type FolderItems struct {
|
||||
Offset int `json:"offset"`
|
||||
Limit int `json:"limit"`
|
||||
NextMarker *string `json:"next_marker,omitempty"`
|
||||
Order []struct {
|
||||
By string `json:"by"`
|
||||
Direction string `json:"direction"`
|
||||
} `json:"order"`
|
||||
// There is some confusion about how this is actually
|
||||
// returned. The []struct has worked for many years, but in
|
||||
// https://github.com/rclone/rclone/issues/8776 box was
|
||||
// returning it not as a list. We don't actually use
|
||||
// this so comment it out.
|
||||
//
|
||||
// Order struct {
|
||||
// By string `json:"by"`
|
||||
// Direction string `json:"direction"`
|
||||
// } `json:"order"`
|
||||
//
|
||||
// Order []struct {
|
||||
// By string `json:"by"`
|
||||
// Direction string `json:"direction"`
|
||||
// } `json:"order"`
|
||||
}
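The commented-out field above refers to box sometimes returning order as a single object rather than a list; a hedged sketch of tolerating both shapes with json.RawMessage, purely for illustration (the diff itself simply stops decoding the field):

	// Sketch: decode `order` whether it arrives as an array or a single object.
	type folderOrder struct {
		By        string `json:"by"`
		Direction string `json:"direction"`
	}

	func decodeOrder(raw json.RawMessage) ([]folderOrder, error) {
		var many []folderOrder
		if err := json.Unmarshal(raw, &many); err == nil {
			return many, nil
		}
		var one folderOrder
		if err := json.Unmarshal(raw, &one); err != nil {
			return nil, err
		}
		return []folderOrder{one}, nil
	}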
|
||||
|
||||
// Parent defined the ID of the parent directory
|
||||
@@ -271,9 +282,9 @@ type User struct {
|
||||
ModifiedAt time.Time `json:"modified_at"`
|
||||
Language string `json:"language"`
|
||||
Timezone string `json:"timezone"`
|
||||
SpaceAmount int64 `json:"space_amount"`
|
||||
SpaceUsed int64 `json:"space_used"`
|
||||
MaxUploadSize int64 `json:"max_upload_size"`
|
||||
SpaceAmount float64 `json:"space_amount"`
|
||||
SpaceUsed float64 `json:"space_used"`
|
||||
MaxUploadSize float64 `json:"max_upload_size"`
|
||||
Status string `json:"status"`
|
||||
JobTitle string `json:"job_title"`
|
||||
Phone string `json:"phone"`
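One plausible reason for widening these quota fields to float64 (the diff gives no rationale, so this is an assumption) is that very large values can arrive in scientific notation, which encoding/json will not place into an integer field:

	// `1.5e+15` unmarshals into a float64 field but fails for int64 with
	// "json: cannot unmarshal number 1.5e+15 ... of type int64".
	payload := []byte(`{"space_amount": 1.5e+15}`)
	var asFloat struct {
		SpaceAmount float64 `json:"space_amount"`
	}
	fmt.Println(json.Unmarshal(payload, &asFloat), asFloat.SpaceAmount) // <nil> 1.5e+15
	var asInt struct {
		SpaceAmount int64 `json:"space_amount"`
	}
	fmt.Println(json.Unmarshal(payload, &asInt)) // unmarshal error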
|
||||
|
||||
@@ -37,6 +37,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
@@ -46,7 +47,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/youmark/pkcs8"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -65,12 +65,10 @@ const (
|
||||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: nil,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://app.box.com/api/oauth2/authorize",
|
||||
TokenURL: "https://app.box.com/api/oauth2/token",
|
||||
},
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: nil,
|
||||
AuthURL: "https://app.box.com/api/oauth2/authorize",
|
||||
TokenURL: "https://app.box.com/api/oauth2/token",
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
@@ -240,8 +238,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
|
||||
return claims, nil
|
||||
}
|
||||
|
||||
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
|
||||
signingHeaders := map[string]interface{}{
|
||||
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any {
|
||||
signingHeaders := map[string]any{
|
||||
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
|
||||
}
|
||||
return signingHeaders
|
||||
@@ -258,6 +256,9 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
|
||||
|
||||
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
|
||||
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
|
||||
if block == nil {
|
||||
return nil, errors.New("box: failed to PEM decode private key")
|
||||
}
|
||||
if len(rest) > 0 {
|
||||
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
|
||||
}
|
||||
@@ -705,9 +706,27 @@ OUTER:
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||
list := list.NewHelper(callback)
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
var iErr error
|
||||
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
|
||||
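For reference, a minimal sketch of the caller side of the ListP contract documented above, assuming the standard fs.ListRCallback signature:

	// Sketch: entries arrive tranche by tranche; returning an error stops the listing.
	err := f.ListP(ctx, "some/dir", func(entries fs.DirEntries) error {
		for _, entry := range entries {
			fmt.Println(entry.Remote())
		}
		return nil
	})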
@@ -717,14 +736,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
f.dirCache.Put(remote, info.ID)
|
||||
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
|
||||
// FIXME more info from dir?
|
||||
entries = append(entries, d)
|
||||
err = list.Add(d)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
} else if info.Type == api.ItemTypeFile {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, info)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
entries = append(entries, o)
|
||||
err = list.Add(o)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Cache some metadata for this Item to help us process events later
|
||||
@@ -740,12 +767,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if iErr != nil {
|
||||
return nil, iErr
|
||||
return iErr
|
||||
}
|
||||
return entries, nil
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
@@ -1343,12 +1370,8 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
||||
nextStreamPosition = streamPosition
|
||||
|
||||
for {
|
||||
limit := f.opt.ListChunk
|
||||
|
||||
// box only allows a max of 500 events
|
||||
if limit > 500 {
|
||||
limit = 500
|
||||
}
|
||||
limit := min(f.opt.ListChunk, 500)
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
@@ -1745,6 +1768,7 @@ var (
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.ListPer = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
|
||||
@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
|
||||
const defaultDelay = 10
|
||||
var tries int
|
||||
outer:
|
||||
for tries = 0; tries < maxTries; tries++ {
|
||||
for tries = range maxTries {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
|
||||
if err != nil {
|
||||
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
|
||||
errs := make(chan error, 1)
|
||||
var wg sync.WaitGroup
|
||||
outer:
|
||||
for part := 0; part < session.TotalParts; part++ {
|
||||
for part := range session.TotalParts {
|
||||
// Check any errors
|
||||
select {
|
||||
case err = <-errs:
|
||||
@@ -211,10 +211,7 @@ outer:
|
||||
default:
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= chunkSize {
|
||||
reqSize = chunkSize
|
||||
}
|
||||
reqSize := min(remaining, chunkSize)
|
||||
|
||||
// Make a block of memory
|
||||
buf := make([]byte, reqSize)
|
||||
|
||||
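Many hunks in this compare swap counted loops and manual clamps for Go 1.22 range-over-int and the Go 1.21 min builtin; a tiny self-contained illustration of the equivalence:

	n := 3
	for i := 0; i < n; i++ { // classic counted loop
		fmt.Println("classic", i)
	}
	for i := range n { // identical iteration, 0..n-1
		fmt.Println("range-over-int", i)
	}
	reqSize := min(int64(100), int64(64)) // builtin min replaces the if-clamp
	fmt.Println(reqSize)                  // 64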
backend/cache/cache.go (vendored, 15 changes)
@@ -29,6 +29,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
@@ -683,7 +684,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
|
||||
start, end int64
|
||||
}
|
||||
parseChunks := func(ranges string) (crs []chunkRange, err error) {
|
||||
for _, part := range strings.Split(ranges, ",") {
|
||||
for part := range strings.SplitSeq(ranges, ",") {
|
||||
var start, end int64 = 0, math.MaxInt64
|
||||
switch ints := strings.Split(part, ":"); len(ints) {
|
||||
case 1:
|
||||
@@ -1086,13 +1087,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return cachedEntries, nil
|
||||
}
|
||||
|
||||
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
|
||||
func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
|
||||
entries, err := f.List(ctx, dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := 0; i < len(entries); i++ {
|
||||
for i := range entries {
|
||||
innerDir, ok := entries[i].(fs.Directory)
|
||||
if ok {
|
||||
err := f.recurse(ctx, innerDir.Remote(), list)
|
||||
@@ -1138,7 +1139,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
}
|
||||
|
||||
// if we're here, we're gonna do a standard recursive traversal and cache everything
|
||||
list := walk.NewListRHelper(callback)
|
||||
list := list.NewHelper(callback)
|
||||
err = f.recurse(ctx, dir, list)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1428,7 +1429,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
|
||||
}()
|
||||
|
||||
// wait until both are done
|
||||
for c := 0; c < 2; c++ {
|
||||
for range 2 {
|
||||
<-done
|
||||
}
|
||||
}
|
||||
@@ -1753,7 +1754,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
}
|
||||
|
||||
// Stats returns stats about the cache storage
|
||||
func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
|
||||
func (f *Fs) Stats() (map[string]map[string]any, error) {
|
||||
return f.cache.Stats()
|
||||
}
|
||||
|
||||
@@ -1933,7 +1934,7 @@ var commandHelp = []fs.CommandHelp{
|
||||
// The result should be capable of being JSON encoded
|
||||
// If it is a string or a []string it will be shown to the user
|
||||
// otherwise it will be JSON encoded and shown to the user like that
|
||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
|
||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
|
||||
switch name {
|
||||
case "stats":
|
||||
return f.Stats()
|
||||
|
||||
backend/cache/cache_internal_test.go (vendored, 16 changes)
@@ -360,7 +360,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(len(checkSample)), o.Size())
|
||||
|
||||
for i := 0; i < len(checkSample); i++ {
|
||||
for i := range checkSample {
|
||||
require.Equal(t, testData[i], checkSample[i])
|
||||
}
|
||||
}
|
||||
@@ -387,7 +387,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
|
||||
|
||||
readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(readData); i++ {
|
||||
for i := range readData {
|
||||
require.Equalf(t, testData[i], readData[i], "at byte %v", i)
|
||||
}
|
||||
}
|
||||
@@ -688,7 +688,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||
co, ok := o.(*cache.Object)
|
||||
require.True(t, ok)
|
||||
|
||||
for i := 0; i < 4; i++ { // read first 4
|
||||
for i := range 4 { // read first 4
|
||||
_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
|
||||
}
|
||||
cfs.CleanUpCache(true)
|
||||
@@ -971,7 +971,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
|
||||
f, err := os.CreateTemp("", "rclonecache-tempfile")
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < int(cnt); i++ {
|
||||
for range int(cnt) {
|
||||
data := randStringBytes(int(chunk))
|
||||
_, _ = f.Write(data)
|
||||
}
|
||||
@@ -1085,9 +1085,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
|
||||
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
|
||||
var err error
|
||||
var l []interface{}
|
||||
var l []any
|
||||
var list fs.DirEntries
|
||||
list, err = f.List(context.Background(), remote)
|
||||
for _, ll := range list {
|
||||
@@ -1215,7 +1215,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
|
||||
var err error
|
||||
var state cache.BackgroundUploadState
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
select {
|
||||
case state = <-buCh:
|
||||
// continue
|
||||
@@ -1293,7 +1293,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str
|
||||
|
||||
func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
|
||||
var err error
|
||||
for i := 0; i < maxRetries; i++ {
|
||||
for range maxRetries {
|
||||
err = block()
|
||||
if err == nil {
|
||||
return nil
|
||||
|
||||
backend/cache/cache_test.go (vendored, 2 changes)
@@ -17,7 +17,7 @@ func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestCache:",
|
||||
NilObject: (*cache.Object)(nil),
|
||||
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
|
||||
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
|
||||
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
|
||||
UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
|
||||
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
|
||||
|
||||
backend/cache/cache_upload_test.go (vendored, 2 changes)
@@ -162,7 +162,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
|
||||
|
||||
lastFile := ""
|
||||
for i := 0; i < totalFiles; i++ {
|
||||
for i := range totalFiles {
|
||||
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
|
||||
testReader := runInstance.randomReader(t, size)
|
||||
remote := "test/" + strconv.Itoa(i) + ".bin"
|
||||
|
||||
backend/cache/handle.go (vendored, 4 changes)
@@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) {
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < r.workers; i++ {
|
||||
for i := range r.workers {
|
||||
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
|
||||
if o < 0 || o >= r.cachedObject.Size() {
|
||||
continue
|
||||
@@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||
if !found {
|
||||
// we're gonna give the workers a chance to pickup the chunk
|
||||
// and retry a couple of times
|
||||
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
|
||||
for i := range r.cacheFs().opt.ReadRetries * 8 {
|
||||
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
|
||||
if err == nil {
|
||||
found = true
|
||||
|
||||
backend/cache/plex.go (vendored, 8 changes)
@@ -209,7 +209,7 @@ func (p *plexConnector) authenticate() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var data map[string]interface{}
|
||||
var data map[string]any
|
||||
err = json.NewDecoder(resp.Body).Decode(&data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to obtain token: %w", err)
|
||||
@@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
|
||||
}
|
||||
|
||||
// adapted from: https://stackoverflow.com/a/28878037 (credit)
|
||||
func get(m interface{}, path ...interface{}) (interface{}, bool) {
|
||||
func get(m any, path ...any) (any, bool) {
|
||||
for _, p := range path {
|
||||
switch idx := p.(type) {
|
||||
case string:
|
||||
if mm, ok := m.(map[string]interface{}); ok {
|
||||
if mm, ok := m.(map[string]any); ok {
|
||||
if val, found := mm[idx]; found {
|
||||
m = val
|
||||
continue
|
||||
@@ -285,7 +285,7 @@ func get(m interface{}, path ...interface{}) (interface{}, bool) {
|
||||
}
|
||||
return nil, false
|
||||
case int:
|
||||
if mm, ok := m.([]interface{}); ok {
|
||||
if mm, ok := m.([]any); ok {
|
||||
if len(mm) > idx {
|
||||
m = mm[idx]
|
||||
continue
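A hedged usage sketch of the generic accessor above (the JSON document and key names are invented for illustration):

	// Walk a decoded JSON document: string path elements index maps, ints index slices.
	var doc any
	_ = json.Unmarshal([]byte(`{"user":{"authToken":"abc"},"sizes":[512,1024]}`), &doc)
	if tok, ok := get(doc, "user", "authToken"); ok {
		fmt.Println(tok) // abc
	}
	if size, ok := get(doc, "sizes", 1); ok {
		fmt.Println(size) // 1024 (as float64)
	}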
|
||||
|
||||
backend/cache/storage_persistent.go (vendored, 11 changes)
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"go.etcd.io/bbolt/errors"
|
||||
)
|
||||
|
||||
// Constants
|
||||
@@ -597,7 +598,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if err == bolt.ErrDatabaseNotOpen {
|
||||
if err == errors.ErrDatabaseNotOpen {
|
||||
// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
|
||||
return
|
||||
}
|
||||
@@ -606,16 +607,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
|
||||
}
|
||||
|
||||
// Stats returns a go map with the stats key values
|
||||
func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
|
||||
r := make(map[string]map[string]interface{})
|
||||
r["data"] = make(map[string]interface{})
|
||||
func (b *Persistent) Stats() (map[string]map[string]any, error) {
|
||||
r := make(map[string]map[string]any)
|
||||
r["data"] = make(map[string]any)
|
||||
r["data"]["oldest-ts"] = time.Now()
|
||||
r["data"]["oldest-file"] = ""
|
||||
r["data"]["newest-ts"] = time.Now()
|
||||
r["data"]["newest-file"] = ""
|
||||
r["data"]["total-chunks"] = 0
|
||||
r["data"]["total-size"] = int64(0)
|
||||
r["files"] = make(map[string]interface{})
|
||||
r["files"] = make(map[string]any)
|
||||
r["files"]["oldest-ts"] = time.Now()
|
||||
r["files"]["oldest-name"] = ""
|
||||
r["files"]["newest-ts"] = time.Now()
|
||||
|
||||
backend/cache/utils_test.go (vendored, 1 change)
@@ -1,5 +1,4 @@
|
||||
//go:build !plan9 && !js
|
||||
// +build !plan9,!js
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -356,7 +356,8 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
DirModTimeUpdatesOnWrite: true,
|
||||
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
|
||||
|
||||
f.features.Disable("ListR") // Recursive listing may cause chunker skip files
|
||||
f.features.ListR = nil // Recursive listing may cause chunker skip files
|
||||
f.features.ListP = nil // ListP not supported yet
|
||||
|
||||
return f, err
|
||||
}
|
||||
@@ -632,7 +633,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
|
||||
|
||||
// forbidChunk prints error message or raises error if file is chunk.
|
||||
// First argument sets log prefix, use `false` to suppress message.
|
||||
func (f *Fs) forbidChunk(o interface{}, filePath string) error {
|
||||
func (f *Fs) forbidChunk(o any, filePath string) error {
|
||||
if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
|
||||
if f.opt.FailHard {
|
||||
return fmt.Errorf("chunk overlap with %q", parentPath)
|
||||
@@ -680,7 +681,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
|
||||
circleSec := unixSec % closestPrimeZzzzSeconds
|
||||
first4chars := strconv.FormatInt(circleSec, 36)
|
||||
|
||||
for tries := 0; tries < maxTransactionProbes; tries++ {
|
||||
for range maxTransactionProbes {
|
||||
f.xactIDMutex.Lock()
|
||||
randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
|
||||
f.xactIDMutex.Unlock()
|
||||
@@ -1189,10 +1190,7 @@ func (f *Fs) put(
|
||||
}
|
||||
|
||||
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
|
||||
size := c.sizeLeft
|
||||
if size > c.chunkSize {
|
||||
size = c.chunkSize
|
||||
}
|
||||
size := min(c.sizeLeft, c.chunkSize)
|
||||
savedReadCount := c.readCount
|
||||
|
||||
// If a single chunk is expected, avoid the extra rename operation
|
||||
@@ -1477,10 +1475,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
|
||||
const bufLen = 1048576 // 1 MiB
|
||||
buf := make([]byte, bufLen)
|
||||
for size > 0 {
|
||||
n := size
|
||||
if n > bufLen {
|
||||
n = bufLen
|
||||
}
|
||||
n := min(size, bufLen)
|
||||
if _, err := io.ReadFull(in, buf[0:n]); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1866,6 +1861,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
|
||||
// baseMove chains to the wrapped Move or simulates it by Copy+Delete
|
||||
func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
|
||||
ctx, ci := fs.AddConfig(ctx)
|
||||
ci.NameTransform = nil // ensure operations.Move does not double-transform here
|
||||
var (
|
||||
dest fs.Object
|
||||
err error
|
||||
@@ -2480,7 +2477,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
|
||||
if len(data) > maxMetadataSizeWritten {
|
||||
return nil, false, ErrMetaTooBig
|
||||
}
|
||||
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
||||
if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
||||
return nil, false, errors.New("invalid json")
|
||||
}
|
||||
var metadata metaSimpleJSON
|
||||
|
||||
@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
|
||||
})
|
||||
}
|
||||
|
||||
type settings map[string]interface{}
|
||||
type settings map[string]any
|
||||
|
||||
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
|
||||
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
|
||||
|
||||
@@ -46,6 +46,7 @@ func TestIntegration(t *testing.T) {
|
||||
"DirCacheFlush",
|
||||
"UserInfo",
|
||||
"Disconnect",
|
||||
"ListP",
|
||||
},
|
||||
}
|
||||
if *fstest.RemoteName == "" {
|
||||
|
||||
backend/cloudinary/api/types.go (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
// Package api has type definitions for cloudinary
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// CloudinaryEncoder extends the built-in encoder
|
||||
type CloudinaryEncoder interface {
|
||||
// FromStandardPath takes a / separated path in Standard encoding
|
||||
// and converts it to a / separated path in this encoding.
|
||||
FromStandardPath(string) string
|
||||
// FromStandardName takes name in Standard encoding and converts
|
||||
// it in this encoding.
|
||||
FromStandardName(string) string
|
||||
// ToStandardPath takes a / separated path in this encoding
|
||||
// and converts it to a / separated path in Standard encoding.
|
||||
ToStandardPath(string) string
|
||||
// ToStandardName takes name in this encoding and converts
|
||||
// it in Standard encoding.
|
||||
ToStandardName(string, string) string
|
||||
// Encoded root of the remote (as passed into NewFs)
|
||||
FromStandardFullPath(string) string
|
||||
}
|
||||
|
||||
// UpdateOptions was created to pass options from Update to Put
|
||||
type UpdateOptions struct {
|
||||
PublicID string
|
||||
ResourceType string
|
||||
DeliveryType string
|
||||
AssetFolder string
|
||||
DisplayName string
|
||||
}
|
||||
|
||||
// Header formats the option as a string
|
||||
func (o *UpdateOptions) Header() (string, string) {
|
||||
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
|
||||
}
|
||||
|
||||
// Mandatory returns whether the option must be parsed or can be ignored
|
||||
func (o *UpdateOptions) Mandatory() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// String formats the option into human-readable form
|
||||
func (o *UpdateOptions) String() string {
|
||||
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
|
||||
}
|
||||
backend/cloudinary/cloudinary.go (new file, 754 lines)
@@ -0,0 +1,754 @@
|
||||
// Package cloudinary provides an interface to the Cloudinary DAM
|
||||
package cloudinary
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudinary/cloudinary-go/v2"
|
||||
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
|
||||
"github.com/cloudinary/cloudinary-go/v2/api/admin"
|
||||
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
|
||||
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
|
||||
"github.com/rclone/rclone/backend/cloudinary/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/zeebo/blake3"
|
||||
)
|
||||
|
||||
// Cloudinary shouldn't have a trailing dot if there is no path
|
||||
func cldPathDir(somePath string) string {
|
||||
if somePath == "" || somePath == "." {
|
||||
return somePath
|
||||
}
|
||||
dir := path.Dir(somePath)
|
||||
if dir == "." {
|
||||
return ""
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "cloudinary",
|
||||
Description: "Cloudinary",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{
|
||||
{
|
||||
Name: "cloud_name",
|
||||
Help: "Cloudinary Environment Name",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "api_key",
|
||||
Help: "Cloudinary API Key",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "api_secret",
|
||||
Help: "Cloudinary API Secret",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "upload_prefix",
|
||||
Help: "Specify the API endpoint for environments out of the US",
|
||||
},
|
||||
{
|
||||
Name: "upload_preset",
|
||||
Help: "Upload Preset to select asset manipulation on upload",
|
||||
},
|
||||
{
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
|
||||
encoder.EncodeSlash |
|
||||
encoder.EncodeLtGt |
|
||||
encoder.EncodeDoubleQuote |
|
||||
encoder.EncodeQuestion |
|
||||
encoder.EncodeAsterisk |
|
||||
encoder.EncodePipe |
|
||||
encoder.EncodeHash |
|
||||
encoder.EncodePercent |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeDel |
|
||||
encoder.EncodeCtl |
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeInvalidUtf8 |
|
||||
encoder.EncodeDot),
|
||||
},
|
||||
{
|
||||
Name: "eventually_consistent_delay",
|
||||
Default: fs.Duration(0),
|
||||
Advanced: true,
|
||||
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
|
||||
},
|
||||
{
|
||||
Name: "adjust_media_files_extensions",
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
|
||||
},
|
||||
{
|
||||
Name: "media_extensions",
|
||||
Default: []string{
|
||||
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
|
||||
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
|
||||
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
|
||||
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
|
||||
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
|
||||
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
|
||||
Advanced: true,
|
||||
Help: "Cloudinary supported media extensions",
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
CloudName string `config:"cloud_name"`
|
||||
APIKey string `config:"api_key"`
|
||||
APISecret string `config:"api_secret"`
|
||||
UploadPrefix string `config:"upload_prefix"`
|
||||
UploadPreset string `config:"upload_preset"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
|
||||
MediaExtensions []string `config:"media_extensions"`
|
||||
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
|
||||
}
|
||||
|
||||
// Fs represents a remote cloudinary server
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
pacer *fs.Pacer
|
||||
srv *rest.Client // For downloading assets via the Cloudinary CDN
|
||||
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
|
||||
lastCRUD time.Time
|
||||
}
|
||||
|
||||
// Object describes a cloudinary object
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
size int64
|
||||
modTime time.Time
|
||||
url string
|
||||
md5sum string
|
||||
publicID string
|
||||
resourceType string
|
||||
deliveryType string
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize the Cloudinary client
|
||||
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
|
||||
}
|
||||
cld.Admin.Client = *fshttp.NewClient(ctx)
|
||||
cld.Upload.Client = *fshttp.NewClient(ctx)
|
||||
if opt.UploadPrefix != "" {
|
||||
cld.Config.API.UploadPrefix = opt.UploadPrefix
|
||||
}
|
||||
client := fshttp.NewClient(ctx)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
cld: cld,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
|
||||
srv: rest.NewClient(client),
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
if root != "" {
|
||||
// Check to see if the root is actually an existing file
|
||||
remote := path.Base(root)
|
||||
f.root = cldPathDir(root)
|
||||
_, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
|
||||
// File doesn't exist so return the previous root
|
||||
f.root = root
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// FromStandardPath implementation of the api.CloudinaryEncoder
|
||||
func (f *Fs) FromStandardPath(s string) string {
|
||||
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
|
||||
}
|
||||
|
||||
// FromStandardName implementation of the api.CloudinaryEncoder
|
||||
func (f *Fs) FromStandardName(s string) string {
|
||||
if f.opt.AdjustMediaFilesExtensions {
|
||||
parsedURL, err := url.Parse(s)
|
||||
ext := ""
|
||||
if err != nil {
|
||||
fs.Logf(nil, "Error parsing URL: %v", err)
|
||||
} else {
|
||||
ext = path.Ext(parsedURL.Path)
|
||||
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
|
||||
s = strings.TrimSuffix(parsedURL.Path, ext)
|
||||
}
|
||||
}
|
||||
}
|
||||
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
|
||||
}
|
||||
|
||||
// ToStandardPath implementation of the api.CloudinaryEncoder
|
||||
func (f *Fs) ToStandardPath(s string) string {
|
||||
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
|
||||
}
|
||||
|
||||
// ToStandardName implementation of the api.CloudinaryEncoder
|
||||
func (f *Fs) ToStandardName(s string, assetURL string) string {
|
||||
ext := ""
|
||||
if f.opt.AdjustMediaFilesExtensions {
|
||||
parsedURL, err := url.Parse(assetURL)
|
||||
if err != nil {
|
||||
fs.Logf(nil, "Error parsing URL: %v", err)
|
||||
} else {
|
||||
ext = path.Ext(parsedURL.Path)
|
||||
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
|
||||
ext = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
|
||||
}
|
||||
|
||||
// FromStandardFullPath encodes a full path to Cloudinary standard
|
||||
func (f *Fs) FromStandardFullPath(dir string) string {
|
||||
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
|
||||
}
|
||||
|
||||
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
|
||||
func (f *Fs) ToAssetFolderAPI(dir string) string {
|
||||
return strings.ReplaceAll(dir, "%", "%25")
|
||||
}
|
||||
|
||||
// ToDisplayNameElastic encodes a special case of elasticsearch
|
||||
func (f *Fs) ToDisplayNameElastic(dir string) string {
|
||||
return strings.ReplaceAll(dir, "!", "\\!")
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// WaitEventuallyConsistent waits till the FS is eventually consistent
|
||||
func (f *Fs) WaitEventuallyConsistent() {
|
||||
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
|
||||
return
|
||||
}
|
||||
delay := time.Duration(f.opt.EventuallyConsistentDelay)
|
||||
timeSinceLastCRUD := time.Since(f.lastCRUD)
|
||||
if timeSinceLastCRUD < delay {
|
||||
time.Sleep(delay - timeSinceLastCRUD)
|
||||
}
|
||||
}
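The wait above only sleeps for whatever remains of the configured window, measured from the last write; a small sketch with a 7 second delay (the value the integration test at the end of this compare configures):

	// Sketch: if the last CRUD call finished 2s ago and the window is 7s, sleep ~5s.
	delay := 7 * time.Second
	elapsed := time.Since(lastCRUD) // lastCRUD stands in for f.lastCRUD
	if elapsed < delay {
		time.Sleep(delay - elapsed)
	}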
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("Cloudinary root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries
|
||||
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
remotePrefix := f.FromStandardFullPath(dir)
|
||||
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
|
||||
remotePrefix += "/"
|
||||
}
|
||||
|
||||
var entries fs.DirEntries
|
||||
dirs := make(map[string]struct{})
|
||||
nextCursor := ""
|
||||
f.WaitEventuallyConsistent()
|
||||
for {
|
||||
// use the folders api to list folders.
|
||||
folderParams := admin.SubFoldersParams{
|
||||
Folder: f.ToAssetFolderAPI(remotePrefix),
|
||||
MaxResults: 500,
|
||||
}
|
||||
if nextCursor != "" {
|
||||
folderParams.NextCursor = nextCursor
|
||||
}
|
||||
|
||||
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
|
||||
}
|
||||
if results.Error.Message != "" {
|
||||
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
|
||||
}
|
||||
|
||||
for _, folder := range results.Folders {
|
||||
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
|
||||
parts := strings.Split(relativePath, "/")
|
||||
|
||||
// It's a directory
|
||||
dirName := parts[len(parts)-1]
|
||||
if _, found := dirs[dirName]; !found {
|
||||
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
|
||||
entries = append(entries, d)
|
||||
dirs[dirName] = struct{}{}
|
||||
}
|
||||
}
|
||||
// Break if there are no more results
|
||||
if results.NextCursor == "" {
|
||||
break
|
||||
}
|
||||
nextCursor = results.NextCursor
|
||||
}
|
||||
|
||||
for {
|
||||
// Use the assets.AssetsByAssetFolder API to list assets
|
||||
assetsParams := admin.AssetsByAssetFolderParams{
|
||||
AssetFolder: remotePrefix,
|
||||
MaxResults: 500,
|
||||
}
|
||||
if nextCursor != "" {
|
||||
assetsParams.NextCursor = nextCursor
|
||||
}
|
||||
|
||||
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list assets: %w", err)
|
||||
}
|
||||
|
||||
for _, asset := range results.Assets {
|
||||
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: int64(asset.Bytes),
|
||||
modTime: asset.CreatedAt,
|
||||
url: asset.SecureURL,
|
||||
publicID: asset.PublicID,
|
||||
resourceType: asset.AssetType,
|
||||
deliveryType: asset.Type,
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
|
||||
// Break if there are no more results
|
||||
if results.NextCursor == "" {
|
||||
break
|
||||
}
|
||||
nextCursor = results.NextCursor
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
searchParams := search.Query{
|
||||
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
|
||||
f.FromStandardFullPath(cldPathDir(remote)),
|
||||
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
|
||||
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
|
||||
MaxResults: 2,
|
||||
}
|
||||
var results *admin.SearchResult
|
||||
f.WaitEventuallyConsistent()
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err1 error
|
||||
results, err1 = f.cld.Admin.Search(ctx, searchParams)
|
||||
if err1 == nil && results.TotalCount != len(results.Assets) {
|
||||
err1 = errors.New("partial response so waiting for eventual consistency")
|
||||
}
|
||||
return shouldRetry(ctx, nil, err1)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
if results.TotalCount == 0 || len(results.Assets) == 0 {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
asset := results.Assets[0]
|
||||
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: int64(asset.Bytes),
|
||||
modTime: asset.UploadedAt,
|
||||
url: asset.SecureURL,
|
||||
md5sum: asset.Etag,
|
||||
publicID: asset.PublicID,
|
||||
resourceType: asset.ResourceType,
|
||||
deliveryType: asset.Type,
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
|
||||
payload := []byte(path.Join(assetFolder, displayName))
|
||||
hash := blake3.Sum256(payload)
|
||||
return hex.EncodeToString(hash[:])
|
||||
}
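The helper above derives the public ID as a digest of folder plus display name (the modTime parameter is currently unused), so re-uploading the same remote path maps back to the same Cloudinary asset; a short illustration using the same calls:

	// Same inputs always yield the same 64-character hex public ID.
	payload := []byte(path.Join("holiday-photos", "beach.jpg"))
	sum := blake3.Sum256(payload)
	fmt.Println(hex.EncodeToString(sum[:]))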
|
||||
|
||||
// Put uploads content to Cloudinary
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if src.Size() == 0 {
|
||||
return nil, fs.ErrorCantUploadEmptyFiles
|
||||
}
|
||||
|
||||
params := uploader.UploadParams{
|
||||
UploadPreset: f.opt.UploadPreset,
|
||||
}
|
||||
|
||||
updateObject := false
|
||||
var modTime time.Time
|
||||
for _, option := range options {
|
||||
if updateOptions, ok := option.(*api.UpdateOptions); ok {
|
||||
if updateOptions.PublicID != "" {
|
||||
updateObject = true
|
||||
params.Overwrite = SDKApi.Bool(true)
|
||||
params.Invalidate = SDKApi.Bool(true)
|
||||
params.PublicID = updateOptions.PublicID
|
||||
params.ResourceType = updateOptions.ResourceType
|
||||
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
|
||||
params.AssetFolder = updateOptions.AssetFolder
|
||||
params.DisplayName = updateOptions.DisplayName
|
||||
modTime = src.ModTime(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !updateObject {
|
||||
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
|
||||
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
|
||||
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
|
||||
// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
|
||||
// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
|
||||
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
|
||||
}
|
||||
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
|
||||
f.lastCRUD = time.Now()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
|
||||
}
|
||||
if !updateObject {
|
||||
modTime = uploadResult.CreatedAt
|
||||
}
|
||||
if uploadResult.Error.Message != "" {
|
||||
return nil, errors.New(uploadResult.Error.Message)
|
||||
}
|
||||
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
size: int64(uploadResult.Bytes),
|
||||
modTime: modTime,
|
||||
url: uploadResult.SecureURL,
|
||||
md5sum: uploadResult.Etag,
|
||||
publicID: uploadResult.PublicID,
|
||||
resourceType: uploadResult.ResourceType,
|
||||
deliveryType: uploadResult.Type,
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Precision of the remote
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return fs.ModTimeNotSupported
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5)
|
||||
}
|
||||
|
||||
// Mkdir creates empty folders
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
|
||||
res, err := f.cld.Admin.CreateFolder(ctx, params)
|
||||
f.lastCRUD = time.Now()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.Error.Message != "" {
|
||||
return errors.New(res.Error.Message)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rmdir deletes empty folders
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
// Additional test because Cloudinary will delete folders without
|
||||
// assets, regardless of empty sub-folders
|
||||
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
|
||||
folderParams := admin.SubFoldersParams{
|
||||
Folder: folder,
|
||||
MaxResults: 1,
|
||||
}
|
||||
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if results.TotalCount > 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
|
||||
params := admin.DeleteFolderParams{Folder: folder}
|
||||
res, err := f.cld.Admin.DeleteFolder(ctx, params)
|
||||
f.lastCRUD = time.Now()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.Error.Message != "" {
|
||||
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return errors.New(res.Error.Message)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
420, // Too Many Requests (legacy)
|
||||
429, // Too Many Requests
|
||||
500, // Internal Server Error
|
||||
502, // Bad Gateway
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Timeout
|
||||
509, // Bandwidth Limit Exceeded
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
if err != nil {
|
||||
tryAgain := "Try again on "
|
||||
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
|
||||
layout := "2006-01-02 15:04:05 UTC"
|
||||
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
|
||||
timestamp, err2 := time.Parse(layout, dateStr)
|
||||
if err2 == nil {
|
||||
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
|
||||
}
|
||||
}
|
||||
|
||||
fs.Debugf(nil, "Retrying API error %v", err)
|
||||
return true, err
|
||||
}
|
||||
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
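The branch above extracts a retry-after timestamp from the error text; an illustrative call (the exact Cloudinary error wording is an assumption, only the "Try again on " marker and the timestamp layout come from the code):

	// Sketch: an error carrying "Try again on <timestamp>" becomes a RetryAfter error.
	err := errors.New("Rate limit exceeded. Try again on 2030-01-01 00:00:00 UTC")
	retry, retryErr := shouldRetry(context.Background(), nil, err)
	fmt.Println(retry, retryErr) // true, retry after the remaining wait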
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Hash returns the MD5 of an object
|
||||
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
|
||||
if ty != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return o.md5sum, nil
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// Size of object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// Storable returns if this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: o.url,
|
||||
Options: options,
|
||||
}
|
||||
var offset int64
|
||||
var count int64
|
||||
var key string
|
||||
var value string
|
||||
fs.FixRangeOption(options, o.size)
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(o.size)
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
}
|
||||
key, value = option.Header()
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
count = o.size - offset
|
||||
key, value = option.Header()
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
if key != "" && value != "" {
|
||||
opts.ExtraHeaders = make(map[string]string)
|
||||
opts.ExtraHeaders[key] = value
|
||||
}
|
||||
// Make sure that the asset is fully available
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
if err == nil {
|
||||
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
|
||||
if clErr == nil && count == int64(cl) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
|
||||
}
|
||||
return resp.Body, err
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
options = append(options, &api.UpdateOptions{
|
||||
PublicID: o.publicID,
|
||||
ResourceType: o.resourceType,
|
||||
DeliveryType: o.deliveryType,
|
||||
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
|
||||
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
|
||||
})
|
||||
updatedObj, err := o.fs.Put(ctx, in, src, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if uo, ok := updatedObj.(*Object); ok {
|
||||
o.size = uo.size
|
||||
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
|
||||
o.url = uo.url
|
||||
o.md5sum = uo.md5sum
|
||||
o.publicID = uo.publicID
|
||||
o.resourceType = uo.resourceType
|
||||
o.deliveryType = uo.deliveryType
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
params := uploader.DestroyParams{
|
||||
PublicID: o.publicID,
|
||||
ResourceType: o.resourceType,
|
||||
Type: o.deliveryType,
|
||||
}
|
||||
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
|
||||
o.fs.lastCRUD = time.Now()
|
||||
if dErr != nil {
|
||||
return dErr
|
||||
}
|
||||
|
||||
if res.Error.Message != "" {
|
||||
return errors.New(res.Error.Message)
|
||||
}
|
||||
|
||||
if res.Result != "ok" {
|
||||
return errors.New(res.Result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
backend/cloudinary/cloudinary_test.go (new file, 23 lines added)
@@ -0,0 +1,23 @@
// Test Cloudinary filesystem interface

package cloudinary_test

import (
    "testing"

    "github.com/rclone/rclone/backend/cloudinary"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    name := "TestCloudinary"
    fstests.Run(t, &fstests.Opt{
        RemoteName:      name + ":",
        NilObject:       (*cloudinary.Object)(nil),
        SkipInvalidUTF8: true,
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "eventually_consistent_delay", Value: "7"},
        },
    })
}
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"golang.org/x/sync/errgroup"
|
||||
@@ -186,7 +187,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
var mu sync.Mutex
|
||||
for _, upstream := range opt.Upstreams {
|
||||
upstream := upstream
|
||||
g.Go(func() (err error) {
|
||||
equal := strings.IndexRune(upstream, '=')
|
||||
if equal < 0 {
|
||||
@@ -240,18 +240,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
DirModTimeUpdatesOnWrite: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
canMove := true
|
||||
canMove, slowHash := true, false
|
||||
for _, u := range f.upstreams {
|
||||
features = features.Mask(ctx, u.f) // Mask all upstream fs
|
||||
if !operations.CanServerSideMove(u.f) {
|
||||
canMove = false
|
||||
}
|
||||
slowHash = slowHash || u.f.Features().SlowHash
|
||||
}
|
||||
// We can move if all remotes support Move or Copy
|
||||
if canMove {
|
||||
features.Move = f.Move
|
||||
}
|
||||
|
||||
// If any of upstreams are SlowHash, propagate it
|
||||
features.SlowHash = slowHash
|
||||
|
||||
// Enable ListR when upstreams either support ListR or is local
|
||||
// But not when all upstreams are local
|
||||
if features.ListR == nil {
|
||||
@@ -265,6 +269,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
}
|
||||
}
|
||||
|
||||
// Enable ListP always
|
||||
features.ListP = f.ListP
|
||||
|
||||
// Enable Purge when any upstreams support it
|
||||
if features.Purge == nil {
|
||||
for _, u := range f.upstreams {
|
||||
@@ -362,7 +369,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
for _, u := range f.upstreams {
|
||||
u := u
|
||||
g.Go(func() (err error) {
|
||||
return fn(gCtx, u)
|
||||
})
|
||||
@@ -629,7 +635,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
||||
var uChans []chan time.Duration
|
||||
|
||||
for _, u := range f.upstreams {
|
||||
u := u
|
||||
if do := u.f.Features().ChangeNotify; do != nil {
|
||||
ch := make(chan time.Duration)
|
||||
uChans = append(uChans, ch)
|
||||
@@ -809,24 +814,52 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
|
||||
if f.root == "" && dir == "" {
|
||||
entries = make(fs.DirEntries, 0, len(f.upstreams))
|
||||
entries := make(fs.DirEntries, 0, len(f.upstreams))
|
||||
for combineDir := range f.upstreams {
|
||||
d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
|
||||
entries = append(entries, d)
|
||||
}
|
||||
return entries, nil
|
||||
return callback(entries)
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
entries, err = u.f.List(ctx, uRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
wrappedCallback := func(entries fs.DirEntries) error {
|
||||
entries, err := u.wrapEntries(ctx, entries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return callback(entries)
|
||||
}
|
||||
return u.wrapEntries(ctx, entries)
|
||||
listP := u.f.Features().ListP
|
||||
if listP == nil {
|
||||
entries, err := u.f.List(ctx, uRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return wrappedCallback(entries)
|
||||
}
|
||||
return listP(ctx, uRemote, wrappedCallback)
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
|
||||
@@ -2,10 +2,8 @@
|
||||
package compress
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
@@ -29,6 +27,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
@@ -45,6 +44,7 @@ const (
|
||||
minCompressionRatio = 1.1
|
||||
|
||||
gzFileExt = ".gz"
|
||||
zstdFileExt = ".zst"
|
||||
metaFileExt = ".json"
|
||||
uncompressedFileExt = ".bin"
|
||||
)
|
||||
@@ -53,6 +53,7 @@ const (
|
||||
const (
|
||||
Uncompressed = 0
|
||||
Gzip = 2
|
||||
Zstd = 4
|
||||
)
|
||||
|
||||
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
|
||||
@@ -65,6 +66,10 @@ func init() {
|
||||
Value: "gzip",
|
||||
Help: "Standard gzip compression with fastest parameters.",
|
||||
},
|
||||
{
|
||||
Value: "zstd",
|
||||
Help: "Zstandard compression — fast modern algorithm offering adjustable speed-to-compression tradeoffs.",
|
||||
},
|
||||
}
|
||||
|
||||
// Register our remote
|
||||
@@ -86,17 +91,23 @@ func init() {
|
||||
Examples: compressionModeOptions,
|
||||
}, {
|
||||
Name: "level",
|
||||
Help: `GZIP compression level (-2 to 9).
|
||||
|
||||
Generally -1 (default, equivalent to 5) is recommended.
|
||||
Levels 1 to 9 increase compression at the cost of speed. Going past 6
|
||||
generally offers very little return.
|
||||
|
||||
Level -2 uses Huffman encoding only. Only use if you know what you
|
||||
are doing.
|
||||
Level 0 turns off compression.`,
|
||||
Default: sgzip.DefaultCompression,
|
||||
Advanced: true,
|
||||
Help: `GZIP (levels -2 to 9):
|
||||
- -2 — Huffman encoding only. Only use if you know what you're doing.
|
||||
- -1 (default) — recommended; equivalent to level 5.
|
||||
- 0 — turns off compression.
|
||||
- 1–9 — increase compression at the cost of speed. Going past 6 generally offers very little return.
|
||||
|
||||
ZSTD (levels 0 to 4):
|
||||
- 0 — turns off compression entirely.
|
||||
- 1 — fastest compression with the lowest ratio.
|
||||
- 2 (default) — good balance of speed and compression.
|
||||
- 3 — better compression, but uses about 2–3x more CPU than the default.
|
||||
- 4 — best possible compression ratio (highest CPU cost).
|
||||
|
||||
Notes:
|
||||
- Choose GZIP for wide compatibility; ZSTD for better speed/ratio tradeoffs.
|
||||
- Negative gzip levels: -2 = Huffman-only, -1 = default (≈ level 5).`,
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "ram_cache_limit",
|
||||
Help: `Some remotes don't allow the upload of files with unknown size.
|
||||
@@ -111,6 +122,47 @@ this limit will be cached on disk.`,
|
||||
})
|
||||
}
|
||||
|
||||
// compressionModeHandler defines the interface for handling different compression modes
|
||||
type compressionModeHandler interface {
|
||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
||||
processFileNameGetFileExtension(compressionMode int) string
|
||||
|
||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
||||
newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error)
|
||||
|
||||
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
||||
// the configured threshold
|
||||
isCompressible(r io.Reader, compressionMode int) (bool, error)
|
||||
|
||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
||||
putCompress(
|
||||
ctx context.Context,
|
||||
f *Fs,
|
||||
in io.Reader,
|
||||
src fs.ObjectInfo,
|
||||
options []fs.OpenOption,
|
||||
mimeType string,
|
||||
) (fs.Object, *ObjectMetadata, error)
|
||||
|
||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
||||
openGetReadCloser(
|
||||
ctx context.Context,
|
||||
o *Object,
|
||||
offset int64,
|
||||
limit int64,
|
||||
cr chunkedreader.ChunkedReader,
|
||||
closer io.Closer,
|
||||
options ...fs.OpenOption,
|
||||
) (rc io.ReadCloser, err error)
|
||||
|
||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
||||
putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error)
|
||||
|
||||
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
|
||||
// Warning: This function panics if cmeta is not of the expected type.
|
||||
newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata
|
||||
}
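The interface above is the heart of this refactor: every mode-specific decision in the compress backend now goes through a handler chosen once at construction time rather than an in-line switch at each call site. A minimal, self-contained sketch of that dispatch pattern (illustrative names only, not rclone code):

package main

import "fmt"

// modeHandler is a cut-down stand-in for compressionModeHandler.
type modeHandler interface {
    fileExtension() string
}

type gzipHandler struct{}
type zstdHandler struct{}

func (gzipHandler) fileExtension() string { return ".gz" }
func (zstdHandler) fileExtension() string { return ".zst" }

// handlerForMode mirrors the switch added to NewFs: pick the handler once,
// then every caller goes through the interface instead of switching on mode.
func handlerForMode(mode string) modeHandler {
    switch mode {
    case "zstd":
        return zstdHandler{}
    default:
        return gzipHandler{}
    }
}

func main() {
    h := handlerForMode("zstd")
    fmt.Println(h.fileExtension()) // .zst
}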
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Remote string `config:"remote"`
|
||||
@@ -124,12 +176,13 @@ type Options struct {
|
||||
// Fs represents a wrapped fs.Fs
|
||||
type Fs struct {
|
||||
fs.Fs
|
||||
wrapper fs.Fs
|
||||
name string
|
||||
root string
|
||||
opt Options
|
||||
mode int // compression mode id
|
||||
features *fs.Features // optional features
|
||||
wrapper fs.Fs
|
||||
name string
|
||||
root string
|
||||
opt Options
|
||||
mode int // compression mode id
|
||||
features *fs.Features // optional features
|
||||
modeHandler compressionModeHandler // compression mode handler
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
@@ -166,13 +219,28 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
|
||||
}
|
||||
|
||||
compressionMode := compressionModeFromName(opt.CompressionMode)
|
||||
var modeHandler compressionModeHandler
|
||||
|
||||
switch compressionMode {
|
||||
case Gzip:
|
||||
modeHandler = &gzipModeHandler{}
|
||||
case Zstd:
|
||||
modeHandler = &zstdModeHandler{}
|
||||
case Uncompressed:
|
||||
modeHandler = &uncompressedModeHandler{}
|
||||
default:
|
||||
modeHandler = &unknownModeHandler{}
|
||||
}
|
||||
|
||||
// Create the wrapping fs
|
||||
f := &Fs{
|
||||
Fs: wrappedFs,
|
||||
name: name,
|
||||
root: rpath,
|
||||
opt: *opt,
|
||||
mode: compressionModeFromName(opt.CompressionMode),
|
||||
Fs: wrappedFs,
|
||||
name: name,
|
||||
root: rpath,
|
||||
opt: *opt,
|
||||
mode: compressionMode,
|
||||
modeHandler: modeHandler,
|
||||
}
|
||||
// Correct root if definitely pointing to a file
|
||||
if err == fs.ErrorIsFile {
|
||||
@@ -208,14 +276,19 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
if !operations.CanServerSideMove(wrappedFs) {
|
||||
f.features.Disable("PutStream")
|
||||
}
|
||||
// Enable ListP always
|
||||
f.features.ListP = f.ListP
|
||||
|
||||
return f, err
|
||||
}
|
||||
|
||||
// compressionModeFromName converts a compression mode name to its int representation.
|
||||
func compressionModeFromName(name string) int {
|
||||
switch name {
|
||||
case "gzip":
|
||||
return Gzip
|
||||
case "zstd":
|
||||
return Zstd
|
||||
default:
|
||||
return Uncompressed
|
||||
}
|
||||
@@ -239,7 +312,7 @@ func base64ToInt64(str string) (int64, error) {
|
||||
|
||||
// Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file.
|
||||
// Returns -2 for the original size if the file is uncompressed.
|
||||
func processFileName(compressedFileName string) (origFileName string, extension string, origSize int64, err error) {
|
||||
func processFileName(compressedFileName string, modeHandler compressionModeHandler) (origFileName string, extension string, origSize int64, err error) {
|
||||
// Separate the filename and size from the extension
|
||||
extensionPos := strings.LastIndex(compressedFileName, ".")
|
||||
if extensionPos == -1 {
|
||||
@@ -258,7 +331,8 @@ func processFileName(compressedFileName string) (origFileName string, extension
|
||||
if err != nil {
|
||||
return "", "", 0, errors.New("could not decode size")
|
||||
}
|
||||
return match[1], gzFileExt, size, nil
|
||||
ext := modeHandler.processFileNameGetFileExtension(compressionModeFromName(compressedFileName[extensionPos+1:]))
|
||||
return match[1], ext, size, nil
|
||||
}
|
||||
|
||||
// Generates the file name for a metadata file
|
||||
@@ -283,11 +357,15 @@ func unwrapMetadataFile(filename string) (string, bool) {
|
||||
|
||||
// makeDataName generates the file name for a data file with specified compression mode
|
||||
func makeDataName(remote string, size int64, mode int) (newRemote string) {
|
||||
if mode != Uncompressed {
|
||||
switch mode {
|
||||
case Gzip:
|
||||
newRemote = remote + "." + int64ToBase64(size) + gzFileExt
|
||||
} else {
|
||||
case Zstd:
|
||||
newRemote = remote + "." + int64ToBase64(size) + zstdFileExt
|
||||
default:
|
||||
newRemote = remote + uncompressedFileExt
|
||||
}
|
||||
|
||||
return newRemote
|
||||
}
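For illustration, the names produced by makeDataName are the original name, an 11-character URL-safe base64 encoding of the uncompressed size (the group nameRegexp matches), and a mode-dependent extension. The sketch below assumes one plausible encoding for int64ToBase64 (8 bytes, big-endian, unpadded URL-safe base64); only the overall shape of the name is the point.

package main

import (
    "encoding/base64"
    "encoding/binary"
    "fmt"
)

// encodeSize is a stand-in for int64ToBase64: 8 bytes encoded as unpadded
// URL-safe base64, which yields the 11-character group nameRegexp expects.
// Big-endian byte order is an assumption made for this illustration.
func encodeSize(size int64) string {
    var b [8]byte
    binary.BigEndian.PutUint64(b[:], uint64(size))
    return base64.RawURLEncoding.EncodeToString(b[:])
}

func main() {
    size := int64(1234567)
    // Shapes produced by makeDataName for each mode (illustrative):
    fmt.Println("report.csv." + encodeSize(size) + ".gz")  // gzip
    fmt.Println("report.csv." + encodeSize(size) + ".zst") // zstd
    fmt.Println("report.csv.bin")                          // uncompressed
}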
|
||||
|
||||
@@ -301,7 +379,7 @@ func (f *Fs) dataName(remote string, size int64, compressed bool) (name string)
|
||||
|
||||
// addData parses an object and adds it to the DirEntries
|
||||
func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) {
|
||||
origFileName, _, size, err := processFileName(o.Remote())
|
||||
origFileName, _, size, err := processFileName(o.Remote(), f.modeHandler)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Error on parsing file name: %v", err)
|
||||
return
|
||||
@@ -352,11 +430,39 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
|
||||
// found.
|
||||
// List entries and process them
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
entries, err = f.Fs.List(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||
wrappedCallback := func(entries fs.DirEntries) error {
|
||||
entries, err := f.processEntries(entries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return callback(entries)
|
||||
}
|
||||
return f.processEntries(entries)
|
||||
listP := f.Fs.Features().ListP
|
||||
if listP == nil {
|
||||
entries, err := f.Fs.List(ctx, dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return wrappedCallback(entries)
|
||||
}
|
||||
return listP(ctx, dir, wrappedCallback)
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
@@ -396,8 +502,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding metadata: %w", err)
|
||||
}
|
||||
size, err := f.modeHandler.newObjectGetOriginalSize(meta)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading metadata: %w", err)
|
||||
}
|
||||
// Create our Object
|
||||
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
|
||||
o, err := f.Fs.NewObject(ctx, makeDataName(remote, size, meta.Mode))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -406,7 +516,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
|
||||
// checkCompressAndType checks if an object is compressible and determines its mime type
|
||||
// returns a multireader with the bytes that were read to determine mime type
|
||||
func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool, mimeType string, err error) {
|
||||
func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compressionModeHandler) (newReader io.Reader, compressible bool, mimeType string, err error) {
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
buf := make([]byte, heuristicBytes)
|
||||
n, err := in.Read(buf)
|
||||
@@ -415,7 +525,7 @@ func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool,
|
||||
return nil, false, "", err
|
||||
}
|
||||
mime := mimetype.Detect(buf)
|
||||
compressible, err = isCompressible(bytes.NewReader(buf))
|
||||
compressible, err = modeHandler.isCompressible(bytes.NewReader(buf), compressionMode)
|
||||
if err != nil {
|
||||
return nil, false, "", err
|
||||
}
|
||||
@@ -423,26 +533,6 @@ func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool,
|
||||
return wrap(in), compressible, mime.String(), nil
|
||||
}
|
||||
|
||||
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
||||
// the configured threshold
|
||||
func isCompressible(r io.Reader) (bool, error) {
|
||||
var b bytes.Buffer
|
||||
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
n, err := io.Copy(w, r)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
ratio := float64(n) / float64(b.Len())
|
||||
return ratio > minCompressionRatio, nil
|
||||
}
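The heuristic itself survives the refactor unchanged inside the gzip and zstd handlers: compress a small sample and only treat the file as compressible if the input-to-output ratio beats minCompressionRatio (1.1). A standalone sketch of the same idea, using the standard library gzip writer rather than sgzip:

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
    "strings"
)

// looksCompressible compresses a sample and compares input size to output size,
// the same ratio test as isCompressible above.
func looksCompressible(sample io.Reader, minRatio float64) (bool, error) {
    var out bytes.Buffer
    w := gzip.NewWriter(&out)
    n, err := io.Copy(w, sample)
    if err != nil {
        return false, err
    }
    if err := w.Close(); err != nil {
        return false, err
    }
    return float64(n)/float64(out.Len()) > minRatio, nil
}

func main() {
    ok, _ := looksCompressible(strings.NewReader(strings.Repeat("rclone ", 1000)), 1.1)
    fmt.Println(ok) // true: repetitive text compresses far past the 1.1 threshold
}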
|
||||
|
||||
// verifyObjectHash verifies the Objects hash
|
||||
func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) error {
|
||||
srcHash := hasher.Sums()[ht]
|
||||
@@ -463,9 +553,9 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
|
||||
|
||||
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
|
||||
|
||||
type compressionResult struct {
|
||||
type compressionResult[T sgzip.GzipMetadata | SzstdMetadata] struct {
|
||||
err error
|
||||
meta sgzip.GzipMetadata
|
||||
meta T
|
||||
}
|
||||
|
||||
// replicating some of operations.Rcat functionality because we want to support remotes without streaming
|
||||
@@ -506,106 +596,18 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
|
||||
return nil, fmt.Errorf("failed to write temporary local file: %w", err)
|
||||
}
|
||||
if _, err = tempFile.Seek(0, 0); err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to seek temporary local file: %w", err)
|
||||
}
|
||||
finfo, err := tempFile.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to stat temporary local file: %w", err)
|
||||
}
|
||||
return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs))
|
||||
}
|
||||
|
||||
// Put a compressed version of a file. Returns a wrappable object and metadata.
|
||||
func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) {
|
||||
// Unwrap reader accounting
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
|
||||
// Add the metadata hasher
|
||||
metaHasher := md5.New()
|
||||
in = io.TeeReader(in, metaHasher)
|
||||
|
||||
// Compress the file
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
results := make(chan compressionResult)
|
||||
go func() {
|
||||
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
|
||||
if err != nil {
|
||||
results <- compressionResult{err: err, meta: sgzip.GzipMetadata{}}
|
||||
return
|
||||
}
|
||||
_, err = io.Copy(gz, in)
|
||||
gzErr := gz.Close()
|
||||
if gzErr != nil {
|
||||
fs.Errorf(nil, "Failed to close compress: %v", gzErr)
|
||||
if err == nil {
|
||||
err = gzErr
|
||||
}
|
||||
}
|
||||
closeErr := pipeWriter.Close()
|
||||
if closeErr != nil {
|
||||
fs.Errorf(nil, "Failed to close pipe: %v", closeErr)
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}
|
||||
results <- compressionResult{err: err, meta: gz.MetaData()}
|
||||
}()
|
||||
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering
|
||||
|
||||
// Find a hash the destination supports to compute a hash of
|
||||
// the compressed data.
|
||||
ht := f.Fs.Hashes().GetOne()
|
||||
var hasher *hash.MultiHasher
|
||||
var err error
|
||||
if ht != hash.None {
|
||||
// unwrap the accounting again
|
||||
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
|
||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// add the hasher and re-wrap the accounting
|
||||
wrappedIn = io.TeeReader(wrappedIn, hasher)
|
||||
wrappedIn = wrap(wrappedIn)
|
||||
}
|
||||
|
||||
// Transfer the data
|
||||
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
|
||||
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
|
||||
if err != nil {
|
||||
if o != nil {
|
||||
removeErr := o.Remove(ctx)
|
||||
if removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove partially transferred object: %v", err)
|
||||
}
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
// Check whether we got an error during compression
|
||||
result := <-results
|
||||
err = result.err
|
||||
if err != nil {
|
||||
if o != nil {
|
||||
removeErr := o.Remove(ctx)
|
||||
if removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove partially compressed object: %v", err)
|
||||
}
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Generate metadata
|
||||
meta := newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
|
||||
|
||||
// Check the hashes of the compressed data if we were comparing them
|
||||
if ht != hash.None && hasher != nil {
|
||||
err = f.verifyObjectHash(ctx, o, hasher, ht)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return o, meta, nil
|
||||
return f.modeHandler.putCompress(ctx, f, in, src, options, mimeType)
|
||||
}
|
||||
|
||||
// Put an uncompressed version of a file. Returns a wrappable object and metadata.
|
||||
@@ -649,7 +651,8 @@ func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return o, newMetadata(o.Size(), Uncompressed, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
|
||||
|
||||
return f.modeHandler.putUncompressGetNewMetadata(o, Uncompressed, hex.EncodeToString(sum), mimeType, sum)
|
||||
}
|
||||
|
||||
// This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object.
|
||||
@@ -720,7 +723,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
o, err := f.NewObject(ctx, src.Remote())
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// Get our file compressibility
|
||||
in, compressible, mimeType, err := checkCompressAndType(in)
|
||||
in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -740,7 +743,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
}
|
||||
found := err == nil
|
||||
|
||||
in, compressible, mimeType, err := checkCompressAndType(in)
|
||||
in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1059,11 +1062,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, duration fs.Duration
|
||||
|
||||
// ObjectMetadata describes the metadata for an Object.
|
||||
type ObjectMetadata struct {
|
||||
Mode int // Compression mode of the file.
|
||||
Size int64 // Size of the object.
|
||||
MD5 string // MD5 hash of the file.
|
||||
MimeType string // Mime type of the file
|
||||
CompressionMetadata sgzip.GzipMetadata
|
||||
Mode int // Compression mode of the file.
|
||||
Size int64 // Size of the object.
|
||||
MD5 string // MD5 hash of the file.
|
||||
MimeType string // Mime type of the file
|
||||
CompressionMetadataGzip *sgzip.GzipMetadata // Metadata for Gzip compression
|
||||
CompressionMetadataZstd *SzstdMetadata // Metadata for Zstd compression
|
||||
}
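With this change, at most one of the two compression metadata pointers is populated for a given object, and both stay nil for uncompressed data. A rough, hypothetical sketch of what the stored .json metadata then serialises to; the mirror struct below only echoes the field names above, while the real inner types come from sgzip.GzipMetadata and SzstdMetadata.

package main

import (
    "encoding/json"
    "fmt"
)

// compressionStub stands in for *sgzip.GzipMetadata / *SzstdMetadata.
type compressionStub struct {
    Size int64
}

// objectMetadata mirrors the new ObjectMetadata field names for illustration.
type objectMetadata struct {
    Mode                    int
    Size                    int64
    MD5                     string
    MimeType                string
    CompressionMetadataGzip *compressionStub
    CompressionMetadataZstd *compressionStub
}

func main() {
    // A zstd-compressed object: Mode 4, only the zstd pointer is set.
    m := objectMetadata{
        Mode:                    4,
        Size:                    1048576,
        MD5:                     "<md5 of the uncompressed data>",
        MimeType:                "text/plain",
        CompressionMetadataZstd: &compressionStub{Size: 1048576},
    }
    out, _ := json.MarshalIndent(m, "", "  ")
    fmt.Println(string(out)) // CompressionMetadataGzip serialises as null
}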
|
||||
|
||||
// Object with external metadata
|
||||
@@ -1076,17 +1080,6 @@ type Object struct {
|
||||
meta *ObjectMetadata // Metadata struct for this object (nil if not loaded)
|
||||
}
|
||||
|
||||
// This function generates a metadata object
|
||||
func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mimeType string) *ObjectMetadata {
|
||||
meta := new(ObjectMetadata)
|
||||
meta.Size = size
|
||||
meta.Mode = mode
|
||||
meta.CompressionMetadata = cmeta
|
||||
meta.MD5 = md5
|
||||
meta.MimeType = mimeType
|
||||
return meta
|
||||
}
|
||||
|
||||
// This function will read the metadata from a metadata object.
|
||||
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
|
||||
// Open our metadata object
|
||||
@@ -1134,7 +1127,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return o.mo, o.mo.Update(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
in, compressible, mimeType, err := checkCompressAndType(in)
|
||||
in, compressible, mimeType, err := checkCompressAndType(in, o.meta.Mode, o.f.modeHandler)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1247,7 +1240,7 @@ func (o *Object) String() string {
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
origFileName, _, _, err := processFileName(o.Object.Remote())
|
||||
origFileName, _, _, err := processFileName(o.Object.Remote(), o.f.modeHandler)
|
||||
if err != nil {
|
||||
fs.Errorf(o.f, "Could not get remote path for: %s", o.Object.Remote())
|
||||
return o.Object.Remote()
|
||||
@@ -1350,7 +1343,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
||||
return o.Object.Open(ctx, options...)
|
||||
}
|
||||
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
|
||||
var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
@@ -1358,31 +1350,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.Size())
|
||||
default:
|
||||
openOptions = append(openOptions, option)
|
||||
}
|
||||
}
|
||||
// Get a chunkedreader for the wrapped object
|
||||
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
|
||||
// Get file handle
|
||||
var file io.Reader
|
||||
if offset != 0 {
|
||||
file, err = sgzip.NewReaderAt(chunkedReader, &o.meta.CompressionMetadata, offset)
|
||||
} else {
|
||||
file, err = sgzip.NewReader(chunkedReader)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var fileReader io.Reader
|
||||
if limit != -1 {
|
||||
fileReader = io.LimitReader(file, limit)
|
||||
} else {
|
||||
fileReader = file
|
||||
}
|
||||
// Return a ReadCloser
|
||||
return ReadCloserWrapper{Reader: fileReader, Closer: chunkedReader}, nil
|
||||
var retCloser io.Closer = chunkedReader
|
||||
return o.f.modeHandler.openGetReadCloser(ctx, o, offset, limit, chunkedReader, retCloser, options...)
|
||||
}
|
||||
|
||||
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
||||
|
||||
@@ -48,7 +48,27 @@ func TestRemoteGzip(t *testing.T) {
|
||||
opt.ExtraConfig = []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "compress"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "compression_mode", Value: "gzip"},
|
||||
{Name: name, Key: "mode", Value: "gzip"},
|
||||
{Name: name, Key: "level", Value: "-1"},
|
||||
}
|
||||
opt.QuickTestOK = true
|
||||
fstests.Run(t, &opt)
|
||||
}
|
||||
|
||||
// TestRemoteZstd tests ZSTD compression
|
||||
func TestRemoteZstd(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-zstd")
|
||||
name := "TestCompressZstd"
|
||||
opt := defaultOpt
|
||||
opt.RemoteName = name + ":"
|
||||
opt.ExtraConfig = []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "compress"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "mode", Value: "zstd"},
|
||||
{Name: name, Key: "level", Value: "2"},
|
||||
}
|
||||
opt.QuickTestOK = true
|
||||
fstests.Run(t, &opt)
|
||||
|
||||
backend/compress/gzip_handler.go (new file, 207 lines added)
@@ -0,0 +1,207 @@
|
||||
package compress
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/buengese/sgzip"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/chunkedreader"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
)
|
||||
|
||||
// gzipModeHandler implements compressionModeHandler for gzip
|
||||
type gzipModeHandler struct{}
|
||||
|
||||
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
||||
// the configured threshold
|
||||
func (g *gzipModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
|
||||
var b bytes.Buffer
|
||||
var n int64
|
||||
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
n, err = io.Copy(w, r)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
ratio := float64(n) / float64(b.Len())
|
||||
return ratio > minCompressionRatio, nil
|
||||
}
|
||||
|
||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
||||
func (g *gzipModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
|
||||
if meta.CompressionMetadataGzip == nil {
|
||||
return 0, errors.New("missing gzip metadata")
|
||||
}
|
||||
return meta.CompressionMetadataGzip.Size, nil
|
||||
}
|
||||
|
||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
||||
func (g *gzipModeHandler) openGetReadCloser(
|
||||
ctx context.Context,
|
||||
o *Object,
|
||||
offset int64,
|
||||
limit int64,
|
||||
cr chunkedreader.ChunkedReader,
|
||||
closer io.Closer,
|
||||
options ...fs.OpenOption,
|
||||
) (rc io.ReadCloser, err error) {
|
||||
var file io.Reader
|
||||
|
||||
if offset != 0 {
|
||||
file, err = sgzip.NewReaderAt(cr, o.meta.CompressionMetadataGzip, offset)
|
||||
} else {
|
||||
file, err = sgzip.NewReader(cr)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var fileReader io.Reader
|
||||
if limit != -1 {
|
||||
fileReader = io.LimitReader(file, limit)
|
||||
} else {
|
||||
fileReader = file
|
||||
}
|
||||
// Return a ReadCloser
|
||||
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
|
||||
}
|
||||
|
||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
||||
func (g *gzipModeHandler) processFileNameGetFileExtension(compressionMode int) string {
|
||||
if compressionMode == Gzip {
|
||||
return gzFileExt
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
||||
func (g *gzipModeHandler) putCompress(
|
||||
ctx context.Context,
|
||||
f *Fs,
|
||||
in io.Reader,
|
||||
src fs.ObjectInfo,
|
||||
options []fs.OpenOption,
|
||||
mimeType string,
|
||||
) (fs.Object, *ObjectMetadata, error) {
|
||||
// Unwrap reader accounting
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
|
||||
// Add the metadata hasher
|
||||
metaHasher := md5.New()
|
||||
in = io.TeeReader(in, metaHasher)
|
||||
|
||||
// Compress the file
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
|
||||
resultsGzip := make(chan compressionResult[sgzip.GzipMetadata])
|
||||
go func() {
|
||||
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
|
||||
if err != nil {
|
||||
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: sgzip.GzipMetadata{}}
|
||||
close(resultsGzip)
|
||||
return
|
||||
}
|
||||
_, err = io.Copy(gz, in)
|
||||
gzErr := gz.Close()
|
||||
if gzErr != nil && err == nil {
|
||||
err = gzErr
|
||||
}
|
||||
closeErr := pipeWriter.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: gz.MetaData()}
|
||||
close(resultsGzip)
|
||||
}()
|
||||
|
||||
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering
|
||||
|
||||
// Find a hash the destination supports to compute a hash of
|
||||
// the compressed data.
|
||||
ht := f.Fs.Hashes().GetOne()
|
||||
var hasher *hash.MultiHasher
|
||||
var err error
|
||||
if ht != hash.None {
|
||||
// unwrap the accounting again
|
||||
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
|
||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// add the hasher and re-wrap the accounting
|
||||
wrappedIn = io.TeeReader(wrappedIn, hasher)
|
||||
wrappedIn = wrap(wrappedIn)
|
||||
}
|
||||
|
||||
// Transfer the data
|
||||
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
|
||||
if err != nil {
|
||||
if o != nil {
|
||||
if removeErr := o.Remove(ctx); removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove partially transferred object: %v", removeErr)
|
||||
}
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
// Check whether we got an error during compression
|
||||
result := <-resultsGzip
|
||||
if result.err != nil {
|
||||
if o != nil {
|
||||
if removeErr := o.Remove(ctx); removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove partially compressed object: %v", removeErr)
|
||||
}
|
||||
}
|
||||
return nil, nil, result.err
|
||||
}
|
||||
|
||||
// Generate metadata
|
||||
meta := g.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
|
||||
|
||||
// Check the hashes of the compressed data if we were comparing them
|
||||
if ht != hash.None && hasher != nil {
|
||||
err = f.verifyObjectHash(ctx, o, hasher, ht)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
return o, meta, nil
|
||||
}
|
||||
|
||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
||||
func (g *gzipModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
|
||||
return o, g.newMetadata(o.Size(), mode, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
|
||||
}
|
||||
|
||||
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
|
||||
// Warning: This function panics if cmeta is not of the expected type.
|
||||
func (g *gzipModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
|
||||
meta, ok := cmeta.(sgzip.GzipMetadata)
|
||||
if !ok {
|
||||
panic("invalid cmeta type: expected sgzip.GzipMetadata")
|
||||
}
|
||||
|
||||
objMeta := new(ObjectMetadata)
|
||||
objMeta.Size = size
|
||||
objMeta.Mode = mode
|
||||
objMeta.CompressionMetadataGzip = &meta
|
||||
objMeta.CompressionMetadataZstd = nil
|
||||
objMeta.MD5 = md5
|
||||
objMeta.MimeType = mimeType
|
||||
|
||||
return objMeta
|
||||
}
|
||||
backend/compress/szstd_helper.go (new file, 327 lines added)
@@ -0,0 +1,327 @@
|
||||
package compress
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
szstd "github.com/a1ex3/zstd-seekable-format-go/pkg"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
)
|
||||
|
||||
const szstdChunkSize int = 1 << 20 // 1 MiB chunk size
|
||||
|
||||
// SzstdMetadata holds metadata for szstd compressed files.
|
||||
type SzstdMetadata struct {
|
||||
BlockSize int // BlockSize is the size of the blocks in the zstd file
|
||||
Size int64 // Size is the uncompressed size of the file
|
||||
BlockData []uint32 // BlockData is the block data for the zstd file, used for seeking
|
||||
}
|
||||
|
||||
// SzstdWriter is a writer that compresses data in szstd format.
|
||||
type SzstdWriter struct {
|
||||
enc *zstd.Encoder
|
||||
w szstd.ConcurrentWriter
|
||||
metadata SzstdMetadata
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// NewWriterSzstd creates a new szstd writer with the specified options.
|
||||
// It initializes the szstd writer with a zstd encoder and returns a pointer to the SzstdWriter.
|
||||
// The writer can be used to write data in chunks, and it will automatically handle block sizes and metadata.
|
||||
func NewWriterSzstd(w io.Writer, opts ...zstd.EOption) (*SzstdWriter, error) {
|
||||
encoder, err := zstd.NewWriter(nil, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sw, err := szstd.NewWriter(w, encoder)
|
||||
if err != nil {
|
||||
if err := encoder.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &SzstdWriter{
|
||||
enc: encoder,
|
||||
w: sw,
|
||||
metadata: SzstdMetadata{
|
||||
BlockSize: szstdChunkSize,
|
||||
Size: 0,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Write writes data to the szstd writer in chunks of szstdChunkSize.
|
||||
// It handles the block size and metadata updates automatically.
|
||||
func (w *SzstdWriter) Write(p []byte) (int, error) {
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if w.metadata.BlockData == nil {
|
||||
numBlocks := (len(p) + w.metadata.BlockSize - 1) / w.metadata.BlockSize
|
||||
w.metadata.BlockData = make([]uint32, 1, numBlocks+1)
|
||||
w.metadata.BlockData[0] = 0
|
||||
}
|
||||
|
||||
start := 0
|
||||
total := len(p)
|
||||
|
||||
var writerFunc szstd.FrameSource = func() ([]byte, error) {
|
||||
if start >= total {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
end := min(start+w.metadata.BlockSize, total)
|
||||
chunk := p[start:end]
|
||||
size := end - start
|
||||
|
||||
w.mu.Lock()
|
||||
w.metadata.Size += int64(size)
|
||||
w.mu.Unlock()
|
||||
|
||||
start = end
|
||||
return chunk, nil
|
||||
}
|
||||
|
||||
// write sizes of compressed blocks in the callback
|
||||
err := w.w.WriteMany(context.Background(), writerFunc,
|
||||
szstd.WithWriteCallback(func(size uint32) {
|
||||
w.mu.Lock()
|
||||
lastOffset := w.metadata.BlockData[len(w.metadata.BlockData)-1]
|
||||
w.metadata.BlockData = append(w.metadata.BlockData, lastOffset+size)
|
||||
w.mu.Unlock()
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return total, nil
|
||||
}
|
||||
|
||||
// Close closes the SzstdWriter and its underlying encoder.
|
||||
func (w *SzstdWriter) Close() error {
|
||||
if err := w.w.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.enc.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetMetadata returns the metadata of the szstd writer.
|
||||
func (w *SzstdWriter) GetMetadata() SzstdMetadata {
|
||||
return w.metadata
|
||||
}
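A short usage sketch of the writer above, assuming the surrounding compress package (plus a bytes import) and the szstd/zstd dependencies, with error handling kept minimal. It shows the lifecycle that putCompress relies on: write, close, then read back the metadata that later drives seeking.

// Illustrative only: exercises NewWriterSzstd, Write, Close and GetMetadata as
// defined in this file. Needs an extra "bytes" import if dropped into the package.
func exampleSzstdWrite(src []byte) (SzstdMetadata, []byte, error) {
    var compressed bytes.Buffer
    w, err := NewWriterSzstd(&compressed, zstd.WithEncoderLevel(zstd.SpeedDefault))
    if err != nil {
        return SzstdMetadata{}, nil, err
    }
    if _, err := w.Write(src); err != nil {
        return SzstdMetadata{}, nil, err
    }
    if err := w.Close(); err != nil {
        return SzstdMetadata{}, nil, err
    }
    meta := w.GetMetadata()
    // meta.Size equals len(src); meta.BlockData holds cumulative compressed
    // offsets, one entry per 1 MiB chunk written plus the leading zero.
    return meta, compressed.Bytes(), nil
}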
|
||||
|
||||
// SzstdReaderAt is a reader that allows random access in szstd compressed data.
|
||||
type SzstdReaderAt struct {
|
||||
r szstd.Reader
|
||||
decoder *zstd.Decoder
|
||||
metadata *SzstdMetadata
|
||||
pos int64
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// NewReaderAtSzstd creates a new SzstdReaderAt reading from the given io.ReadSeeker, positioned at the supplied offset.
|
||||
func NewReaderAtSzstd(rs io.ReadSeeker, meta *SzstdMetadata, offset int64, opts ...zstd.DOption) (*SzstdReaderAt, error) {
|
||||
decoder, err := zstd.NewReader(nil, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r, err := szstd.NewReader(rs, decoder)
|
||||
if err != nil {
|
||||
decoder.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sr := &SzstdReaderAt{
|
||||
r: r,
|
||||
decoder: decoder,
|
||||
metadata: meta,
|
||||
pos: 0,
|
||||
}
|
||||
|
||||
// Set initial position to the provided offset
|
||||
if _, err := sr.Seek(offset, io.SeekStart); err != nil {
|
||||
if err := sr.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sr, nil
|
||||
}
|
||||
|
||||
// Seek sets the offset for the next Read.
|
||||
func (s *SzstdReaderAt) Seek(offset int64, whence int) (int64, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
pos, err := s.r.Seek(offset, whence)
|
||||
if err == nil {
|
||||
s.pos = pos
|
||||
}
|
||||
return pos, err
|
||||
}
|
||||
|
||||
func (s *SzstdReaderAt) Read(p []byte) (int, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
n, err := s.r.Read(p)
|
||||
if err == nil {
|
||||
s.pos += int64(n)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// ReadAt reads data at the specified offset.
|
||||
func (s *SzstdReaderAt) ReadAt(p []byte, off int64) (int, error) {
|
||||
if off < 0 {
|
||||
return 0, errors.New("invalid offset")
|
||||
}
|
||||
if off >= s.metadata.Size {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
endOff := min(off+int64(len(p)), s.metadata.Size)
|
||||
|
||||
// Find all blocks covered by the range
|
||||
type blockInfo struct {
|
||||
index int // Block index
|
||||
offsetInBlock int64 // Offset within the block for starting reading
|
||||
bytesToRead int64 // How many bytes to read from this block
|
||||
}
|
||||
|
||||
var blocks []blockInfo
|
||||
uncompressedOffset := int64(0)
|
||||
currentOff := off
|
||||
|
||||
for i := 0; i < len(s.metadata.BlockData)-1; i++ {
|
||||
blockUncompressedEnd := min(uncompressedOffset+int64(s.metadata.BlockSize), s.metadata.Size)
|
||||
|
||||
if currentOff < blockUncompressedEnd && endOff > uncompressedOffset {
|
||||
offsetInBlock := max(0, currentOff-uncompressedOffset)
|
||||
bytesToRead := min(blockUncompressedEnd-uncompressedOffset-offsetInBlock, endOff-currentOff)
|
||||
|
||||
blocks = append(blocks, blockInfo{
|
||||
index: i,
|
||||
offsetInBlock: offsetInBlock,
|
||||
bytesToRead: bytesToRead,
|
||||
})
|
||||
|
||||
currentOff += bytesToRead
|
||||
if currentOff >= endOff {
|
||||
break
|
||||
}
|
||||
}
|
||||
uncompressedOffset = blockUncompressedEnd
|
||||
}
|
||||
|
||||
if len(blocks) == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
// Parallel block decoding
|
||||
type decodeResult struct {
|
||||
index int
|
||||
data []byte
|
||||
err error
|
||||
}
|
||||
|
||||
resultCh := make(chan decodeResult, len(blocks))
|
||||
var wg sync.WaitGroup
|
||||
sem := make(chan struct{}, runtime.NumCPU())
|
||||
|
||||
for _, block := range blocks {
|
||||
wg.Add(1)
|
||||
go func(block blockInfo) {
|
||||
defer wg.Done()
|
||||
sem <- struct{}{}
|
||||
defer func() { <-sem }()
|
||||
|
||||
startOffset := int64(s.metadata.BlockData[block.index])
|
||||
endOffset := int64(s.metadata.BlockData[block.index+1])
|
||||
compressedSize := endOffset - startOffset
|
||||
|
||||
compressed := make([]byte, compressedSize)
|
||||
_, err := s.r.ReadAt(compressed, startOffset)
|
||||
if err != nil && err != io.EOF {
|
||||
resultCh <- decodeResult{index: block.index, err: err}
|
||||
return
|
||||
}
|
||||
|
||||
decoded, err := s.decoder.DecodeAll(compressed, nil)
|
||||
if err != nil {
|
||||
resultCh <- decodeResult{index: block.index, err: err}
|
||||
return
|
||||
}
|
||||
|
||||
resultCh <- decodeResult{index: block.index, data: decoded, err: nil}
|
||||
}(block)
|
||||
}
|
||||
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(resultCh)
|
||||
}()
|
||||
|
||||
// Collect results in block index order
|
||||
totalRead := 0
|
||||
results := make(map[int]decodeResult)
|
||||
expected := len(blocks)
|
||||
minIndex := blocks[0].index
|
||||
|
||||
for res := range resultCh {
|
||||
results[res.index] = res
|
||||
for {
|
||||
if result, ok := results[minIndex]; ok {
|
||||
if result.err != nil {
|
||||
return 0, result.err
|
||||
}
|
||||
// find the corresponding blockInfo
|
||||
var blk blockInfo
|
||||
for _, b := range blocks {
|
||||
if b.index == result.index {
|
||||
blk = b
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
start := blk.offsetInBlock
|
||||
end := start + blk.bytesToRead
|
||||
copy(p[totalRead:totalRead+int(blk.bytesToRead)], result.data[start:end])
|
||||
totalRead += int(blk.bytesToRead)
|
||||
minIndex++
|
||||
if minIndex-blocks[0].index >= len(blocks) {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(results) == expected && minIndex-blocks[0].index >= len(blocks) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return totalRead, nil
|
||||
}
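The block arithmetic above is easier to see in isolation: block i covers uncompressed bytes [i*BlockSize, (i+1)*BlockSize) and compressed bytes [BlockData[i], BlockData[i+1]). A standalone sketch of that lookup with made-up numbers:

package main

import "fmt"

// blockRange mirrors the lookup ReadAt performs: it maps an uncompressed
// offset to the block index and the compressed byte range holding that block.
func blockRange(off, blockSize int64, blockData []uint32) (index int, compStart, compEnd int64) {
    index = int(off / blockSize)
    return index, int64(blockData[index]), int64(blockData[index+1])
}

func main() {
    blockSize := int64(1 << 20)              // 1 MiB, matching szstdChunkSize
    blockData := []uint32{0, 400000, 750000} // cumulative compressed sizes of two blocks
    i, start, end := blockRange(1500000, blockSize, blockData)
    fmt.Println(i, start, end) // 1 400000 750000
}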
|
||||
|
||||
// Close closes the SzstdReaderAt and underlying decoder.
|
||||
func (s *SzstdReaderAt) Close() error {
|
||||
if err := s.r.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
s.decoder.Close()
|
||||
return nil
|
||||
}
|
||||
backend/compress/uncompressed_handler.go (new file, 65 lines added)
@@ -0,0 +1,65 @@
package compress

import (
    "context"
    "fmt"
    "io"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/chunkedreader"
)

// uncompressedModeHandler implements compressionModeHandler for uncompressed files
type uncompressedModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (u *uncompressedModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
    return false, nil
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (u *uncompressedModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
    return 0, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (u *uncompressedModeHandler) openGetReadCloser(
    ctx context.Context,
    o *Object,
    offset int64,
    limit int64,
    cr chunkedreader.ChunkedReader,
    closer io.Closer,
    options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
    return o.Object.Open(ctx, options...)
}

// processFileNameGetFileExtension returns the file extension for the given compression mode
func (u *uncompressedModeHandler) processFileNameGetFileExtension(compressionMode int) string {
    return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (u *uncompressedModeHandler) putCompress(
    ctx context.Context,
    f *Fs,
    in io.Reader,
    src fs.ObjectInfo,
    options []fs.OpenOption,
    mimeType string,
) (fs.Object, *ObjectMetadata, error) {
    return nil, nil, fmt.Errorf("unsupported compression mode %d", f.mode)
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (u *uncompressedModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
    return nil, nil, fmt.Errorf("unsupported compression mode %d", Uncompressed)
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (u *uncompressedModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
    return nil
}
backend/compress/unknown_handler.go (new file, 65 lines added)
@@ -0,0 +1,65 @@
package compress

import (
    "context"
    "fmt"
    "io"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/chunkedreader"
)

// unknownModeHandler implements compressionModeHandler for unknown compression types
type unknownModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (unk *unknownModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
    return false, fmt.Errorf("unknown compression mode %d", compressionMode)
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (unk *unknownModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
    return 0, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (unk *unknownModeHandler) openGetReadCloser(
    ctx context.Context,
    o *Object,
    offset int64,
    limit int64,
    cr chunkedreader.ChunkedReader,
    closer io.Closer,
    options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
    return nil, fmt.Errorf("unknown compression mode %d", o.meta.Mode)
}

// processFileNameGetFileExtension returns the file extension for the given compression mode
func (unk *unknownModeHandler) processFileNameGetFileExtension(compressionMode int) string {
    return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (unk *unknownModeHandler) putCompress(
    ctx context.Context,
    f *Fs,
    in io.Reader,
    src fs.ObjectInfo,
    options []fs.OpenOption,
    mimeType string,
) (fs.Object, *ObjectMetadata, error) {
    return nil, nil, fmt.Errorf("unknown compression mode %d", f.mode)
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (unk *unknownModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
    return nil, nil, fmt.Errorf("unknown compression mode")
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (unk *unknownModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
    return nil
}
backend/compress/zstd_handler.go (new file, 192 lines added)
@@ -0,0 +1,192 @@
|
||||
package compress
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/klauspost/compress/zstd"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/chunkedreader"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
)
|
||||
|
||||
// zstdModeHandler implements compressionModeHandler for zstd
|
||||
type zstdModeHandler struct{}
|
||||
|
||||
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
||||
// the configured threshold
|
||||
func (z *zstdModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
|
||||
var b bytes.Buffer
|
||||
var n int64
|
||||
w, err := NewWriterSzstd(&b, zstd.WithEncoderLevel(zstd.SpeedDefault))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
n, err = io.Copy(w, r)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
ratio := float64(n) / float64(b.Len())
|
||||
return ratio > minCompressionRatio, nil
|
||||
}
|
||||
|
||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
||||
func (z *zstdModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
|
||||
if meta.CompressionMetadataZstd == nil {
|
||||
return 0, errors.New("missing zstd metadata")
|
||||
}
|
||||
return meta.CompressionMetadataZstd.Size, nil
|
||||
}
|
||||
|
||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
||||
func (z *zstdModeHandler) openGetReadCloser(
|
||||
ctx context.Context,
|
||||
o *Object,
|
||||
offset int64,
|
||||
limit int64,
|
||||
cr chunkedreader.ChunkedReader,
|
||||
closer io.Closer,
|
||||
options ...fs.OpenOption,
|
||||
) (rc io.ReadCloser, err error) {
|
||||
var file io.Reader
|
||||
|
||||
if offset != 0 {
|
||||
file, err = NewReaderAtSzstd(cr, o.meta.CompressionMetadataZstd, offset)
|
||||
} else {
|
||||
file, err = zstd.NewReader(cr)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var fileReader io.Reader
|
||||
if limit != -1 {
|
||||
fileReader = io.LimitReader(file, limit)
|
||||
} else {
|
||||
fileReader = file
|
||||
}
|
||||
// Return a ReadCloser
|
||||
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
|
||||
}
|
||||
|
||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
||||
func (z *zstdModeHandler) processFileNameGetFileExtension(compressionMode int) string {
|
||||
if compressionMode == Zstd {
|
||||
return zstdFileExt
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
||||
func (z *zstdModeHandler) putCompress(
|
||||
ctx context.Context,
|
||||
f *Fs,
|
||||
in io.Reader,
|
||||
src fs.ObjectInfo,
|
||||
options []fs.OpenOption,
|
||||
mimeType string,
|
||||
) (fs.Object, *ObjectMetadata, error) {
|
||||
// Unwrap reader accounting
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
|
||||
// Add the metadata hasher
|
||||
metaHasher := md5.New()
|
||||
in = io.TeeReader(in, metaHasher)
|
||||
|
||||
// Compress the file
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
|
||||
resultsZstd := make(chan compressionResult[SzstdMetadata])
|
||||
go func() {
|
||||
writer, err := NewWriterSzstd(pipeWriter, zstd.WithEncoderLevel(zstd.EncoderLevel(f.opt.CompressionLevel)))
|
||||
if err != nil {
|
||||
resultsZstd <- compressionResult[SzstdMetadata]{err: err}
|
||||
close(resultsZstd)
|
||||
return
|
||||
}
|
||||
_, err = io.Copy(writer, in)
|
||||
if wErr := writer.Close(); wErr != nil && err == nil {
|
||||
err = wErr
|
||||
}
|
||||
if cErr := pipeWriter.Close(); cErr != nil && err == nil {
|
||||
err = cErr
|
||||
}
|
||||
|
||||
resultsZstd <- compressionResult[SzstdMetadata]{err: err, meta: writer.GetMetadata()}
|
||||
close(resultsZstd)
|
||||
}()
|
||||
|
||||
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize))
|
||||
|
||||
ht := f.Fs.Hashes().GetOne()
|
||||
var hasher *hash.MultiHasher
|
||||
var err error
|
||||
if ht != hash.None {
|
||||
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
|
||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
wrappedIn = io.TeeReader(wrappedIn, hasher)
|
||||
wrappedIn = wrap(wrappedIn)
|
||||
}
|
||||
|
||||
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
result := <-resultsZstd
|
||||
if result.err != nil {
|
||||
if o != nil {
|
||||
_ = o.Remove(ctx)
|
||||
}
|
||||
return nil, nil, result.err
|
||||
}
|
||||
|
||||
// Build metadata using uncompressed size for filename
|
||||
meta := z.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
|
||||
if ht != hash.None && hasher != nil {
|
||||
err = f.verifyObjectHash(ctx, o, hasher, ht)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
return o, meta, nil
|
||||
}
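putCompress streams rather than buffers: a goroutine compresses into one end of an io.Pipe while the upload reads from the other, and the compression result comes back over a channel once the writer closes. A cut-down sketch of that producer/consumer shape, with io.Discard standing in for the upload (f.rcat) and the metadata omitted:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/klauspost/compress/zstd"
)

func main() {
	pr, pw := io.Pipe()
	errc := make(chan error, 1)

	// Producer: compress into the pipe and report the outcome once done.
	go func() {
		w, err := zstd.NewWriter(pw)
		if err != nil {
			_ = pw.CloseWithError(err)
			errc <- err
			return
		}
		_, err = io.Copy(w, strings.NewReader("data that would normally come from the caller"))
		if cErr := w.Close(); cErr != nil && err == nil {
			err = cErr
		}
		_ = pw.CloseWithError(err) // a nil error closes the pipe normally
		errc <- err
	}()

	// Consumer: stand-in for the upload; it just counts the compressed bytes.
	n, err := io.Copy(io.Discard, pr)
	if err == nil {
		err = <-errc // surface any compression error after the upload finishes
	}
	fmt.Println(n, "compressed bytes, err =", err)
}
```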
|
||||
|
||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
||||
func (z *zstdModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
|
||||
return o, z.newMetadata(o.Size(), mode, SzstdMetadata{}, hex.EncodeToString(sum), mimeType), nil
|
||||
}
|
||||
|
||||
// newMetadata generates an ObjectMetadata from an SzstdMetadata value.
// Warning: This function panics if cmeta is not an SzstdMetadata.
|
||||
func (z *zstdModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
|
||||
meta, ok := cmeta.(SzstdMetadata)
|
||||
if !ok {
|
||||
panic("invalid cmeta type: expected SzstdMetadata")
|
||||
}
|
||||
|
||||
objMeta := new(ObjectMetadata)
|
||||
objMeta.Size = size
|
||||
objMeta.Mode = mode
|
||||
objMeta.CompressionMetadataGzip = nil
|
||||
objMeta.CompressionMetadataZstd = &meta
|
||||
objMeta.MD5 = md5
|
||||
objMeta.MimeType = mimeType
|
||||
|
||||
return objMeta
|
||||
}
|
||||
@@ -192,7 +192,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 dirNameEncrypt: dirNameEncrypt,
 encryptedSuffix: ".bin",
 }
-c.buffers.New = func() interface{} {
+c.buffers.New = func() any {
 return new([blockSize]byte)
 }
 err := c.Key(password, salt)
@@ -336,7 +336,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
 _, _ = result.WriteString(strconv.Itoa(dir) + ".")
 
 // but we'll augment it with the nameKey for real calculation
-for i := 0; i < len(c.nameKey); i++ {
+for i := range len(c.nameKey) {
 dir += int(c.nameKey[i])
 }
 
@@ -418,7 +418,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 }
 
 // add the nameKey to get the real rotate distance
-for i := 0; i < len(c.nameKey); i++ {
+for i := range len(c.nameKey) {
 dir += int(c.nameKey[i])
 }
 
@@ -664,7 +664,7 @@ func (n *nonce) increment() {
 // add a uint64 to the nonce
 func (n *nonce) add(x uint64) {
 carry := uint16(0)
-for i := 0; i < 8; i++ {
+for i := range 8 {
 digit := (*n)[i]
 xDigit := byte(x)
 x >>= 8
 
@@ -1307,10 +1307,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 end := len(ciphertext)
 if underlyingLimit >= 0 {
-end = int(underlyingOffset + underlyingLimit)
-if end > len(ciphertext) {
-end = len(ciphertext)
-}
+end = min(int(underlyingOffset+underlyingLimit), len(ciphertext))
 }
 reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
 return reader, nil
@@ -1490,7 +1487,7 @@ func TestDecrypterRead(t *testing.T) {
 assert.NoError(t, err)
 
 // Test truncating the file at each possible point
-for i := 0; i < len(file16)-1; i++ {
+for i := range len(file16) - 1 {
 what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
 cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
 fh, err := c.newDecrypter(cd)
 
@@ -18,6 +18,7 @@ import (
 "github.com/rclone/rclone/fs/config/obscure"
 "github.com/rclone/rclone/fs/fspath"
 "github.com/rclone/rclone/fs/hash"
+"github.com/rclone/rclone/fs/list"
 )
 
 // Globals
@@ -293,6 +294,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 PartialUploads: true,
 }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
 
+// Enable ListP always
+f.features.ListP = f.ListP
+
 return f, err
 }
 
@@ -416,11 +420,40 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
-if err != nil {
-return nil, err
+return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+wrappedCallback := func(entries fs.DirEntries) error {
+entries, err := f.encryptEntries(ctx, entries)
+if err != nil {
+return err
+}
+return callback(entries)
 }
-return f.encryptEntries(ctx, entries)
+listP := f.Fs.Features().ListP
+encryptedDir := f.cipher.EncryptDirName(dir)
+if listP == nil {
+entries, err := f.Fs.List(ctx, encryptedDir)
+if err != nil {
+return err
+}
+return wrappedCallback(entries)
+}
+return listP(ctx, encryptedDir, wrappedCallback)
 }
 
 // ListR lists the objects and directories of the Fs starting
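The pattern in this hunk is the usual rclone shape for paged listings: List becomes a thin wrapper over list.WithListP, and ListP decrypts every tranche inside the callback, falling back to a single f.Fs.List call when the wrapped backend has no ListP of its own. Below is a self-contained sketch of that wrapper shape with a generic transform standing in for filename decryption; the interfaces and names here are illustrative, not rclone's.

```go
package main

import (
	"context"
	"fmt"
)

// Lister is the minimal backend surface: a paged lister is optional.
type Lister interface {
	List(ctx context.Context, dir string) ([]string, error)
}

// PagedLister is implemented by backends that can stream entries in tranches.
type PagedLister interface {
	ListP(ctx context.Context, dir string, callback func([]string) error) error
}

// listWithTransform applies transform to every tranche, preferring the paged
// API and falling back to one List call when it is not available.
func listWithTransform(ctx context.Context, b Lister, dir string, transform func([]string) []string, callback func([]string) error) error {
	wrapped := func(entries []string) error {
		return callback(transform(entries))
	}
	if p, ok := b.(PagedLister); ok {
		return p.ListP(ctx, dir, wrapped)
	}
	entries, err := b.List(ctx, dir)
	if err != nil {
		return err
	}
	return wrapped(entries)
}

type memBackend struct{ names []string }

func (m memBackend) List(ctx context.Context, dir string) ([]string, error) { return m.names, nil }

func main() {
	b := memBackend{names: []string{"a.bin", "b.bin"}}
	_ = listWithTransform(context.Background(), b, "", func(in []string) []string {
		out := make([]string, len(in))
		for i, name := range in {
			out[i] = "decrypted-" + name // stand-in for cipher.DecryptFileName
		}
		return out
	}, func(entries []string) error {
		fmt.Println(entries)
		return nil
	})
}
```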
@@ -890,28 +923,30 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 var commandHelp = []fs.CommandHelp{
 {
 Name: "encode",
-Short: "Encode the given filename(s)",
+Short: "Encode the given filename(s).",
 Long: `This encodes the filenames given as arguments returning a list of
 strings of the encoded results.
 
-Usage Example:
+Usage examples:
 
-rclone backend encode crypt: file1 [file2...]
-rclone rc backend/command command=encode fs=crypt: file1 [file2...]
-`,
+` + "```console" + `
+rclone backend encode crypt: file1 [file2...]
+rclone rc backend/command command=encode fs=crypt: file1 [file2...]
+` + "```",
 },
 {
 Name: "decode",
-Short: "Decode the given filename(s)",
+Short: "Decode the given filename(s).",
 Long: `This decodes the filenames given as arguments returning a list of
 strings of the decoded results. It will return an error if any of the
 inputs are invalid.
 
-Usage Example:
+Usage examples:
 
-rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
-rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
-`,
+` + "```console" + `
+rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
+rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
+` + "```",
 },
 }
 
@@ -924,7 +959,7 @@ Usage Example:
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 switch name {
 case "decode":
 out := make([]string, 0, len(arg))
 
@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
 }
 length := len(buf)
 padding := n - (length % n)
-for i := 0; i < padding; i++ {
+for range padding {
 buf = append(buf, byte(padding))
 }
 if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
 if padding == 0 {
 return nil, ErrorPaddingTooShort
 }
-for i := 0; i < padding; i++ {
+for i := range padding {
 if buf[length-1-i] != byte(padding) {
 return nil, ErrorPaddingNotAllTheSame
 }
 
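Only the loop syntax changes in these two pkcs7 hunks (Go 1.22 range-over-int); the scheme itself still appends padding copies of the byte value padding, and Unpad verifies each one. A small round-trip sketch of PKCS#7 padding, written independently of the rclone package:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// pad appends PKCS#7 padding so len(out) is a multiple of n.
func pad(n int, buf []byte) []byte {
	padding := n - len(buf)%n
	for range padding {
		buf = append(buf, byte(padding))
	}
	return buf
}

// unpad removes PKCS#7 padding, checking every padding byte has the expected value.
func unpad(n int, buf []byte) ([]byte, error) {
	if len(buf) == 0 || len(buf)%n != 0 {
		return nil, errors.New("bad padded length")
	}
	padding := int(buf[len(buf)-1])
	if padding == 0 || padding > n {
		return nil, errors.New("bad padding byte")
	}
	for i := range padding {
		if buf[len(buf)-1-i] != byte(padding) {
			return nil, errors.New("padding bytes differ")
		}
	}
	return buf[:len(buf)-padding], nil
}

func main() {
	padded := pad(16, []byte("hello"))
	orig, err := unpad(16, padded)
	fmt.Println(bytes.Equal(orig, []byte("hello")), err) // true <nil>
}
```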
38
backend/doi/api/dataversetypes.go
Normal file
@@ -0,0 +1,38 @@
|
||||
// Type definitions specific to Dataverse
|
||||
|
||||
package api
|
||||
|
||||
// DataverseDatasetResponse is returned by the Dataverse dataset API
|
||||
type DataverseDatasetResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data DataverseDataset `json:"data"`
|
||||
}
|
||||
|
||||
// DataverseDataset is the representation of a dataset
|
||||
type DataverseDataset struct {
|
||||
LatestVersion DataverseDatasetVersion `json:"latestVersion"`
|
||||
}
|
||||
|
||||
// DataverseDatasetVersion is the representation of a dataset version
|
||||
type DataverseDatasetVersion struct {
|
||||
LastUpdateTime string `json:"lastUpdateTime"`
|
||||
Files []DataverseFile `json:"files"`
|
||||
}
|
||||
|
||||
// DataverseFile is the representation of a file found in a dataset
|
||||
type DataverseFile struct {
|
||||
DirectoryLabel string `json:"directoryLabel"`
|
||||
DataFile DataverseDataFile `json:"dataFile"`
|
||||
}
|
||||
|
||||
// DataverseDataFile represents file metadata details
|
||||
type DataverseDataFile struct {
|
||||
ID int64 `json:"id"`
|
||||
Filename string `json:"filename"`
|
||||
ContentType string `json:"contentType"`
|
||||
FileSize int64 `json:"filesize"`
|
||||
OriginalFileFormat string `json:"originalFileFormat"`
|
||||
OriginalFileSize int64 `json:"originalFileSize"`
|
||||
OriginalFileName string `json:"originalFileName"`
|
||||
MD5 string `json:"md5"`
|
||||
}
|
||||
33
backend/doi/api/inveniotypes.go
Normal file
@@ -0,0 +1,33 @@
|
||||
// Type definitions specific to InvenioRDM
|
||||
|
||||
package api
|
||||
|
||||
// InvenioRecordResponse is the representation of a record stored in InvenioRDM
|
||||
type InvenioRecordResponse struct {
|
||||
Links InvenioRecordResponseLinks `json:"links"`
|
||||
}
|
||||
|
||||
// InvenioRecordResponseLinks represents a record's links
|
||||
type InvenioRecordResponseLinks struct {
|
||||
Self string `json:"self"`
|
||||
}
|
||||
|
||||
// InvenioFilesResponse is the representation of a record's files
|
||||
type InvenioFilesResponse struct {
|
||||
Entries []InvenioFilesResponseEntry `json:"entries"`
|
||||
}
|
||||
|
||||
// InvenioFilesResponseEntry is the representation of a file entry
|
||||
type InvenioFilesResponseEntry struct {
|
||||
Key string `json:"key"`
|
||||
Checksum string `json:"checksum"`
|
||||
Size int64 `json:"size"`
|
||||
Updated string `json:"updated"`
|
||||
MimeType string `json:"mimetype"`
|
||||
Links InvenioFilesResponseEntryLinks `json:"links"`
|
||||
}
|
||||
|
||||
// InvenioFilesResponseEntryLinks represents file links details
|
||||
type InvenioFilesResponseEntryLinks struct {
|
||||
Content string `json:"content"`
|
||||
}
|
||||
26
backend/doi/api/types.go
Normal file
@@ -0,0 +1,26 @@
|
||||
// Package api has general type definitions for doi
|
||||
package api
|
||||
|
||||
// DoiResolverResponse is returned by the DOI resolver API
|
||||
//
|
||||
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
|
||||
type DoiResolverResponse struct {
|
||||
ResponseCode int `json:"responseCode"`
|
||||
Handle string `json:"handle"`
|
||||
Values []DoiResolverResponseValue `json:"values"`
|
||||
}
|
||||
|
||||
// DoiResolverResponseValue is a single handle record value
|
||||
type DoiResolverResponseValue struct {
|
||||
Index int `json:"index"`
|
||||
Type string `json:"type"`
|
||||
Data DoiResolverResponseValueData `json:"data"`
|
||||
TTL int `json:"ttl"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
}
|
||||
|
||||
// DoiResolverResponseValueData is the data held in a handle value
|
||||
type DoiResolverResponseValueData struct {
|
||||
Format string `json:"format"`
|
||||
Value any `json:"value"`
|
||||
}
|
||||
112
backend/doi/dataverse.go
Normal file
@@ -0,0 +1,112 @@
|
||||
// Implementation for Dataverse
|
||||
|
||||
package doi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/doi/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// Returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
|
||||
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
|
||||
queryValues := resolvedURL.Query()
|
||||
persistentID := queryValues.Get("persistentId")
|
||||
return persistentID != ""
|
||||
}
|
||||
|
||||
// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
|
||||
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
|
||||
queryValues := resolvedURL.Query()
|
||||
persistentID := queryValues.Get("persistentId")
|
||||
|
||||
query := url.Values{}
|
||||
query.Add("persistentId", persistentID)
|
||||
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})
|
||||
|
||||
return Dataverse, endpointURL, nil
|
||||
}
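A Dataverse landing page carries the dataset identity in its persistentId query parameter, and the resolver above rewrites that into the native /api/datasets/:persistentId/ endpoint on the same host. A tiny sketch of the rewrite; the host and DOI below are made up for illustration:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical landing page a DOI might resolve to on a Dataverse installation.
	landing, _ := url.Parse("https://dataverse.example.org/dataset.xhtml?persistentId=doi:10.1234/EXAMPLE")

	persistentID := landing.Query().Get("persistentId")
	query := url.Values{}
	query.Add("persistentId", persistentID)

	// Same host, Dataverse native API path for fetching the dataset description.
	endpoint := landing.ResolveReference(&url.URL{
		Path:     "/api/datasets/:persistentId/",
		RawQuery: query.Encode(),
	})
	fmt.Println(endpoint.String())
}
```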
|
||||
|
||||
// dataverseProvider implements the doiProvider interface for Dataverse installations
|
||||
type dataverseProvider struct {
|
||||
f *Fs
|
||||
}
|
||||
|
||||
// ListEntries returns the full list of entries found at the remote, regardless of root
|
||||
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
|
||||
// Use the cache if populated
|
||||
cachedEntries, found := dp.f.cache.GetMaybe("files")
|
||||
if found {
|
||||
parsedEntries, ok := cachedEntries.([]Object)
|
||||
if ok {
|
||||
for _, entry := range parsedEntries {
|
||||
newEntry := entry
|
||||
entries = append(entries, &newEntry)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
}
|
||||
|
||||
filesURL := dp.f.endpoint
|
||||
var res *http.Response
|
||||
var result api.DataverseDatasetResponse
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
|
||||
Parameters: filesURL.Query(),
|
||||
}
|
||||
err = dp.f.pacer.Call(func() (bool, error) {
|
||||
res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return shouldRetry(ctx, res, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("readDir failed: %w", err)
|
||||
}
|
||||
modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
|
||||
if modTimeErr != nil {
|
||||
fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
|
||||
modTime = timeUnset
|
||||
}
|
||||
for _, file := range result.Data.LatestVersion.Files {
|
||||
contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
|
||||
query := url.Values{}
|
||||
query.Add("format", "original")
|
||||
contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
|
||||
entry := &Object{
|
||||
fs: dp.f,
|
||||
remote: path.Join(file.DirectoryLabel, file.DataFile.Filename),
|
||||
contentURL: contentURL.String(),
|
||||
size: file.DataFile.FileSize,
|
||||
modTime: modTime,
|
||||
md5: file.DataFile.MD5,
|
||||
contentType: file.DataFile.ContentType,
|
||||
}
|
||||
if file.DataFile.OriginalFileName != "" {
|
||||
entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
|
||||
entry.size = file.DataFile.OriginalFileSize
|
||||
entry.contentType = file.DataFile.OriginalFileFormat
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
// Populate the cache
|
||||
cacheEntries := []Object{}
|
||||
for _, entry := range entries {
|
||||
cacheEntries = append(cacheEntries, *entry)
|
||||
}
|
||||
dp.f.cache.Put("files", cacheEntries)
|
||||
return entries, nil
|
||||
}
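ListEntries caches the listing by value and hands back freshly allocated pointers on later calls, so callers that mutate the returned objects cannot corrupt the cache. A sketch of that value-copy cache pattern in isolation, with the types simplified to a toy object:

```go
package main

import "fmt"

type object struct {
	remote string
	size   int64
}

// entryCache stores listings by value; Get returns fresh pointers so callers
// cannot mutate what is cached, mirroring how ListEntries uses the Fs cache.
type entryCache struct {
	data map[string][]object
}

func (c *entryCache) Put(key string, entries []*object) {
	vals := make([]object, 0, len(entries))
	for _, e := range entries {
		vals = append(vals, *e)
	}
	c.data[key] = vals
}

func (c *entryCache) Get(key string) ([]*object, bool) {
	vals, ok := c.data[key]
	if !ok {
		return nil, false
	}
	out := make([]*object, 0, len(vals))
	for i := range vals {
		entry := vals[i] // copy, so the cached slice stays untouched
		out = append(out, &entry)
	}
	return out, true
}

func main() {
	c := &entryCache{data: map[string][]object{}}
	c.Put("files", []*object{{remote: "README.md", size: 18}})
	got, _ := c.Get("files")
	got[0].size = 0 // mutating the returned copy...
	again, _ := c.Get("files")
	fmt.Println(again[0].size) // ...does not affect the cache: prints 18
}
```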
|
||||
|
||||
func newDataverseProvider(f *Fs) doiProvider {
|
||||
return &dataverseProvider{
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
653
backend/doi/doi.go
Normal file
@@ -0,0 +1,653 @@
|
||||
// Package doi provides a filesystem interface for digital objects identified by DOIs.
|
||||
//
|
||||
// See: https://www.doi.org/the-identifier/what-is-a-doi/
|
||||
package doi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/doi/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/cache"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
// the URL of the DOI resolver
|
||||
//
|
||||
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
|
||||
doiResolverAPIURL = "https://doi.org/api"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
|
||||
var (
|
||||
errorReadOnly = errors.New("doi remotes are read only")
|
||||
timeUnset = time.Unix(0, 0)
|
||||
)
|
||||
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "doi",
|
||||
Description: "DOI datasets",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Options: []fs.Option{{
|
||||
Name: "doi",
|
||||
Help: "The DOI or the doi.org URL.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: fs.ConfigProvider,
|
||||
Help: `DOI provider.
|
||||
|
||||
The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "auto",
|
||||
Help: "Auto-detect provider",
|
||||
},
|
||||
{
|
||||
Value: string(Zenodo),
|
||||
Help: "Zenodo",
|
||||
}, {
|
||||
Value: string(Dataverse),
|
||||
Help: "Dataverse",
|
||||
}, {
|
||||
Value: string(Invenio),
|
||||
Help: "Invenio",
|
||||
}},
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "doi_resolver_api_url",
|
||||
Help: `The URL of the DOI resolver API to use.
|
||||
|
||||
The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.
|
||||
|
||||
Defaults to "https://doi.org/api".`,
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Provider defines the type of provider hosting the DOI
|
||||
type Provider string
|
||||
|
||||
const (
|
||||
// Zenodo provider, see https://zenodo.org
|
||||
Zenodo Provider = "zenodo"
|
||||
// Dataverse provider, see https://dataverse.harvard.edu
|
||||
Dataverse Provider = "dataverse"
|
||||
// Invenio provider, see https://inveniordm.docs.cern.ch
|
||||
Invenio Provider = "invenio"
|
||||
)
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Doi string `config:"doi"` // The DOI, a digital identifier of an object, usually a dataset
|
||||
Provider string `config:"provider"` // The DOI provider
|
||||
DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote HTTP files
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
provider Provider // the DOI provider
|
||||
doiProvider doiProvider // the interface used to interact with the DOI provider
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this backend
|
||||
ci *fs.ConfigInfo // global config
|
||||
endpoint *url.URL // the main API endpoint for this remote
|
||||
endpointURL string // endpoint as a string
|
||||
srv *rest.Client // the connection to the server
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
cache *cache.Cache // a cache for the remote metadata
|
||||
}
|
||||
|
||||
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // the remote path
|
||||
contentURL string // the URL where the contents of the file can be downloaded
|
||||
size int64 // size of the object
|
||||
modTime time.Time // modification time of the object
|
||||
contentType string // content type of the object
|
||||
md5 string // MD5 hash of the object content
|
||||
}
|
||||
|
||||
// doiProvider is the interface used to list objects in a DOI
|
||||
type doiProvider interface {
|
||||
// ListEntries returns the full list of entries found at the remote, regardless of root
|
||||
ListEntries(ctx context.Context) (entries []*Object, err error)
|
||||
}
|
||||
|
||||
// Parse the input string as a DOI
|
||||
// Examples:
|
||||
// 10.1000/182 -> 10.1000/182
|
||||
// https://doi.org/10.1000/182 -> 10.1000/182
|
||||
// doi:10.1000/182 -> 10.1000/182
|
||||
func parseDoi(doi string) string {
|
||||
doiURL, err := url.Parse(doi)
|
||||
if err != nil {
|
||||
return doi
|
||||
}
|
||||
if doiURL.Scheme == "doi" {
|
||||
return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
|
||||
}
|
||||
if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
|
||||
return strings.TrimLeft(doiURL.Path, "/")
|
||||
}
|
||||
return doi
|
||||
}
|
||||
|
||||
// Resolve a DOI to a URL
|
||||
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
|
||||
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
|
||||
resolverURL := opt.DoiResolverAPIURL
|
||||
if resolverURL == "" {
|
||||
resolverURL = doiResolverAPIURL
|
||||
}
|
||||
|
||||
var result api.DoiResolverResponse
|
||||
params := url.Values{}
|
||||
params.Add("index", "1")
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: resolverURL,
|
||||
Path: "/handles/" + opt.Doi,
|
||||
Parameters: params,
|
||||
}
|
||||
err = pacer.Call(func() (bool, error) {
|
||||
res, err := srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return shouldRetry(ctx, res, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if result.ResponseCode != 1 {
|
||||
return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
|
||||
}
|
||||
resolvedURLStr := ""
|
||||
for _, value := range result.Values {
|
||||
if value.Type == "URL" && value.Data.Format == "string" {
|
||||
valueStr, ok := value.Data.Value.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
|
||||
}
|
||||
resolvedURLStr = valueStr
|
||||
}
|
||||
}
|
||||
resolvedURL, err := url.Parse(resolvedURLStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resolvedURL, nil
|
||||
}
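resolveDoiURL is a single GET against the public doi.org handle API (/api/handles/<doi>?index=1) followed by picking the value whose type is "URL". A standalone sketch of the same lookup using net/http and encoding/json, with the response struct trimmed to the fields actually read:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type handleResponse struct {
	ResponseCode int `json:"responseCode"`
	Values       []struct {
		Type string `json:"type"`
		Data struct {
			Format string `json:"format"`
			Value  any    `json:"value"`
		} `json:"data"`
	} `json:"values"`
}

// resolve returns the URL a DOI currently points at, via the doi.org handle API.
func resolve(doi string) (string, error) {
	res, err := http.Get("https://doi.org/api/handles/" + doi + "?index=1")
	if err != nil {
		return "", err
	}
	defer res.Body.Close()

	var body handleResponse
	if err := json.NewDecoder(res.Body).Decode(&body); err != nil {
		return "", err
	}
	if body.ResponseCode != 1 {
		return "", fmt.Errorf("could not resolve DOI (error code %d)", body.ResponseCode)
	}
	for _, v := range body.Values {
		if v.Type == "URL" && v.Data.Format == "string" {
			if s, ok := v.Data.Value.(string); ok {
				return s, nil
			}
		}
	}
	return "", fmt.Errorf("no URL value in handle record")
}

func main() {
	target, err := resolve("10.1000/182")
	fmt.Println(target, err)
}
```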
|
||||
|
||||
// Resolve the passed configuration into a provider and endpoint
|
||||
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
|
||||
resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
switch opt.Provider {
|
||||
case string(Dataverse):
|
||||
return resolveDataverseEndpoint(resolvedURL)
|
||||
case string(Invenio):
|
||||
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
|
||||
case string(Zenodo):
|
||||
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
|
||||
}
|
||||
|
||||
hostname := strings.ToLower(resolvedURL.Hostname())
|
||||
if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
|
||||
return resolveDataverseEndpoint(resolvedURL)
|
||||
}
|
||||
if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
|
||||
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
|
||||
}
|
||||
if activateInvenio(ctx, srv, pacer, resolvedURL) {
|
||||
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
|
||||
}
|
||||
|
||||
return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
|
||||
}
|
||||
|
||||
// Make the http connection from the passed options
|
||||
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
|
||||
provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Update f with the new parameters
|
||||
f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
|
||||
f.endpoint = endpoint
|
||||
f.endpointURL = endpoint.String()
|
||||
f.provider = provider
|
||||
f.opt.Provider = string(provider)
|
||||
|
||||
switch f.provider {
|
||||
case Dataverse:
|
||||
f.doiProvider = newDataverseProvider(f)
|
||||
case Invenio, Zenodo:
|
||||
f.doiProvider = newInvenioProvider(f)
|
||||
default:
|
||||
return false, fmt.Errorf("provider type '%s' not supported", f.provider)
|
||||
}
|
||||
|
||||
// Determine if the root is a file
|
||||
entries, err := f.doiProvider.ListEntries(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
if entry.remote == f.root {
|
||||
isFile = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return isFile, nil
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
429, // Too Many Requests.
|
||||
500, // Internal Server Error
|
||||
502, // Bad Gateway
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Timeout
|
||||
509, // Bandwidth Limit Exceeded
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this res and err
|
||||
// deserve to be retried. It returns the err as a convenience.
|
||||
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
|
||||
}
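Every API call in this backend goes through pacer.Call, with shouldRetry deciding from the HTTP status or transport error whether another attempt is worthwhile. A rough sketch of that retry loop outside rclone; it uses a fixed delay instead of the pacer's adaptive sleeps, and the attempt count is an arbitrary choice:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// retriable mirrors shouldRetry above: transport errors, throttling and
// transient server statuses are worth another attempt.
func retriable(res *http.Response, err error) bool {
	if err != nil {
		return true
	}
	switch res.StatusCode {
	case 429, 500, 502, 503, 504, 509:
		return true
	}
	return false
}

// call retries fn with a fixed delay while it asks for a retry; rclone's pacer
// plays this role with adaptive sleeps, so this is only an approximation.
func call(attempts int, delay time.Duration, fn func() (retry bool, err error)) error {
	var err error
	for range attempts {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("retries exhausted (last error: %v)", err)
}

func main() {
	err := call(3, 100*time.Millisecond, func() (bool, error) {
		res, err := http.Get("https://doi.org/api/handles/10.1000/182?index=1")
		if err == nil {
			_ = res.Body.Close()
		}
		return retriable(res, err), err
	})
	fmt.Println("err =", err)
}
```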
|
||||
|
||||
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opt.Doi = parseDoi(opt.Doi)
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
ci := fs.GetConfig(ctx)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
ci: ci,
|
||||
srv: rest.NewClient(client),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
cache: cache.New(),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
isFile, err := f.httpConnection(ctx, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if isFile {
|
||||
// return an error with an fs which points to the parent
|
||||
newRoot := path.Dir(f.root)
|
||||
if newRoot == "." {
|
||||
newRoot = ""
|
||||
}
|
||||
f.root = newRoot
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Name returns the configured name of the file system
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root returns the root for the filesystem
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String returns the URL for the filesystem
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("DOI %s", f.opt.Doi)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// Hashes returns hash.MD5 to indicate that MD5 checksums are supported
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
|
||||
|
||||
// Mkdir makes the root directory of the Fs object
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// Remove a remote http file object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// NewObject creates a new remote http file object
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
entries, err := f.doiProvider.ListEntries(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
remoteFullPath := remote
|
||||
if f.root != "" {
|
||||
remoteFullPath = path.Join(f.root, remote)
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.Remote() == remoteFullPath {
|
||||
return entry, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
fileEntries, err := f.doiProvider.ListEntries(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing %q: %w", dir, err)
|
||||
}
|
||||
|
||||
fullDir := path.Join(f.root, dir)
|
||||
if fullDir != "" {
|
||||
fullDir += "/"
|
||||
}
|
||||
|
||||
dirPaths := map[string]bool{}
|
||||
for _, entry := range fileEntries {
|
||||
// First, filter out files not in `fullDir`
|
||||
if !strings.HasPrefix(entry.remote, fullDir) {
|
||||
continue
|
||||
}
|
||||
// Then, find entries in subfolders
|
||||
remotePath := entry.remote
|
||||
if fullDir != "" {
|
||||
remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
|
||||
}
|
||||
parts := strings.SplitN(remotePath, "/", 2)
|
||||
if len(parts) == 1 {
|
||||
newEntry := *entry
|
||||
newEntry.remote = path.Join(dir, remotePath)
|
||||
entries = append(entries, &newEntry)
|
||||
} else {
|
||||
dirPaths[path.Join(dir, parts[0])] = true
|
||||
}
|
||||
}
|
||||
|
||||
for dirPath := range dirPaths {
|
||||
entry := fs.NewDir(dirPath, time.Time{})
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
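Because DOI providers return one flat list of file paths, List synthesizes directories: an entry with no remaining path separator after trimming the requested prefix is a file, and the first segment of any deeper path becomes a directory entry. A compact sketch of that derivation:

```go
package main

import (
	"fmt"
	"path"
	"sort"
	"strings"
)

// listDir splits a flat set of file paths into the files and subdirectories
// directly inside dir, the same bookkeeping the List method above performs.
func listDir(all []string, dir string) (files, dirs []string) {
	prefix := dir
	if prefix != "" {
		prefix += "/"
	}
	seen := map[string]bool{}
	for _, p := range all {
		if !strings.HasPrefix(p, prefix) {
			continue
		}
		rest := strings.TrimPrefix(p, prefix)
		parts := strings.SplitN(rest, "/", 2)
		if len(parts) == 1 {
			files = append(files, path.Join(dir, rest))
		} else if !seen[parts[0]] {
			seen[parts[0]] = true
			dirs = append(dirs, path.Join(dir, parts[0]))
		}
	}
	sort.Strings(files)
	sort.Strings(dirs)
	return files, dirs
}

func main() {
	all := []string{"README.md", "data/raw.csv", "data/clean.csv", "docs/guide.pdf"}
	files, dirs := listDir(all, "")
	fmt.Println(files) // [README.md]
	fmt.Println(dirs)  // [data docs]
}
```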
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return nil, errorReadOnly
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return nil, errorReadOnly
|
||||
}
|
||||
|
||||
// Fs is the filesystem this remote http file object is located within
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// String returns the remote path of the Object
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path of the file, relative to the fs root
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return o.md5, nil
|
||||
}
|
||||
|
||||
// Size returns the size in bytes of the remote http file
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the remote http file
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification and access time to the specified time
|
||||
//
|
||||
// it also updates the info field
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Open a remote http file object for reading. Seek is supported
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
fs.FixRangeOption(options, o.size)
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: o.contentURL,
|
||||
Options: options,
|
||||
}
|
||||
var res *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
res, err = o.fs.srv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, res, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Open failed: %w", err)
|
||||
}
|
||||
|
||||
// Handle non-compliant redirects
|
||||
if res.Header.Get("Location") != "" {
|
||||
newURL, err := res.Location()
|
||||
if err == nil {
|
||||
opts.RootURL = newURL.String()
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
res, err = o.fs.srv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, res, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Open failed: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return res.Body, nil
|
||||
}
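Open issues the download request through the pacer and, if the response still carries a Location header (a redirect the HTTP client did not follow), repeats the request once against that target. A plain net/http sketch of that manual hop; the DOI URL used here is only an example:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// get fetches url and, if the response still carries a Location header,
// retries once at that target. This mirrors the manual hop in Object.Open
// above; it is only a sketch, not the backend's implementation.
func get(client *http.Client, url string) (*http.Response, error) {
	res, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	if loc := res.Header.Get("Location"); loc != "" {
		next, locErr := res.Location()
		if locErr == nil {
			_ = res.Body.Close()
			return client.Get(next.String())
		}
	}
	return res, nil
}

func main() {
	res, err := get(http.DefaultClient, "https://doi.org/10.1000/182")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()
	n, _ := io.Copy(io.Discard, res.Body)
	fmt.Println(res.StatusCode, n, "bytes")
}
```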
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
return o.contentType
|
||||
}
|
||||
|
||||
var commandHelp = []fs.CommandHelp{{
|
||||
Name: "metadata",
|
||||
Short: "Show metadata about the DOI.",
|
||||
Long: `This command returns a JSON object with some information about the DOI.
|
||||
|
||||
Usage example:
|
||||
|
||||
` + "```console" + `
|
||||
rclone backend metadata doi:
|
||||
` + "```" + `
|
||||
|
||||
It returns a JSON object representing metadata about the DOI.`,
|
||||
}, {
|
||||
Name: "set",
|
||||
Short: "Set command for updating the config parameters.",
|
||||
Long: `This set command can be used to update the config parameters
|
||||
for a running doi backend.
|
||||
|
||||
Usage examples:
|
||||
|
||||
` + "```console" + `
|
||||
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
|
||||
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
|
||||
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
|
||||
` + "```" + `
|
||||
|
||||
The option keys are named as they are in the config file.
|
||||
|
||||
This rebuilds the connection to the doi backend when it is called with
|
||||
the new parameters. Only new parameters need be passed as the values
|
||||
will default to those currently in use.
|
||||
|
||||
It doesn't return anything.`,
|
||||
}}
|
||||
|
||||
// Command the backend to run a named command
|
||||
//
|
||||
// The command run is name
|
||||
// args may be used to read arguments from
|
||||
// opts may be used to read optional arguments from
|
||||
//
|
||||
// The result should be capable of being JSON encoded
|
||||
// If it is a string or a []string it will be shown to the user
|
||||
// otherwise it will be JSON encoded and shown to the user like that
|
||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
||||
switch name {
|
||||
case "metadata":
|
||||
return f.ShowMetadata(ctx)
|
||||
case "set":
|
||||
newOpt := f.opt
|
||||
err := configstruct.Set(configmap.Simple(opt), &newOpt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading config: %w", err)
|
||||
}
|
||||
_, err = f.httpConnection(ctx, &newOpt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("updating session: %w", err)
|
||||
}
|
||||
f.opt = newOpt
|
||||
keys := []string{}
|
||||
for k := range opt {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
}
|
||||
|
||||
// ShowMetadata returns some metadata about the corresponding DOI
|
||||
func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
|
||||
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := map[string]any{}
|
||||
info["DOI"] = f.opt.Doi
|
||||
info["URL"] = doiURL.String()
|
||||
info["metadataURL"] = f.endpointURL
|
||||
info["provider"] = f.provider
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Commander = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
)
|
||||
260
backend/doi/doi_internal_test.go
Normal file
@@ -0,0 +1,260 @@
|
||||
package doi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/doi/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var remoteName = "TestDoi"
|
||||
|
||||
func TestParseDoi(t *testing.T) {
|
||||
// 10.1000/182 -> 10.1000/182
|
||||
doi := "10.1000/182"
|
||||
parsed := parseDoi(doi)
|
||||
assert.Equal(t, "10.1000/182", parsed)
|
||||
|
||||
// https://doi.org/10.1000/182 -> 10.1000/182
|
||||
doi = "https://doi.org/10.1000/182"
|
||||
parsed = parseDoi(doi)
|
||||
assert.Equal(t, "10.1000/182", parsed)
|
||||
|
||||
// https://dx.doi.org/10.1000/182 -> 10.1000/182
|
||||
doi = "https://dxdoi.org/10.1000/182"
|
||||
parsed = parseDoi(doi)
|
||||
assert.Equal(t, "10.1000/182", parsed)
|
||||
|
||||
// doi:10.1000/182 -> 10.1000/182
|
||||
doi = "doi:10.1000/182"
|
||||
parsed = parseDoi(doi)
|
||||
assert.Equal(t, "10.1000/182", parsed)
|
||||
|
||||
// doi://10.1000/182 -> 10.1000/182
|
||||
doi = "doi://10.1000/182"
|
||||
parsed = parseDoi(doi)
|
||||
assert.Equal(t, "10.1000/182", parsed)
|
||||
}
|
||||
|
||||
// prepareMockDoiResolverServer prepares a test server to resolve DOIs
|
||||
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
// Handle requests for resolving DOIs
|
||||
mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check that we are resolving a DOI
|
||||
handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
|
||||
assert.NotEmpty(t, handle)
|
||||
index := r.URL.Query().Get("index")
|
||||
assert.Equal(t, "1", index)
|
||||
|
||||
// Return the most basic response
|
||||
result := api.DoiResolverResponse{
|
||||
ResponseCode: 1,
|
||||
Handle: handle,
|
||||
Values: []api.DoiResolverResponseValue{
|
||||
{
|
||||
Index: 1,
|
||||
Type: "URL",
|
||||
Data: api.DoiResolverResponseValueData{
|
||||
Format: "string",
|
||||
Value: resolvedURL,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
resultBytes, err := json.Marshal(result)
|
||||
require.NoError(t, err)
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
_, err = w.Write(resultBytes)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
// Make the test server
|
||||
ts := httptest.NewServer(mux)
|
||||
|
||||
// Close the server at the end of the test
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
return ts.URL + "/api"
|
||||
}
|
||||
|
||||
func md5Sum(text string) string {
|
||||
hash := md5.Sum([]byte(text))
|
||||
return hex.EncodeToString(hash[:])
|
||||
}
|
||||
|
||||
// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
|
||||
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
// Handle requests for a single record
|
||||
mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check that we are returning data about a single record
|
||||
recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
|
||||
assert.NotEmpty(t, recordID)
|
||||
|
||||
// Return the most basic response
|
||||
selfURL, err := url.Parse("http://" + r.Host)
|
||||
require.NoError(t, err)
|
||||
selfURL = selfURL.JoinPath(r.URL.String())
|
||||
result := api.InvenioRecordResponse{
|
||||
Links: api.InvenioRecordResponseLinks{
|
||||
Self: selfURL.String(),
|
||||
},
|
||||
}
|
||||
resultBytes, err := json.Marshal(result)
|
||||
require.NoError(t, err)
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
_, err = w.Write(resultBytes)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
// Handle requests for listing files in a record
|
||||
mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
|
||||
// Return the most basic response
|
||||
filesBaseURL, err := url.Parse("http://" + r.Host)
|
||||
require.NoError(t, err)
|
||||
filesBaseURL = filesBaseURL.JoinPath("/api/files/")
|
||||
|
||||
entries := []api.InvenioFilesResponseEntry{}
|
||||
for filename, contents := range files {
|
||||
entries = append(entries,
|
||||
api.InvenioFilesResponseEntry{
|
||||
Key: filename,
|
||||
Checksum: md5Sum(contents),
|
||||
Size: int64(len(contents)),
|
||||
Updated: time.Now().UTC().Format(time.RFC3339),
|
||||
MimeType: "text/plain; charset=utf-8",
|
||||
Links: api.InvenioFilesResponseEntryLinks{
|
||||
Content: filesBaseURL.JoinPath(filename).String(),
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
result := api.InvenioFilesResponse{
|
||||
Entries: entries,
|
||||
}
|
||||
resultBytes, err := json.Marshal(result)
|
||||
require.NoError(t, err)
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
_, err = w.Write(resultBytes)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
// Handle requests for file contents
|
||||
mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check that we are returning the contents of a file
|
||||
filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
|
||||
assert.NotEmpty(t, filename)
|
||||
contents, found := files[filename]
|
||||
if !found {
|
||||
w.WriteHeader(404)
|
||||
return
|
||||
}
|
||||
|
||||
// Return the most basic response
|
||||
_, err := w.Write([]byte(contents))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
// Make the test server
|
||||
ts := httptest.NewServer(mux)
|
||||
|
||||
// Close the server at the end of the test
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
return ts
|
||||
}
|
||||
|
||||
func TestZenodoRemote(t *testing.T) {
|
||||
recordID := "2600782"
|
||||
doi := "10.5281/zenodo.2600782"
|
||||
|
||||
// The files in the dataset
|
||||
files := map[string]string{
|
||||
"README.md": "This is a dataset.",
|
||||
"data.txt": "Some data",
|
||||
}
|
||||
|
||||
ts := prepareMockZenodoServer(t, files)
|
||||
resolvedURL := ts.URL + "/record/" + recordID
|
||||
|
||||
doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)
|
||||
|
||||
testConfig := configmap.Simple{
|
||||
"type": "doi",
|
||||
"doi": doi,
|
||||
"provider": "zenodo",
|
||||
"doi_resolver_api_url": doiResolverAPIURL,
|
||||
}
|
||||
f, err := NewFs(context.Background(), remoteName, "", testConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test listing the DOI files
|
||||
entries, err := f.List(context.Background(), "")
|
||||
require.NoError(t, err)
|
||||
|
||||
sort.Sort(entries)
|
||||
|
||||
require.Equal(t, len(files), len(entries))
|
||||
|
||||
e := entries[0]
|
||||
assert.Equal(t, "README.md", e.Remote())
|
||||
assert.Equal(t, int64(18), e.Size())
|
||||
_, ok := e.(*Object)
|
||||
assert.True(t, ok)
|
||||
|
||||
e = entries[1]
|
||||
assert.Equal(t, "data.txt", e.Remote())
|
||||
assert.Equal(t, int64(9), e.Size())
|
||||
_, ok = e.(*Object)
|
||||
assert.True(t, ok)
|
||||
|
||||
// Test reading the DOI files
|
||||
o, err := f.NewObject(context.Background(), "README.md")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(18), o.Size())
|
||||
md5Hash, err := o.Hash(context.Background(), hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
|
||||
fd, err := o.Open(context.Background())
|
||||
require.NoError(t, err)
|
||||
data, err := io.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
assert.Equal(t, []byte(files["README.md"]), data)
|
||||
do, ok := o.(fs.MimeTyper)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
|
||||
|
||||
o, err = f.NewObject(context.Background(), "data.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(9), o.Size())
|
||||
md5Hash, err = o.Hash(context.Background(), hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
|
||||
fd, err = o.Open(context.Background())
|
||||
require.NoError(t, err)
|
||||
data, err = io.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
assert.Equal(t, []byte(files["data.txt"]), data)
|
||||
do, ok = o.(fs.MimeTyper)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
|
||||
}
|
||||
16
backend/doi/doi_test.go
Normal file
@@ -0,0 +1,16 @@
|
||||
// Test DOI filesystem interface
|
||||
package doi
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestDoi:",
|
||||
NilObject: (*Object)(nil),
|
||||
})
|
||||
}
|
||||
164
backend/doi/invenio.go
Normal file
@@ -0,0 +1,164 @@
|
||||
// Implementation for InvenioRDM
|
||||
|
||||
package doi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/doi/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)
|
||||
|
||||
// Returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
|
||||
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
|
||||
_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
|
||||
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
|
||||
var res *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: resolvedURL.String(),
|
||||
}
|
||||
err = pacer.Call(func() (bool, error) {
|
||||
res, err = srv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, res, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
// First, attempt to grab the API URL from the headers
|
||||
var linksetURL *url.URL
|
||||
links := parseLinkHeader(res.Header.Get("Link"))
|
||||
for _, link := range links {
|
||||
if link.Rel == "linkset" && link.Type == "application/linkset+json" {
|
||||
parsed, err := url.Parse(link.Href)
|
||||
if err == nil {
|
||||
linksetURL = parsed
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if linksetURL != nil {
|
||||
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
|
||||
if err == nil {
|
||||
			return Invenio, endpoint, nil
		}
		fs.Logf(nil, "using linkset URL failed: %s", err.Error())
	}

	// If there is no linkset header, try to grab the record ID from the URL
	recordID := ""
	resURL := res.Request.URL
	match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
	if match != nil {
		recordID = match[1]
		guessedURL := res.Request.URL.ResolveReference(&url.URL{
			Path: "/api/records/" + recordID,
		})
		endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
		if err == nil {
			return Invenio, endpoint, nil
		}
		fs.Logf(nil, "guessing the URL failed: %s", err.Error())
	}

	return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
}

func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
	var result api.InvenioRecordResponse
	opts := rest.Opts{
		Method:  "GET",
		RootURL: resolvedURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, err
	}
	if result.Links.Self == "" {
		return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
	}
	return url.Parse(result.Links.Self)
}

// invenioProvider implements the doiProvider interface for InvenioRDM installations
type invenioProvider struct {
	f *Fs
}

// ListEntries returns the full list of entries found at the remote, regardless of root
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
	// Use the cache if populated
	cachedEntries, found := ip.f.cache.GetMaybe("files")
	if found {
		parsedEntries, ok := cachedEntries.([]Object)
		if ok {
			for _, entry := range parsedEntries {
				newEntry := entry
				entries = append(entries, &newEntry)
			}
			return entries, nil
		}
	}

	filesURL := ip.f.endpoint.JoinPath("files")
	var result api.InvenioFilesResponse
	opts := rest.Opts{
		Method: "GET",
		Path:   strings.TrimLeft(filesURL.EscapedPath(), "/"),
	}
	err = ip.f.pacer.Call(func() (bool, error) {
		res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("readDir failed: %w", err)
	}
	for _, file := range result.Entries {
		modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
		if modTimeErr != nil {
			fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
			modTime = timeUnset
		}
		entry := &Object{
			fs:          ip.f,
			remote:      file.Key,
			contentURL:  file.Links.Content,
			size:        file.Size,
			modTime:     modTime,
			contentType: file.MimeType,
			md5:         strings.TrimPrefix(file.Checksum, "md5:"),
		}
		entries = append(entries, entry)
	}
	// Populate the cache
	cacheEntries := []Object{}
	for _, entry := range entries {
		cacheEntries = append(cacheEntries, *entry)
	}
	ip.f.cache.Put("files", cacheEntries)
	return entries, nil
}

func newInvenioProvider(f *Fs) doiProvider {
	return &invenioProvider{
		f: f,
	}
}
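A minimal sketch (not part of the diff) of how the provider above might be consumed, using only the doiProvider.ListEntries method and the Object fields populated in this file; the surrounding function and error handling are hypothetical:

```go
// Illustrative fragment; f is the backend's *Fs and ctx a context.Context.
provider := newInvenioProvider(f)
entries, err := provider.ListEntries(ctx)
if err != nil {
	return err
}
for _, o := range entries {
	// remote, size and modTime are the fields set by ListEntries above
	fs.Infof(f, "%s (%d bytes, modified %s)", o.remote, o.size, o.modTime)
}
```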
backend/doi/link_header.go (Normal file, 75 lines)
@@ -0,0 +1,75 @@
package doi

import (
	"regexp"
	"strings"
)

var linkRegex = regexp.MustCompile(`^<(.+)>$`)
var valueRegex = regexp.MustCompile(`^"(.+)"$`)

// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
	Href   string
	Rel    string
	Type   string
	Extras map[string]string
}

func parseLinkHeader(header string) (links []headerLink) {
	for link := range strings.SplitSeq(header, ",") {
		link = strings.TrimSpace(link)
		parsed := parseLink(link)
		if parsed != nil {
			links = append(links, *parsed)
		}
	}
	return links
}

func parseLink(link string) (parsedLink *headerLink) {
	var parts []string
	for part := range strings.SplitSeq(link, ";") {
		parts = append(parts, strings.TrimSpace(part))
	}

	match := linkRegex.FindStringSubmatch(parts[0])
	if match == nil {
		return nil
	}

	result := &headerLink{
		Href:   match[1],
		Extras: map[string]string{},
	}

	for _, keyValue := range parts[1:] {
		parsed := parseKeyValue(keyValue)
		if parsed != nil {
			key, value := parsed[0], parsed[1]
			switch strings.ToLower(key) {
			case "rel":
				result.Rel = value
			case "type":
				result.Type = value
			default:
				result.Extras[key] = value
			}
		}
	}
	return result
}

func parseKeyValue(keyValue string) []string {
	parts := strings.SplitN(keyValue, "=", 2)
	if parts[0] == "" || len(parts) < 2 {
		return nil
	}
	match := valueRegex.FindStringSubmatch(parts[1])
	if match != nil {
		parts[1] = match[1]
		return parts
	}
	return parts
}
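As a hedged usage sketch (not part of the diff), this is how the parser above can pick the `linkset` relation out of an HTTP response's Link header, which is what the Invenio resolver needs:

```go
// res is an *http.Response; parseLinkHeader splits the header on commas
// and parseLink fills Href/Rel/Type for each entry.
for _, link := range parseLinkHeader(res.Header.Get("Link")) {
	if link.Rel == "linkset" && link.Type == "application/linkset+json" {
		linksetURL := link.Href // follow this to reach the record's API endpoint
		_ = linksetURL
	}
}
```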
backend/doi/link_header_internal_test.go (Normal file, 44 lines)
@@ -0,0 +1,44 @@
package doi

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestParseLinkHeader(t *testing.T) {
	header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
	links := parseLinkHeader(header)
	expected := headerLink{
		Href:   "https://zenodo.org/api/records/15063252",
		Rel:    "linkset",
		Type:   "application/linkset+json",
		Extras: map[string]string{},
	}
	assert.Contains(t, links, expected)

	header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
	links = parseLinkHeader(header)
	expectedList := []headerLink{{
		Href:   "https://api.example.com/issues?page=2",
		Rel:    "prev",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=4",
		Rel:    "next",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=10",
		Rel:    "last",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=1",
		Rel:    "first",
		Type:   "",
		Extras: map[string]string{},
	}}
	assert.Equal(t, links, expectedList)
}
backend/doi/zenodo.go (Normal file, 47 lines)
@@ -0,0 +1,47 @@
// Implementation for Zenodo

package doi

import (
	"context"
	"fmt"
	"net/url"
	"regexp"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)

// Resolve the main API endpoint for a DOI hosted on Zenodo
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
	match := zenodoRecordRegex.FindStringSubmatch(doi)
	if match == nil {
		return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
	}

	recordID := match[1]
	endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})

	var result api.InvenioRecordResponse
	opts := rest.Opts{
		Method:  "GET",
		RootURL: endpointURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return "", nil, err
	}

	endpointURL, err = url.Parse(result.Links.Self)
	if err != nil {
		return "", nil, err
	}

	return Zenodo, endpointURL, nil
}
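A small, self-contained sketch (with assumed values, not part of the diff) of the record-ID derivation zenodo.go performs: the DOI suffix after `zenodo.` becomes the `/api/records/` path on the resolved host.

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

func main() {
	zenodoRecordRegex := regexp.MustCompile(`zenodo[.](.+)`)
	doi := "10.5281/zenodo.15063252" // example DOI; record ID borrowed from the test above
	match := zenodoRecordRegex.FindStringSubmatch(doi)
	if match != nil {
		// Assumed landing URL the DOI resolves to
		resolved, _ := url.Parse("https://zenodo.org/records/15063252")
		endpoint := resolved.ResolveReference(&url.URL{Path: "/api/records/" + match[1]})
		fmt.Println(endpoint) // https://zenodo.org/api/records/15063252
	}
}
```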
@@ -18,6 +18,7 @@ import (
 	"net/http"
 	"os"
 	"path"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -37,8 +38,8 @@ import (
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/list"
 	"github.com/rclone/rclone/fs/operations"
-	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/env"
@@ -80,9 +81,10 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	driveConfig = &oauth2.Config{
+	driveConfig = &oauthutil.Config{
 		Scopes:       []string{scopePrefix + "drive"},
-		Endpoint:     google.Endpoint,
+		AuthURL:      google.Endpoint.AuthURL,
+		TokenURL:     google.Endpoint.TokenURL,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -189,7 +191,7 @@ func driveScopes(scopesString string) (scopes []string) {
 	if scopesString == "" {
 		scopesString = defaultScope
 	}
-	for _, scope := range strings.Split(scopesString, ",") {
+	for scope := range strings.SplitSeq(scopesString, ",") {
 		scope = strings.TrimSpace(scope)
 		scopes = append(scopes, scopePrefix+scope)
 	}
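This Split-to-SplitSeq change is a pattern repeated throughout the diff; strings.SplitSeq (added in Go 1.24) yields the pieces as an iterator instead of allocating a slice, which is why the index variable disappears from these loops. A standalone sketch with made-up input:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same shape as the driveScopes loop above.
	for scope := range strings.SplitSeq("drive, drive.readonly ,drive.file", ",") {
		fmt.Printf("%q\n", strings.TrimSpace(scope))
	}
}
```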
@@ -198,13 +200,7 @@ func driveScopes(scopesString string) (scopes []string) {
 
 // Returns true if one of the scopes was "drive.appfolder"
 func driveScopesContainsAppFolder(scopes []string) bool {
-	for _, scope := range scopes {
-		if scope == scopePrefix+"drive.appfolder" {
-			return true
-		}
-
-	}
-	return false
+	return slices.Contains(scopes, scopePrefix+"drive.appfolder")
 }
 
 func driveOAuthOptions() []fs.Option {
@@ -958,12 +954,7 @@ func parseDrivePath(path string) (root string, err error) {
 type listFn func(*drive.File) bool
 
 func containsString(slice []string, s string) bool {
-	for _, e := range slice {
-		if e == s {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(slice, s)
 }
 
 // getFile returns drive.File for the ID passed and fields passed in
@@ -1152,13 +1143,7 @@ OUTER:
|
||||
// Check the case of items is correct since
|
||||
// the `=` operator is case insensitive.
|
||||
if title != "" && title != item.Name {
|
||||
found := false
|
||||
for _, stem := range stems {
|
||||
if stem == item.Name {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
found := slices.Contains(stems, item.Name)
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
@@ -1211,6 +1196,7 @@ func fixMimeType(mimeTypeIn string) string {
|
||||
}
|
||||
return mimeTypeOut
|
||||
}
|
||||
|
||||
func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
|
||||
out = make(map[string][]string, len(in))
|
||||
for k, v := range in {
|
||||
@@ -1221,9 +1207,11 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func isInternalMimeType(mimeType string) bool {
|
||||
return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
|
||||
}
|
||||
|
||||
func isLinkMimeType(mimeType string) bool {
|
||||
return strings.HasPrefix(mimeType, "application/x-link-")
|
||||
}
|
||||
@@ -1232,7 +1220,7 @@ func isLinkMimeType(mimeType string) bool {
|
||||
// into a list of unique extensions with leading "." and a list of associated MIME types
|
||||
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
|
||||
for _, extensionText := range extensionsIn {
|
||||
for _, extension := range strings.Split(extensionText, ",") {
|
||||
for extension := range strings.SplitSeq(extensionText, ",") {
|
||||
extension = strings.ToLower(strings.TrimSpace(extension))
|
||||
if extension == "" {
|
||||
continue
|
||||
@@ -1558,13 +1546,10 @@ func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) {
|
||||
func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) {
|
||||
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
|
||||
if f.opt.SkipChecksumGphotos {
|
||||
for _, space := range info.Spaces {
|
||||
if space == "photos" {
|
||||
info.Md5Checksum = ""
|
||||
info.Sha1Checksum = ""
|
||||
info.Sha256Checksum = ""
|
||||
break
|
||||
}
|
||||
if slices.Contains(info.Spaces, "photos") {
|
||||
info.Md5Checksum = ""
|
||||
info.Sha1Checksum = ""
|
||||
info.Sha256Checksum = ""
|
||||
}
|
||||
}
|
||||
o := &Object{
|
||||
@@ -1656,7 +1641,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
|
||||
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
|
||||
func (f *Fs) newObjectWithExportInfo(
|
||||
ctx context.Context, remote string, info *drive.File,
|
||||
extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
|
||||
extension, exportName, exportMimeType string, isDocument bool,
|
||||
) (o fs.Object, err error) {
|
||||
// Note that resolveShortcut will have been called already if
|
||||
// we are being called from a listing. However the drive.Item
|
||||
// will have been resolved so this will do nothing.
|
||||
@@ -1759,7 +1745,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
|
||||
}
|
||||
var updateMetadata updateMetadataFn
|
||||
if len(metadata) > 0 {
|
||||
updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
|
||||
updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
|
||||
}
|
||||
@@ -1790,7 +1776,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
|
||||
}
|
||||
dirID = actualID(dirID)
|
||||
updateInfo := &drive.File{}
|
||||
updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
|
||||
updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
|
||||
}
|
||||
@@ -1847,6 +1833,7 @@ func linkTemplate(mt string) *template.Template {
|
||||
})
|
||||
return _linkTemplates[mt]
|
||||
}
|
||||
|
||||
func (f *Fs) fetchFormats(ctx context.Context) {
|
||||
fetchFormatsOnce.Do(func() {
|
||||
var about *drive.About
|
||||
@@ -1892,7 +1879,8 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
|
||||
// Look through the exportExtensions and find the first format that can be
|
||||
// converted. If none found then return ("", "", false)
|
||||
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
|
||||
extension, mimeType string, isDocument bool) {
|
||||
extension, mimeType string, isDocument bool,
|
||||
) {
|
||||
exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
|
||||
if isDocument {
|
||||
for _, _extension := range f.exportExtensions {
|
||||
@@ -1977,9 +1965,28 @@ func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||
list := list.NewHelper(callback)
|
||||
entriesAdded := 0
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
directoryID = actualID(directoryID)
|
||||
|
||||
@@ -1991,25 +1998,30 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return true
|
||||
}
|
||||
if entry != nil {
|
||||
entries = append(entries, entry)
|
||||
err = list.Add(entry)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
entriesAdded++
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if iErr != nil {
|
||||
return nil, iErr
|
||||
return iErr
|
||||
}
|
||||
// If listing the root of a teamdrive and got no entries,
|
||||
// double check we have access
|
||||
if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
|
||||
if f.isTeamDrive && entriesAdded == 0 && f.root == "" && dir == "" {
|
||||
err = f.teamDriveOK(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
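The List/ListP split above streams entries to a callback through list.NewHelper rather than building one big slice. A hedged sketch of a caller, assuming only the fs.ListRCallback signature used in this hunk; the directory name is made up:

```go
// Illustrative fragment; f is the *Fs above and ctx a context.Context.
var entries fs.DirEntries
err := f.ListP(ctx, "some/dir", func(batch fs.DirEntries) error {
	// Each tranche arrives as soon as the helper flushes it.
	entries = append(entries, batch...)
	return nil
})
if err != nil {
	// handle the listing error
}
```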
// listREntry is a task to be executed by a litRRunner
|
||||
@@ -2201,7 +2213,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
wg := sync.WaitGroup{}
|
||||
in := make(chan listREntry, listRInputBuffer)
|
||||
out := make(chan error, f.ci.Checkers)
|
||||
list := walk.NewListRHelper(callback)
|
||||
list := list.NewHelper(callback)
|
||||
overflow := []listREntry{}
|
||||
listed := 0
|
||||
|
||||
@@ -2239,7 +2251,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
wg.Add(1)
|
||||
in <- listREntry{directoryID, dir}
|
||||
|
||||
for i := 0; i < f.ci.Checkers; i++ {
|
||||
for range f.ci.Checkers {
|
||||
go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
|
||||
}
|
||||
go func() {
|
||||
@@ -2248,11 +2260,8 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
// if the input channel overflowed add the collected entries to the channel now
|
||||
for len(overflow) > 0 {
|
||||
mu.Lock()
|
||||
l := len(overflow)
|
||||
// only fill half of the channel to prevent entries being put into overflow again
|
||||
if l > listRInputBuffer/2 {
|
||||
l = listRInputBuffer / 2
|
||||
}
|
||||
l := min(len(overflow), listRInputBuffer/2)
|
||||
wg.Add(l)
|
||||
for _, d := range overflow[:l] {
|
||||
in <- d
|
||||
@@ -2272,7 +2281,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
mu.Unlock()
|
||||
}()
|
||||
// wait until the all workers to finish
|
||||
for i := 0; i < f.ci.Checkers; i++ {
|
||||
for range f.ci.Checkers {
|
||||
e := <-out
|
||||
mu.Lock()
|
||||
// if one worker returns an error early, close the input so all other workers exit
|
||||
@@ -2688,7 +2697,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
if shortcutID != "" {
|
||||
return f.delete(ctx, shortcutID, f.opt.UseTrash)
|
||||
}
|
||||
var trashedFiles = false
|
||||
trashedFiles := false
|
||||
if check {
|
||||
found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
|
||||
if !item.Trashed {
|
||||
@@ -2925,7 +2934,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -3186,6 +3194,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
|
||||
var startPageToken *drive.StartPageToken
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -3524,14 +3533,14 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
|
||||
return f.unTrash(ctx, dir, directoryID, true)
|
||||
}
|
||||
|
||||
// copy file with id to dest
|
||||
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||
// copy or move file with id to dest
|
||||
func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string) (err error) {
|
||||
info, err := f.getFile(ctx, id, f.getFileFields(ctx))
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't find id: %w", err)
|
||||
}
|
||||
if info.MimeType == driveFolderType {
|
||||
return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
|
||||
return fmt.Errorf("can't %s directory use: rclone %s --drive-root-folder-id %s %s %s", operation, operation, id, fs.ConfigString(f), dest)
|
||||
}
|
||||
info.Name = f.opt.Enc.ToStandardName(info.Name)
|
||||
o, err := f.newObjectWithInfo(ctx, info.Name, info)
|
||||
@@ -3552,9 +3561,15 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("copy failed: %w", err)
|
||||
|
||||
var opErr error
|
||||
if operation == "moveid" {
|
||||
_, opErr = operations.Move(ctx, dstFs, nil, destLeaf, o)
|
||||
} else {
|
||||
_, opErr = operations.Copy(ctx, dstFs, nil, destLeaf, o)
|
||||
}
|
||||
if opErr != nil {
|
||||
return fmt.Errorf("%s failed: %w", operation, opErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -3649,41 +3664,47 @@ func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error)
|
||||
|
||||
var commandHelp = []fs.CommandHelp{{
|
||||
Name: "get",
|
||||
Short: "Get command for fetching the drive config parameters",
|
||||
Long: `This is a get command which will be used to fetch the various drive config parameters
|
||||
Short: "Get command for fetching the drive config parameters.",
|
||||
Long: `This is a get command which will be used to fetch the various drive config
|
||||
parameters.
|
||||
|
||||
Usage Examples:
|
||||
Usage examples:
|
||||
|
||||
rclone backend get drive: [-o service_account_file] [-o chunk_size]
|
||||
rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
|
||||
`,
|
||||
` + "```console" + `
|
||||
rclone backend get drive: [-o service_account_file] [-o chunk_size]
|
||||
rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
|
||||
` + "```",
|
||||
Opts: map[string]string{
|
||||
"chunk_size": "show the current upload chunk size",
|
||||
"service_account_file": "show the current service account file",
|
||||
"chunk_size": "Show the current upload chunk size.",
|
||||
"service_account_file": "Show the current service account file.",
|
||||
},
|
||||
}, {
|
||||
Name: "set",
|
||||
Short: "Set command for updating the drive config parameters",
|
||||
Long: `This is a set command which will be used to update the various drive config parameters
|
||||
Short: "Set command for updating the drive config parameters.",
|
||||
Long: `This is a set command which will be used to update the various drive config
|
||||
parameters.
|
||||
|
||||
Usage Examples:
|
||||
Usage examples:
|
||||
|
||||
rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
|
||||
rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
|
||||
`,
|
||||
` + "```console" + `
|
||||
rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
|
||||
rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
|
||||
` + "```",
|
||||
Opts: map[string]string{
|
||||
"chunk_size": "update the current upload chunk size",
|
||||
"service_account_file": "update the current service account file",
|
||||
"chunk_size": "Update the current upload chunk size.",
|
||||
"service_account_file": "Update the current service account file.",
|
||||
},
|
||||
}, {
|
||||
Name: "shortcut",
|
||||
Short: "Create shortcuts from files or directories",
|
||||
Short: "Create shortcuts from files or directories.",
|
||||
Long: `This command creates shortcuts from files or directories.
|
||||
|
||||
Usage:
|
||||
Usage examples:
|
||||
|
||||
rclone backend shortcut drive: source_item destination_shortcut
|
||||
rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
|
||||
` + "```console" + `
|
||||
rclone backend shortcut drive: source_item destination_shortcut
|
||||
rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
|
||||
` + "```" + `
|
||||
|
||||
In the first example this creates a shortcut from the "source_item"
|
||||
which can be a file or a directory to the "destination_shortcut". The
|
||||
@@ -3693,90 +3714,100 @@ from "drive:"
|
||||
In the second example this creates a shortcut from the "source_item"
|
||||
relative to "drive:" to the "destination_shortcut" relative to
|
||||
"drive2:". This may fail with a permission error if the user
|
||||
authenticated with "drive2:" can't read files from "drive:".
|
||||
`,
|
||||
authenticated with "drive2:" can't read files from "drive:".`,
|
||||
Opts: map[string]string{
|
||||
"target": "optional target remote for the shortcut destination",
|
||||
"target": "Optional target remote for the shortcut destination.",
|
||||
},
|
||||
}, {
|
||||
Name: "drives",
|
||||
Short: "List the Shared Drives available to this account",
|
||||
Short: "List the Shared Drives available to this account.",
|
||||
Long: `This command lists the Shared Drives (Team Drives) available to this
|
||||
account.
|
||||
|
||||
Usage:
|
||||
Usage example:
|
||||
|
||||
rclone backend [-o config] drives drive:
|
||||
` + "```console" + `
|
||||
rclone backend [-o config] drives drive:
|
||||
` + "```" + `
|
||||
|
||||
This will return a JSON list of objects like this
|
||||
This will return a JSON list of objects like this:
|
||||
|
||||
[
|
||||
{
|
||||
"id": "0ABCDEF-01234567890",
|
||||
"kind": "drive#teamDrive",
|
||||
"name": "My Drive"
|
||||
},
|
||||
{
|
||||
"id": "0ABCDEFabcdefghijkl",
|
||||
"kind": "drive#teamDrive",
|
||||
"name": "Test Drive"
|
||||
}
|
||||
]
|
||||
` + "```json" + `
|
||||
[
|
||||
{
|
||||
"id": "0ABCDEF-01234567890",
|
||||
"kind": "drive#teamDrive",
|
||||
"name": "My Drive"
|
||||
},
|
||||
{
|
||||
"id": "0ABCDEFabcdefghijkl",
|
||||
"kind": "drive#teamDrive",
|
||||
"name": "Test Drive"
|
||||
}
|
||||
]
|
||||
` + "```" + `
|
||||
|
||||
With the -o config parameter it will output the list in a format
|
||||
suitable for adding to a config file to make aliases for all the
|
||||
drives found and a combined drive.
|
||||
|
||||
[My Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
|
||||
` + "```ini" + `
|
||||
[My Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
|
||||
|
||||
[Test Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
|
||||
[Test Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
|
||||
|
||||
[AllDrives]
|
||||
type = combine
|
||||
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
|
||||
[AllDrives]
|
||||
type = combine
|
||||
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
|
||||
` + "```" + `
|
||||
|
||||
Adding this to the rclone config file will cause those team drives to
|
||||
be accessible with the aliases shown. Any illegal characters will be
|
||||
substituted with "_" and duplicate names will have numbers suffixed.
|
||||
It will also add a remote called AllDrives which shows all the shared
|
||||
drives combined into one directory tree.
|
||||
`,
|
||||
drives combined into one directory tree.`,
|
||||
}, {
|
||||
Name: "untrash",
|
||||
Short: "Untrash files and directories",
|
||||
Short: "Untrash files and directories.",
|
||||
Long: `This command untrashes all the files and directories in the directory
|
||||
passed in recursively.
|
||||
|
||||
Usage:
|
||||
Usage example:
|
||||
|
||||
` + "```console" + `
|
||||
rclone backend untrash drive:directory
|
||||
rclone backend --interactive untrash drive:directory subdir
|
||||
` + "```" + `
|
||||
|
||||
This takes an optional directory to trash which make this easier to
|
||||
use via the API.
|
||||
|
||||
rclone backend untrash drive:directory
|
||||
rclone backend --interactive untrash drive:directory subdir
|
||||
|
||||
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
|
||||
Use the --interactive/-i or --dry-run flag to see what would be restored before
|
||||
restoring it.
|
||||
|
||||
Result:
|
||||
|
||||
{
|
||||
"Untrashed": 17,
|
||||
"Errors": 0
|
||||
}
|
||||
`,
|
||||
` + "```json" + `
|
||||
{
|
||||
"Untrashed": 17,
|
||||
"Errors": 0
|
||||
}
|
||||
` + "```",
|
||||
}, {
|
||||
Name: "copyid",
|
||||
Short: "Copy files by ID",
|
||||
Long: `This command copies files by ID
|
||||
Short: "Copy files by ID.",
|
||||
Long: `This command copies files by ID.
|
||||
|
||||
Usage:
|
||||
Usage examples:
|
||||
|
||||
rclone backend copyid drive: ID path
|
||||
rclone backend copyid drive: ID1 path1 ID2 path2
|
||||
` + "```console" + `
|
||||
rclone backend copyid drive: ID path
|
||||
rclone backend copyid drive: ID1 path1 ID2 path2
|
||||
` + "```" + `
|
||||
|
||||
It copies the drive file with ID given to the path (an rclone path which
|
||||
will be passed internally to rclone copyto). The ID and path pairs can be
|
||||
@@ -3789,58 +3820,89 @@ component will be used as the file name.
|
||||
If the destination is a drive backend then server-side copying will be
|
||||
attempted if possible.
|
||||
|
||||
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
|
||||
`,
|
||||
Use the --interactive/-i or --dry-run flag to see what would be copied before
|
||||
copying.`,
|
||||
}, {
|
||||
Name: "moveid",
|
||||
Short: "Move files by ID.",
|
||||
Long: `This command moves files by ID.
|
||||
|
||||
Usage examples:
|
||||
|
||||
` + "```console" + `
|
||||
rclone backend moveid drive: ID path
|
||||
rclone backend moveid drive: ID1 path1 ID2 path2
|
||||
` + "```" + `
|
||||
|
||||
It moves the drive file with ID given to the path (an rclone path which
|
||||
will be passed internally to rclone moveto).
|
||||
|
||||
The path should end with a / to indicate move the file as named to
|
||||
this directory. If it doesn't end with a / then the last path
|
||||
component will be used as the file name.
|
||||
|
||||
If the destination is a drive backend then server-side moving will be
|
||||
attempted if possible.
|
||||
|
||||
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.`,
|
||||
}, {
|
||||
Name: "exportformats",
|
||||
Short: "Dump the export formats for debug purposes",
|
||||
Short: "Dump the export formats for debug purposes.",
|
||||
}, {
|
||||
Name: "importformats",
|
||||
Short: "Dump the import formats for debug purposes",
|
||||
Short: "Dump the import formats for debug purposes.",
|
||||
}, {
|
||||
Name: "query",
|
||||
Short: "List files using Google Drive query language",
|
||||
Long: `This command lists files based on a query
|
||||
Short: "List files using Google Drive query language.",
|
||||
Long: `This command lists files based on a query.
|
||||
|
||||
Usage:
|
||||
Usage example:
|
||||
|
||||
` + "```console" + `
|
||||
rclone backend query drive: query
|
||||
` + "```" + `
|
||||
|
||||
rclone backend query drive: query
|
||||
|
||||
The query syntax is documented at [Google Drive Search query terms and
|
||||
operators](https://developers.google.com/drive/api/guides/ref-search-terms).
|
||||
|
||||
For example:
|
||||
|
||||
rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
|
||||
` + "```console" + `
|
||||
rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
|
||||
` + "```" + `
|
||||
|
||||
If the query contains literal ' or \ characters, these need to be escaped with
|
||||
\ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
|
||||
file named "foo ' \.txt":
|
||||
|
||||
rclone backend query drive: "name = 'foo \' \\\.txt'"
|
||||
` + "```console" + `
|
||||
rclone backend query drive: "name = 'foo \' \\\.txt'"
|
||||
` + "```" + `
|
||||
|
||||
The result is a JSON array of matches, for example:
|
||||
|
||||
[
|
||||
{
|
||||
"createdTime": "2017-06-29T19:58:28.537Z",
|
||||
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
|
||||
"md5Checksum": "68518d16be0c6fbfab918be61d658032",
|
||||
"mimeType": "text/plain",
|
||||
"modifiedTime": "2024-02-02T10:40:02.874Z",
|
||||
"name": "foo ' \\.txt",
|
||||
"parents": [
|
||||
"0BxAe_BCDE4zkFGZpcWJGek0xbzC"
|
||||
],
|
||||
"resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
|
||||
"sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
|
||||
"size": "311",
|
||||
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
|
||||
}
|
||||
]`,
|
||||
` + "```json" + `
|
||||
[
|
||||
{
|
||||
"createdTime": "2017-06-29T19:58:28.537Z",
|
||||
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
|
||||
"md5Checksum": "68518d16be0c6fbfab918be61d658032",
|
||||
"mimeType": "text/plain",
|
||||
"modifiedTime": "2024-02-02T10:40:02.874Z",
|
||||
"name": "foo ' \\.txt",
|
||||
"parents": [
|
||||
"0BxAe_BCDE4zkFGZpcWJGek0xbzC"
|
||||
],
|
||||
"resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
|
||||
"sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
|
||||
"size": "311",
|
||||
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
|
||||
}
|
||||
]
|
||||
` + "```",
|
||||
}, {
|
||||
Name: "rescue",
|
||||
Short: "Rescue or delete any orphaned files",
|
||||
Short: "Rescue or delete any orphaned files.",
|
||||
Long: `This command rescues or deletes any orphaned files or directories.
|
||||
|
||||
Sometimes files can get orphaned in Google Drive. This means that they
|
||||
@@ -3849,26 +3911,31 @@ are no longer in any folder in Google Drive.
|
||||
This command finds those files and either rescues them to a directory
|
||||
you specify or deletes them.
|
||||
|
||||
Usage:
|
||||
|
||||
This can be used in 3 ways.
|
||||
|
||||
First, list all orphaned files
|
||||
First, list all orphaned files:
|
||||
|
||||
rclone backend rescue drive:
|
||||
` + "```console" + `
|
||||
rclone backend rescue drive:
|
||||
` + "```" + `
|
||||
|
||||
Second rescue all orphaned files to the directory indicated
|
||||
Second rescue all orphaned files to the directory indicated:
|
||||
|
||||
rclone backend rescue drive: "relative/path/to/rescue/directory"
|
||||
` + "```console" + `
|
||||
rclone backend rescue drive: "relative/path/to/rescue/directory"
|
||||
` + "```" + `
|
||||
|
||||
e.g. To rescue all orphans to a directory called "Orphans" in the top level
|
||||
E.g. to rescue all orphans to a directory called "Orphans" in the top level:
|
||||
|
||||
rclone backend rescue drive: Orphans
|
||||
` + "```console" + `
|
||||
rclone backend rescue drive: Orphans
|
||||
` + "```" + `
|
||||
|
||||
Third delete all orphaned files to the trash
|
||||
Third delete all orphaned files to the trash:
|
||||
|
||||
rclone backend rescue drive: -o delete
|
||||
`,
|
||||
` + "```console" + `
|
||||
rclone backend rescue drive: -o delete
|
||||
` + "```",
|
||||
}}
|
||||
|
||||
// Command the backend to run a named command
|
||||
@@ -3880,7 +3947,7 @@ Third delete all orphaned files to the trash
|
||||
// The result should be capable of being JSON encoded
|
||||
// If it is a string or a []string it will be shown to the user
|
||||
// otherwise it will be JSON encoded and shown to the user like that
|
||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
||||
switch name {
|
||||
case "get":
|
||||
out := make(map[string]string)
|
||||
@@ -3969,16 +4036,16 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
dir = arg[0]
|
||||
}
|
||||
return f.unTrashDir(ctx, dir, true)
|
||||
case "copyid":
|
||||
case "copyid", "moveid":
|
||||
if len(arg)%2 != 0 {
|
||||
return nil, errors.New("need an even number of arguments")
|
||||
}
|
||||
for len(arg) > 0 {
|
||||
id, dest := arg[0], arg[1]
|
||||
arg = arg[2:]
|
||||
err = f.copyID(ctx, id, dest)
|
||||
err = f.copyOrMoveID(ctx, name, id, dest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
|
||||
return nil, fmt.Errorf("failed %s %q to %q: %w", name, id, dest, err)
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
@@ -3989,14 +4056,13 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
case "query":
|
||||
if len(arg) == 1 {
|
||||
query := arg[0]
|
||||
var results, err = f.query(ctx, query)
|
||||
results, err := f.query(ctx, query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
|
||||
}
|
||||
return results, nil
|
||||
} else {
|
||||
return nil, errors.New("need a query argument")
|
||||
}
|
||||
return nil, errors.New("need a query argument")
|
||||
case "rescue":
|
||||
dirID := ""
|
||||
_, delete := opt["delete"]
|
||||
@@ -4056,6 +4122,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
|
||||
return "", hash.ErrUnsupported
|
||||
@@ -4070,7 +4137,8 @@ func (o *baseObject) Size() int64 {
|
||||
|
||||
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
|
||||
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
|
||||
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
|
||||
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
|
||||
) {
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
@@ -4283,12 +4351,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
return o.baseObject.open(ctx, o.url, options...)
|
||||
}
|
||||
|
||||
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Update the size with what we are reading as it can change from
|
||||
// the HEAD in the listing to this GET. This stops rclone marking
|
||||
// the transfer as corrupted.
|
||||
var offset, end int64 = 0, -1
|
||||
var newOptions = options[:0]
|
||||
newOptions := options[:0]
|
||||
for _, o := range options {
|
||||
// Note that Range requests don't work on Google docs:
|
||||
// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
|
||||
@@ -4315,9 +4384,10 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
var offset, limit int64 = 0, -1
|
||||
var data = o.content
|
||||
data := o.content
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
@@ -4342,7 +4412,8 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
|
||||
}
|
||||
|
||||
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
|
||||
src fs.ObjectInfo) (info *drive.File, err error) {
|
||||
src fs.ObjectInfo,
|
||||
) (info *drive.File, err error) {
|
||||
// Make the API request to upload metadata and file data.
|
||||
size := src.Size()
|
||||
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
|
||||
@@ -4420,6 +4491,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
srcMimeType := fs.MimeType(ctx, src)
|
||||
importMimeType := ""
|
||||
@@ -4515,6 +4587,7 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
|
||||
func (o *documentObject) ext() string {
|
||||
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
|
||||
}
|
||||
|
||||
func (o *linkObject) ext() string {
|
||||
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
|
||||
}
|
||||
@@ -4598,6 +4671,7 @@ var (
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.ListPer = (*Fs)(nil)
|
||||
_ fs.MergeDirser = (*Fs)(nil)
|
||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
||||
|
||||
@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
|
||||
require.NoError(t, f.Purge(ctx, "trashDir"))
|
||||
}
|
||||
|
||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
|
||||
func (f *Fs) InternalTestCopyID(t *testing.T) {
|
||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
|
||||
func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
obj, err := f.NewObject(ctx, existingFile)
|
||||
require.NoError(t, err)
|
||||
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("BadID", func(t *testing.T) {
|
||||
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
|
||||
err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "couldn't find id")
|
||||
})
|
||||
@@ -506,19 +506,31 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
|
||||
t.Run("Directory", func(t *testing.T) {
|
||||
rootID, err := f.dirCache.RootID(ctx, false)
|
||||
require.NoError(t, err)
|
||||
err = f.copyID(ctx, rootID, dir+"/")
|
||||
err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "can't copy directory")
|
||||
assert.Contains(t, err.Error(), "can't moveid directory")
|
||||
})
|
||||
|
||||
t.Run("WithoutDestName", func(t *testing.T) {
|
||||
err = f.copyID(ctx, o.id, dir+"/")
|
||||
t.Run("MoveWithoutDestName", func(t *testing.T) {
|
||||
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
|
||||
require.NoError(t, err)
|
||||
checkFile(path.Base(existingFile))
|
||||
})
|
||||
|
||||
t.Run("WithDestName", func(t *testing.T) {
|
||||
err = f.copyID(ctx, o.id, dir+"/potato.txt")
|
||||
t.Run("CopyWithoutDestName", func(t *testing.T) {
|
||||
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
|
||||
require.NoError(t, err)
|
||||
checkFile(path.Base(existingFile))
|
||||
})
|
||||
|
||||
t.Run("MoveWithDestName", func(t *testing.T) {
|
||||
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
|
||||
require.NoError(t, err)
|
||||
checkFile("potato.txt")
|
||||
})
|
||||
|
||||
t.Run("CopyWithDestName", func(t *testing.T) {
|
||||
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
|
||||
require.NoError(t, err)
|
||||
checkFile("potato.txt")
|
||||
})
|
||||
@@ -647,7 +659,7 @@ func (f *Fs) InternalTest(t *testing.T) {
|
||||
})
|
||||
t.Run("Shortcuts", f.InternalTestShortcuts)
|
||||
t.Run("UnTrash", f.InternalTestUnTrash)
|
||||
t.Run("CopyID", f.InternalTestCopyID)
|
||||
t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
|
||||
t.Run("Query", f.InternalTestQuery)
|
||||
t.Run("AgeQuery", f.InternalTestAgeQuery)
|
||||
t.Run("ShouldRetry", f.InternalTestShouldRetry)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"maps"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -324,9 +325,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
|
||||
metadata := make(fs.Metadata, 16)
|
||||
|
||||
// Dump user metadata first as it overrides system metadata
|
||||
for k, v := range info.Properties {
|
||||
metadata[k] = v
|
||||
}
|
||||
maps.Copy(metadata, info.Properties)
|
||||
|
||||
// System metadata
|
||||
metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
|
||||
@@ -387,7 +386,6 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
|
||||
g.SetLimit(o.fs.ci.Checkers)
|
||||
var mu sync.Mutex // protect the info.Permissions from concurrent writes
|
||||
for _, permissionID := range info.PermissionIds {
|
||||
permissionID := permissionID
|
||||
g.Go(func() error {
|
||||
// must fetch the team drive ones individually to check the inherited flag
|
||||
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
|
||||
@@ -508,7 +506,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
|
||||
//
|
||||
// It returns a callback which should be called to finish the updates
|
||||
// after the data is uploaded.
|
||||
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
|
||||
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
|
||||
callbackFns := []updateMetadataFn{}
|
||||
callback = func(ctx context.Context, info *drive.File) error {
|
||||
for _, fn := range callbackFns {
|
||||
@@ -521,7 +519,6 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
|
||||
}
|
||||
// merge metadata into request and user metadata
|
||||
for k, v := range meta {
|
||||
k, v := k, v
|
||||
// parse a boolean from v and write into out
|
||||
parseBool := func(out *bool) error {
|
||||
b, err := strconv.ParseBool(v)
|
||||
@@ -533,7 +530,9 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
|
||||
}
|
||||
switch k {
|
||||
case "copy-requires-writer-permission":
|
||||
if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
|
||||
if isFolder {
|
||||
fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
|
||||
} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case "writers-can-share":
|
||||
@@ -630,7 +629,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
|
||||
callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
|
||||
}
|
||||
|
||||
@@ -177,10 +177,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
|
||||
if start >= rx.ContentLength {
|
||||
break
|
||||
}
|
||||
reqSize = rx.ContentLength - start
|
||||
if reqSize >= int64(rx.f.opt.ChunkSize) {
|
||||
reqSize = int64(rx.f.opt.ChunkSize)
|
||||
}
|
||||
reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))
|
||||
chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
|
||||
} else {
|
||||
// If size unknown read into buffer
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
)
|
||||
|
||||
// finishBatch commits the batch, returning a batch status to poll or maybe complete
|
||||
@@ -21,14 +20,10 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
complete, err = f.srv.UploadSessionFinishBatchV2(arg)
|
||||
// If error is insufficient space then don't retry
|
||||
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
||||
err = fserrors.NoRetryError(err)
|
||||
return false, err
|
||||
}
|
||||
if retry, err := shouldRetryExclude(ctx, err); !retry {
|
||||
return retry, err
|
||||
}
|
||||
// after the first chunk is uploaded, we retry everything
|
||||
// after the first chunk is uploaded, we retry everything except the excluded errors
|
||||
return err != nil, err
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -55,10 +55,7 @@ func (d *digest) Write(p []byte) (n int, err error) {
|
||||
n = len(p)
|
||||
for len(p) > 0 {
|
||||
d.writtenMore = true
|
||||
toWrite := bytesPerBlock - d.n
|
||||
if toWrite > len(p) {
|
||||
toWrite = len(p)
|
||||
}
|
||||
toWrite := min(bytesPerBlock-d.n, len(p))
|
||||
_, err = d.blockHash.Write(p[:toWrite])
|
||||
if err != nil {
|
||||
panic(hashReturnedError)
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
|
||||
func testChunk(t *testing.T, chunk int) {
|
||||
data := make([]byte, chunk)
|
||||
for i := 0; i < chunk; i++ {
|
||||
for i := range chunk {
|
||||
data[i] = 'A'
|
||||
}
|
||||
for _, test := range []struct {
|
||||
|
||||
@@ -47,6 +47,8 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/batcher"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
@@ -91,9 +93,12 @@ const (
|
||||
maxFileNameLength = 255
|
||||
)
|
||||
|
||||
type exportAPIFormat string
|
||||
type exportExtension string // dotless
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
dropboxConfig = &oauth2.Config{
|
||||
dropboxConfig = &oauthutil.Config{
|
||||
Scopes: []string{
|
||||
"files.metadata.write",
|
||||
"files.content.write",
|
||||
@@ -108,7 +113,8 @@ var (
|
||||
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
|
||||
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
|
||||
// },
|
||||
Endpoint: dropbox.OAuthEndpoint(""),
|
||||
AuthURL: dropbox.OAuthEndpoint("").AuthURL,
|
||||
TokenURL: dropbox.OAuthEndpoint("").TokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
@@ -130,10 +136,20 @@ var (
|
||||
DefaultTimeoutAsync: 10 * time.Second,
|
||||
DefaultBatchSizeAsync: 100,
|
||||
}
|
||||
|
||||
exportKnownAPIFormats = map[exportAPIFormat]exportExtension{
|
||||
"markdown": "md",
|
||||
"html": "html",
|
||||
}
|
||||
// Populated based on exportKnownAPIFormats
|
||||
exportKnownExtensions = map[exportExtension]exportAPIFormat{}
|
||||
|
||||
paperExtension = ".paper"
|
||||
paperTemplateExtension = ".papert"
|
||||
)
|
||||
|
||||
// Gets an oauth config with the right scopes
|
||||
func getOauthConfig(m configmap.Mapper) *oauth2.Config {
|
||||
func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
|
||||
// If not impersonating, use standard scopes
|
||||
if impersonate, _ := m.Get("impersonate"); impersonate == "" {
|
||||
return dropboxConfig
|
||||
@@ -245,23 +261,61 @@ folders.`,
|
||||
Help: "Specify a different Dropbox namespace ID to use as the root for all paths.",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
|
||||
}, {
|
||||
Name: "export_formats",
|
||||
Help: `Comma separated list of preferred formats for exporting files
|
||||
|
||||
Certain Dropbox files can only be accessed by exporting them to another format.
|
||||
These include Dropbox Paper documents.
|
||||
|
||||
For each such file, rclone will choose the first format on this list that Dropbox
|
||||
considers valid. If none is valid, it will choose Dropbox's default format.
|
||||
|
||||
Known formats include: "html", "md" (markdown)`,
|
||||
Default: fs.CommaSepList{"html", "md"},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_exports",
|
||||
Help: "Skip exportable files in all listings.\n\nIf given, exportable files practically become invisible to rclone.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "show_all_exports",
|
||||
Default: false,
|
||||
Help: `Show all exportable files in listings.
|
||||
|
||||
Adding this flag will allow all exportable files to be server side copied.
|
||||
Note that rclone doesn't add extensions to the exportable file names in this mode.
|
||||
|
||||
Do **not** use this flag when trying to download exportable files - rclone
|
||||
will fail to download them.
|
||||
`,
|
||||
Advanced: true,
|
||||
},
|
||||
}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
|
||||
})
|
||||
|
||||
for apiFormat, ext := range exportKnownAPIFormats {
|
||||
exportKnownExtensions[ext] = apiFormat
|
||||
}
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
SharedFiles bool `config:"shared_files"`
|
||||
SharedFolders bool `config:"shared_folders"`
|
||||
BatchMode string `config:"batch_mode"`
|
||||
BatchSize int `config:"batch_size"`
|
||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||
AsyncBatch bool `config:"async_batch"`
|
||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
RootNsid string `config:"root_namespace"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
SharedFiles bool `config:"shared_files"`
|
||||
SharedFolders bool `config:"shared_folders"`
|
||||
BatchMode string `config:"batch_mode"`
|
||||
BatchSize int `config:"batch_size"`
|
||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||
AsyncBatch bool `config:"async_batch"`
|
||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
RootNsid string `config:"root_namespace"`
|
||||
ExportFormats fs.CommaSepList `config:"export_formats"`
|
||||
SkipExports bool `config:"skip_exports"`
|
||||
ShowAllExports bool `config:"show_all_exports"`
|
||||
}
|
||||
|
||||
// Fs represents a remote dropbox server
|
||||
@@ -281,8 +335,18 @@ type Fs struct {
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
ns string // The namespace we are using or "" for none
|
||||
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
|
||||
exportExts []exportExtension
|
||||
}
|
||||
|
||||
type exportType int
|
||||
|
||||
const (
|
||||
notExport exportType = iota // a regular file
|
||||
exportHide // should be hidden
|
||||
exportListOnly // listable, but can't export
|
||||
exportExportable // can export
|
||||
)
|
||||
|
||||
// Object describes a dropbox object
|
||||
//
|
||||
// Dropbox Objects always have full metadata
|
||||
@@ -294,6 +358,9 @@ type Object struct {
|
||||
bytes int64 // size of the object
|
||||
modTime time.Time // time it was last modified
|
||||
hash string // content_hash of the object
|
||||
|
||||
exportType exportType
|
||||
exportAPIFormat exportAPIFormat
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
@@ -316,32 +383,46 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||
// retried. It returns the err as a convenience
|
||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
// Some specific errors which should be excluded from retries
|
||||
func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
|
||||
if err == nil {
|
||||
return false, err
|
||||
}
|
||||
errString := err.Error()
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
// First check for specific errors
|
||||
//
|
||||
// These come back from the SDK in a whole host of different
|
||||
// error types, but there doesn't seem to be a consistent way
|
||||
// of reading the error cause, so here we just check using the
|
||||
// error string which isn't perfect but does the job.
|
||||
errString := err.Error()
|
||||
if strings.Contains(errString, "insufficient_space") {
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if strings.Contains(errString, "malformed_path") {
|
||||
return false, fserrors.NoRetryError(err)
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||
// retried. It returns the err as a convenience
|
||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if retry, err := shouldRetryExclude(ctx, err); !retry {
|
||||
return retry, err
|
||||
}
|
||||
// Then handle any official Retry-After header from Dropbox's SDK
|
||||
switch e := err.(type) {
|
||||
case auth.RateLimitAPIError:
|
||||
if e.RateLimitError.RetryAfter > 0 {
|
||||
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
||||
fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
|
||||
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
// Keep old behavior for backward compatibility
|
||||
errString := err.Error()
|
||||
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
|
||||
return true, err
|
||||
}
|
||||
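shouldRetryExclude pulls the never-retry cases (insufficient_space, malformed_path, cancelled context) out of shouldRetry so that finishBatch can reuse them. A hedged sketch of the pacer/retry calling pattern, reusing the GetMetadata call that appears later in this diff; the path is an example value:

```go
// Illustrative fragment; f is the dropbox *Fs and ctx a context.Context.
var entry files.IsMetadata
err := f.pacer.Call(func() (bool, error) {
	var err error
	entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
		Path: f.opt.Enc.FromStandardPath("/example.txt"),
	})
	// shouldRetry classifies the error and applies any Retry-After hint
	return shouldRetry(ctx, err)
})
```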
@@ -420,6 +501,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
HeaderGenerator: f.headerGenerator,
|
||||
}
|
||||
|
||||
for _, e := range opt.ExportFormats {
|
||||
ext := exportExtension(e)
|
||||
if exportKnownExtensions[ext] == "" {
|
||||
return nil, fmt.Errorf("dropbox: unknown export format '%s'", e)
|
||||
}
|
||||
f.exportExts = append(f.exportExts, ext)
|
||||
}
|
||||
|
||||
// unauthorized config for endpoints that fail with auth
|
||||
ucfg := dropbox.Config{
|
||||
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
||||
@@ -572,38 +661,126 @@ func (f *Fs) setRoot(root string) {
|
||||
}
|
||||
}
|
||||
|
||||
type getMetadataResult struct {
|
||||
entry files.IsMetadata
|
||||
notFound bool
|
||||
err error
|
||||
}
|
||||
|
||||
// getMetadata gets the metadata for a file or directory
|
||||
func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
|
||||
func (f *Fs) getMetadata(ctx context.Context, objPath string) (res getMetadataResult) {
|
||||
res.err = f.pacer.Call(func() (bool, error) {
|
||||
res.entry, res.err = f.srv.GetMetadata(&files.GetMetadataArg{
|
||||
Path: f.opt.Enc.FromStandardPath(objPath),
|
||||
})
|
||||
return shouldRetry(ctx, err)
|
||||
return shouldRetry(ctx, res.err)
|
||||
})
|
||||
if err != nil {
|
||||
switch e := err.(type) {
|
||||
if res.err != nil {
|
||||
switch e := res.err.(type) {
|
||||
case files.GetMetadataAPIError:
|
||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
|
||||
notFound = true
|
||||
err = nil
|
||||
res.notFound = true
|
||||
res.err = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getFileMetadata gets the metadata for a file
|
||||
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
|
||||
entry, notFound, err := f.getMetadata(ctx, filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Get metadata such that the result would be exported with the given extension
|
||||
// Return a channel that will eventually receive the metadata
|
||||
func (f *Fs) getMetadataForExt(ctx context.Context, filePath string, wantExportExtension exportExtension) chan getMetadataResult {
|
||||
ch := make(chan getMetadataResult, 1)
|
||||
wantDownloadable := (wantExportExtension == "")
|
||||
go func() {
|
||||
defer close(ch)
|
||||
|
||||
res := f.getMetadata(ctx, filePath)
|
||||
info, ok := res.entry.(*files.FileMetadata)
|
||||
if !ok { // Can't check anything about file, just return what we have
|
||||
ch <- res
|
||||
return
|
||||
}
|
||||
|
||||
// Return notFound if downloadability or extension doesn't match
|
||||
if wantDownloadable != info.IsDownloadable {
|
||||
ch <- getMetadataResult{notFound: true}
|
||||
return
|
||||
}
|
||||
if !info.IsDownloadable {
|
||||
_, ext := f.chooseExportFormat(info)
|
||||
if ext != wantExportExtension {
|
||||
ch <- getMetadataResult{notFound: true}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Return our real result or error
|
||||
ch <- res
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// For a given rclone-path, figure out what the Dropbox-path may be, in order of preference.
|
||||
// Multiple paths might be plausible, due to export path munging.
|
||||
func (f *Fs) possibleMetadatas(ctx context.Context, filePath string) (ret []<-chan getMetadataResult) {
|
||||
ret = []<-chan getMetadataResult{}
|
||||
|
||||
// Prefer an exact match
|
||||
ret = append(ret, f.getMetadataForExt(ctx, filePath, ""))
|
||||
|
||||
// Check if we're plausibly an export path, otherwise we're done
|
||||
if f.opt.SkipExports || f.opt.ShowAllExports {
|
||||
return
|
||||
}
|
||||
if notFound {
|
||||
dotted := path.Ext(filePath)
|
||||
if dotted == "" {
|
||||
return
|
||||
}
|
||||
ext := exportExtension(dotted[1:])
|
||||
if exportKnownExtensions[ext] == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// We might be an export path! Try all possibilities
|
||||
base := strings.TrimSuffix(filePath, dotted)
|
||||
|
||||
// `foo.papert.md` will only come from `foo.papert`. Never check something like `foo.papert.paper`
|
||||
if strings.HasSuffix(base, paperTemplateExtension) {
|
||||
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, try both `foo.md` coming from `foo`, or from `foo.paper`
|
||||
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
|
||||
ret = append(ret, f.getMetadataForExt(ctx, base+paperExtension, ext))
|
||||
return
|
||||
}
|
||||
|
||||
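To illustrate the preference order encoded above: a lookup for notes.md first tries an exact match, then an export of notes, then an export of notes.paper, while tpl.papert.md is only ever tried against tpl.papert. A simplified sketch of that candidate generation (it skips the check against the configured export extensions):

package main

import (
	"fmt"
	"path"
	"strings"
)

// candidates lists the Dropbox paths to try, in order, for a requested
// rclone path (simplified sketch; extensions are not validated here).
func candidates(p string) []string {
	out := []string{p} // prefer an exact match
	ext := path.Ext(p)
	if ext == "" {
		return out
	}
	base := strings.TrimSuffix(p, ext)
	if strings.HasSuffix(base, ".papert") {
		return append(out, base) // template exports only come from .papert
	}
	return append(out, base, base+".paper")
}

func main() {
	fmt.Println(candidates("notes.md"))      // [notes.md notes notes.paper]
	fmt.Println(candidates("tpl.papert.md")) // [tpl.papert.md tpl.papert]
}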
// getFileMetadata gets the metadata for a file
|
||||
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileMetadata, error) {
|
||||
var res getMetadataResult
|
||||
|
||||
// Try all possible metadatas
|
||||
possibleMetadatas := f.possibleMetadatas(ctx, filePath)
|
||||
for _, ch := range possibleMetadatas {
|
||||
res = <-ch
|
||||
|
||||
if res.err != nil {
|
||||
return nil, res.err
|
||||
}
|
||||
if !res.notFound {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if res.notFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
fileInfo, ok := entry.(*files.FileMetadata)
|
||||
|
||||
fileInfo, ok := res.entry.(*files.FileMetadata)
|
||||
if !ok {
|
||||
if _, ok = entry.(*files.FolderMetadata); ok {
|
||||
if _, ok = res.entry.(*files.FolderMetadata); ok {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
return nil, fs.ErrorNotAFile
|
||||
@@ -612,15 +789,15 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
|
||||
}
|
||||
|
||||
// getDirMetadata gets the metadata for a directory
|
||||
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
|
||||
entry, notFound, err := f.getMetadata(ctx, dirPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (*files.FolderMetadata, error) {
|
||||
res := f.getMetadata(ctx, dirPath)
|
||||
if res.err != nil {
|
||||
return nil, res.err
|
||||
}
|
||||
if notFound {
|
||||
if res.notFound {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
dirInfo, ok := entry.(*files.FolderMetadata)
|
||||
dirInfo, ok := res.entry.(*files.FolderMetadata)
|
||||
if !ok {
|
||||
return nil, fs.ErrorIsFile
|
||||
}
|
||||
@@ -658,7 +835,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
|
||||
// listSharedFolders lists all available shared folders mounted and not mounted
|
||||
// we'll need the id later so we have to return them in original format
|
||||
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||
func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
|
||||
started := false
|
||||
var res *sharing.ListFoldersResult
|
||||
for {
|
||||
@@ -671,7 +848,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
started = true
|
||||
} else {
|
||||
@@ -683,15 +860,15 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
return fmt.Errorf("list continue: %w", err)
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
leaf := f.opt.Enc.ToStandardName(entry.Name)
|
||||
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
|
||||
entries = append(entries, d)
|
||||
err = callback(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
if res.Cursor == "" {
|
||||
@@ -699,21 +876,25 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
||||
}
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// findSharedFolder finds the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by its name
// so our only option is to iterate over all shared folders
|
||||
func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) {
|
||||
entries, err := f.listSharedFolders(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
errFoundFile := errors.New("found file")
|
||||
err = f.listSharedFolders(ctx, func(entry fs.DirEntry) error {
|
||||
if entry.(*fs.Dir).Remote() == name {
|
||||
return entry.(*fs.Dir).ID(), nil
|
||||
id = entry.(*fs.Dir).ID()
|
||||
return errFoundFile
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if errors.Is(err, errFoundFile) {
|
||||
return id, nil
|
||||
} else if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return "", fs.ErrorDirNotFound
|
||||
}
|
||||
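findSharedFolder (and findSharedFile below) stop the callback-driven listing early by returning a sentinel error and detecting it with errors.Is. A self-contained sketch of that early-exit pattern, with illustrative names rather than the rclone types:

package main

import (
	"errors"
	"fmt"
)

var errFound = errors.New("found")

// forEach calls fn for every item and stops at the first error.
func forEach(items []string, fn func(string) error) error {
	for _, it := range items {
		if err := fn(it); err != nil {
			return err
		}
	}
	return nil
}

// find aborts the iteration with a sentinel error once a match is seen.
func find(items []string, want string) (string, error) {
	var got string
	err := forEach(items, func(it string) error {
		if it == want {
			got = it
			return errFound
		}
		return nil
	})
	if errors.Is(err, errFound) {
		return got, nil
	}
	if err != nil {
		return "", err
	}
	return "", errors.New("not found")
}

func main() {
	fmt.Println(find([]string{"a", "b", "c"}, "b"))
}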
@@ -732,7 +913,7 @@ func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {
|
||||
|
||||
// listReceivedFiles lists files the user has been given access to (note this means individual
// files, not files contained in shared folders)
|
||||
func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||
func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
|
||||
started := false
|
||||
var res *sharing.ListFilesResult
|
||||
for {
|
||||
@@ -745,7 +926,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
started = true
|
||||
} else {
|
||||
@@ -757,7 +938,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
return fmt.Errorf("list continue: %w", err)
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
@@ -770,26 +951,33 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
||||
modTime: *entry.TimeInvited,
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
err = callback(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
if res.Cursor == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
|
||||
files, err := f.listReceivedFiles(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, entry := range files {
|
||||
errFoundFile := errors.New("found file")
|
||||
err = f.listReceivedFiles(ctx, func(entry fs.DirEntry) error {
|
||||
if entry.(*Object).remote == name {
|
||||
return entry.(*Object), nil
|
||||
o = entry.(*Object)
|
||||
return errFoundFile
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if errors.Is(err, errFoundFile) {
|
||||
return o, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
@@ -804,11 +992,37 @@ func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err er
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
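List is now a thin wrapper over the paged, callback-driven ListP. A minimal sketch of that adapter pattern, standing in for the rclone list helpers whose API is only partly visible in this diff:

package main

import "fmt"

// listP is the paged primitive: it pushes entries to the callback as it
// finds them and stops if the callback returns an error.
func listP(callback func(string) error) error {
	for _, e := range []string{"a.txt", "b.txt", "dir/"} {
		if err := callback(e); err != nil {
			return err
		}
	}
	return nil
}

// list adapts the callback API back into a slice-returning one.
func list() ([]string, error) {
	var entries []string
	err := listP(func(e string) error {
		entries = append(entries, e)
		return nil
	})
	return entries, err
}

func main() {
	fmt.Println(list())
}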
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
list := list.NewHelper(callback)
|
||||
if f.opt.SharedFiles {
|
||||
return f.listReceivedFiles(ctx)
|
||||
err := f.listReceivedFiles(ctx, list.Add)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return list.Flush()
|
||||
}
|
||||
if f.opt.SharedFolders {
|
||||
return f.listSharedFolders(ctx)
|
||||
err := f.listSharedFolders(ctx, list.Add)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
root := f.slashRoot
|
||||
@@ -820,16 +1034,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
var res *files.ListFolderResult
|
||||
for {
|
||||
if !started {
|
||||
arg := files.ListFolderArg{
|
||||
Path: f.opt.Enc.FromStandardPath(root),
|
||||
Recursive: false,
|
||||
Limit: 1000,
|
||||
}
|
||||
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(root))
|
||||
arg.Recursive = false
|
||||
arg.Limit = 1000
|
||||
|
||||
if root == "/" {
|
||||
arg.Path = "" // Specify root folder as empty string
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
res, err = f.srv.ListFolder(&arg)
|
||||
res, err = f.srv.ListFolder(arg)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -839,7 +1052,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
err = fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
started = true
|
||||
} else {
|
||||
@@ -851,7 +1064,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
return fmt.Errorf("list continue: %w", err)
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
@@ -876,20 +1089,28 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
remote := path.Join(dir, leaf)
|
||||
if folderInfo != nil {
|
||||
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
|
||||
entries = append(entries, d)
|
||||
err = list.Add(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if fileInfo != nil {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if o.(*Object).exportType.listable() {
|
||||
err = list.Add(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
}
|
||||
if !res.HasMore {
|
||||
break
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// Put the object
|
||||
@@ -968,16 +1189,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
|
||||
}
|
||||
|
||||
// check directory empty
|
||||
arg := files.ListFolderArg{
|
||||
Path: encRoot,
|
||||
Recursive: false,
|
||||
}
|
||||
arg := files.NewListFolderArg(encRoot)
|
||||
arg.Recursive = false
|
||||
if root == "/" {
|
||||
arg.Path = "" // Specify root folder as empty string
|
||||
}
|
||||
var res *files.ListFolderResult
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
res, err = f.srv.ListFolder(&arg)
|
||||
res, err = f.srv.ListFolder(arg)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1020,13 +1239,20 @@ func (f *Fs) Precision() time.Duration {
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
// Find and remove existing object
|
||||
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cleanup(&err)
|
||||
|
||||
// Temporary Object under construction
|
||||
dstObj := &Object{
|
||||
fs: f,
|
||||
@@ -1040,7 +1266,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
|
||||
},
|
||||
}
|
||||
var err error
|
||||
var result *files.RelocationResult
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
result, err = f.srv.CopyV2(&arg)
|
||||
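Note the Copy signature change to named returns (dst fs.Object, err error): it lets the deferred cleanup returned by operations.RemoveExisting observe the function's final error and decide whether to undo the removal. A generic sketch of that Go pattern (removeExisting here is a stand-in, not the rclone helper):

package main

import (
	"errors"
	"fmt"
)

// removeExisting pretends to park an existing object and returns a cleanup
// that inspects *errp when the surrounding function returns.
func removeExisting(name string) (cleanup func(errp *error), err error) {
	fmt.Println("parked", name)
	return func(errp *error) {
		if *errp != nil {
			fmt.Println("operation failed, restoring", name)
		} else {
			fmt.Println("operation succeeded, removing parked", name)
		}
	}, nil
}

func copyObject(fail bool) (err error) {
	cleanup, err := removeExisting("dst")
	if err != nil {
		return err
	}
	defer cleanup(&err) // sees the final err thanks to the named return
	if fail {
		return errors.New("copy failed")
	}
	return nil
}

func main() {
	fmt.Println(copyObject(false))
	fmt.Println(copyObject(true))
}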
@@ -1105,6 +1330,16 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
var result *files.RelocationResult
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
result, err = f.srv.MoveV2(&arg)
|
||||
switch e := err.(type) {
|
||||
case files.MoveV2APIError:
|
||||
// There seems to be a bit of eventual consistency here which causes this to
|
||||
// fail on just created objects
|
||||
// See: https://github.com/rclone/rclone/issues/8881
|
||||
if e.EndpointError != nil && e.EndpointError.FromLookup != nil && e.EndpointError.FromLookup.Tag == files.LookupErrorNotFound {
|
||||
fs.Debugf(srcObj, "Retrying move on %v error", err)
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1152,6 +1387,16 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
|
||||
// Some plans can't create links with expiry
|
||||
fs.Debugf(absPath, "can't create link with expiry, trying without")
|
||||
createArg.Settings.Expires = nil
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil && strings.Contains(err.Error(),
|
||||
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
|
||||
fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
|
||||
@@ -1255,9 +1500,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
}
|
||||
}
|
||||
usage = &fs.Usage{
|
||||
Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(int64(used)), // bytes in use
|
||||
Free: fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
|
||||
Total: fs.NewUsageValue(total), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(used), // bytes in use
|
||||
Free: fs.NewUsageValue(total - used), // bytes which can be uploaded before reaching the quota
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
@@ -1316,16 +1561,14 @@ func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error)
|
||||
var startCursor *files.ListFolderGetLatestCursorResult
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
arg := files.ListFolderArg{
|
||||
Path: f.opt.Enc.FromStandardPath(f.slashRoot),
|
||||
Recursive: true,
|
||||
}
|
||||
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(f.slashRoot))
|
||||
arg.Recursive = true
|
||||
|
||||
if arg.Path == "/" {
|
||||
arg.Path = ""
|
||||
}
|
||||
|
||||
startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
|
||||
startCursor, err = f.srv.ListFolderGetLatestCursor(arg)
|
||||
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
@@ -1429,8 +1672,50 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) chooseExportFormat(info *files.FileMetadata) (exportAPIFormat, exportExtension) {
|
||||
// Find API export formats Dropbox supports for this file
|
||||
// Sometimes Dropbox lists a format in ExportAs but not ExportOptions, so check both
|
||||
ei := info.ExportInfo
|
||||
dropboxFormatStrings := append([]string{ei.ExportAs}, ei.ExportOptions...)
|
||||
|
||||
// Find which extensions these correspond to
|
||||
exportExtensions := map[exportExtension]exportAPIFormat{}
|
||||
var dropboxPreferredAPIFormat exportAPIFormat
|
||||
var dropboxPreferredExtension exportExtension
|
||||
for _, format := range dropboxFormatStrings {
|
||||
apiFormat := exportAPIFormat(format)
|
||||
// Only consider formats we know about
|
||||
if ext, ok := exportKnownAPIFormats[apiFormat]; ok {
|
||||
if dropboxPreferredAPIFormat == "" {
|
||||
dropboxPreferredAPIFormat = apiFormat
|
||||
dropboxPreferredExtension = ext
|
||||
}
|
||||
exportExtensions[ext] = apiFormat
|
||||
}
|
||||
}
|
||||
|
||||
// See if the user picked a valid extension
|
||||
for _, ext := range f.exportExts {
|
||||
if apiFormat, ok := exportExtensions[ext]; ok {
|
||||
return apiFormat, ext
|
||||
}
|
||||
}
|
||||
|
||||
// If no matches, prefer the first valid format Dropbox lists
|
||||
return dropboxPreferredAPIFormat, dropboxPreferredExtension
|
||||
}
|
||||
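A standalone model of the selection logic in chooseExportFormat, using a made-up format table instead of Dropbox's real ExportInfo: the user's ordered extension preferences win when one of them is offered, otherwise the first known format the service lists is used.

package main

import "fmt"

// knownFormats maps an API format name to the extension it produces
// (hypothetical values for this sketch).
var knownFormats = map[string]string{
	"markdown": "md",
	"html":     "html",
}

// choose mirrors the shape of chooseExportFormat: offered is what the
// service lists in its preferred order, wanted is the user's preference.
func choose(offered, wanted []string) (format, ext string) {
	byExt := map[string]string{}
	for _, f := range offered {
		if e, ok := knownFormats[f]; ok {
			if format == "" {
				format, ext = f, e // remember the service's first valid format
			}
			byExt[e] = f
		}
	}
	for _, e := range wanted {
		if f, ok := byExt[e]; ok {
			return f, e // user preference wins when available
		}
	}
	return format, ext
}

func main() {
	fmt.Println(choose([]string{"markdown", "html"}, []string{"html"})) // html html
	fmt.Println(choose([]string{"markdown"}, []string{"html"}))         // markdown md
}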
|
||||
// ------------------------------------------------------------

func (et exportType) listable() bool {
return et != exportHide
}

// something we should _try_ to export
func (et exportType) exportable() bool {
return et == exportExportable || et == exportListOnly
}

// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
@@ -1474,6 +1759,32 @@ func (o *Object) Size() int64 {
|
||||
return o.bytes
|
||||
}
|
||||
|
||||
func (o *Object) setMetadataForExport(info *files.FileMetadata) {
|
||||
o.bytes = -1
|
||||
o.hash = ""
|
||||
|
||||
if o.fs.opt.SkipExports {
|
||||
o.exportType = exportHide
|
||||
return
|
||||
}
|
||||
if o.fs.opt.ShowAllExports {
|
||||
o.exportType = exportListOnly
|
||||
return
|
||||
}
|
||||
|
||||
var exportExt exportExtension
|
||||
o.exportAPIFormat, exportExt = o.fs.chooseExportFormat(info)
|
||||
if o.exportAPIFormat == "" {
|
||||
o.exportType = exportHide
|
||||
} else {
|
||||
o.exportType = exportExportable
|
||||
// get rid of any paper extension, if present
|
||||
o.remote = strings.TrimSuffix(o.remote, paperExtension)
|
||||
// add the export extension
|
||||
o.remote += "." + string(exportExt)
|
||||
}
|
||||
}
|
||||
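In effect, a non-downloadable Paper doc stored as notes.paper is listed under the chosen export extension (for example notes.md), while size and hash are reset because they are unknown until the export is fetched. A tiny sketch of the renaming step only, assuming the Paper extension is ".paper":

package main

import (
	"fmt"
	"strings"
)

// exportRemote renames a remote the way setMetadataForExport does:
// strip a trailing ".paper" and append the export extension.
func exportRemote(remote, ext string) string {
	return strings.TrimSuffix(remote, ".paper") + "." + ext
}

func main() {
	fmt.Println(exportRemote("notes.paper", "md")) // notes.md
	fmt.Println(exportRemote("readme", "html"))    // readme.html
}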
|
||||
// setMetadataFromEntry sets the fs data from a files.FileMetadata
|
||||
//
|
||||
// This isn't a complete set of metadata and has an inaccurate date
|
||||
@@ -1482,6 +1793,10 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
|
||||
o.bytes = int64(info.Size)
|
||||
o.modTime = info.ClientModified
|
||||
o.hash = info.ContentHash
|
||||
|
||||
if !info.IsDownloadable {
|
||||
o.setMetadataForExport(info)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1545,6 +1860,27 @@ func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (o *Object) export(ctx context.Context) (in io.ReadCloser, err error) {
|
||||
if o.exportType == exportListOnly || o.exportAPIFormat == "" {
|
||||
fs.Debugf(o.remote, "No export format found")
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
arg := files.ExportArg{Path: o.id, ExportFormat: string(o.exportAPIFormat)}
|
||||
var exportResult *files.ExportResult
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
exportResult, in, err = o.fs.srv.Export(&arg)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o.bytes = int64(exportResult.ExportMetadata.Size)
|
||||
o.hash = exportResult.ExportMetadata.ExportHash
|
||||
return
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
if o.fs.opt.SharedFiles {
|
||||
@@ -1564,6 +1900,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
return
|
||||
}
|
||||
|
||||
if o.exportType.exportable() {
|
||||
return o.export(ctx)
|
||||
}
|
||||
|
||||
fs.FixRangeOption(options, o.bytes)
|
||||
headers := fs.OpenOptionHeaders(options)
|
||||
arg := files.DownloadArg{
|
||||
@@ -1692,14 +2032,10 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
|
||||
// If error is insufficient space then don't retry
|
||||
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
||||
err = fserrors.NoRetryError(err)
|
||||
return false, err
|
||||
}
|
||||
if retry, err := shouldRetryExclude(ctx, err); !retry {
|
||||
return retry, err
|
||||
}
|
||||
// after the first chunk is uploaded, we retry everything
|
||||
// after the first chunk is uploaded, we retry everything except the excluded errors
|
||||
return err != nil, err
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1805,6 +2141,7 @@ var (
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.ListPer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Shutdowner = &Fs{}
|
||||
_ fs.Object = (*Object)(nil)
|
||||
|
||||
@@ -1,9 +1,16 @@
|
||||
package dropbox
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestInternalCheckPathLength(t *testing.T) {
|
||||
@@ -42,3 +49,54 @@ func TestInternalCheckPathLength(t *testing.T) {
|
||||
assert.Equal(t, test.ok, err == nil, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) importPaperForTest(t *testing.T) {
|
||||
content := `# test doc
|
||||
|
||||
Lorem ipsum __dolor__ sit amet
|
||||
[link](http://google.com)
|
||||
`
|
||||
|
||||
arg := files.PaperCreateArg{
|
||||
Path: f.slashRootSlash + "export.paper",
|
||||
ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}},
|
||||
}
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
reader := strings.NewReader(content)
|
||||
_, err = f.srv.PaperCreate(&arg, reader)
|
||||
return shouldRetry(context.Background(), err)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestPaperExport(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f.importPaperForTest(t)
|
||||
|
||||
f.exportExts = []exportExtension{"html"}
|
||||
|
||||
obj, err := f.NewObject(ctx, "export.html")
|
||||
require.NoError(t, err)
|
||||
|
||||
rc, err := obj.Open(ctx)
|
||||
require.NoError(t, err)
|
||||
defer func() { require.NoError(t, rc.Close()) }()
|
||||
|
||||
buf, err := io.ReadAll(rc)
|
||||
require.NoError(t, err)
|
||||
text := string(buf)
|
||||
|
||||
for _, excerpt := range []string{
|
||||
"Lorem ipsum",
|
||||
"<b>dolor</b>",
|
||||
`href="http://google.com"`,
|
||||
} {
|
||||
require.Contains(t, text, excerpt)
|
||||
}
|
||||
}
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("PaperExport", f.InternalTestPaperExport)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
@@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{})
|
||||
|
||||
// fields returns the JSON fields in use by opt as a | separated
|
||||
// string.
|
||||
func fields(opt interface{}) (pipeTags string, err error) {
|
||||
func fields(opt any) (pipeTags string, err error) {
|
||||
var tags []string
|
||||
def := reflect.ValueOf(opt)
|
||||
defType := def.Type()
|
||||
for i := 0; i < def.NumField(); i++ {
|
||||
for i := range def.NumField() {
|
||||
field := defType.Field(i)
|
||||
tag, ok := field.Tag.Lookup("json")
|
||||
if !ok {
|
||||
@@ -239,7 +239,7 @@ func fields(opt interface{}) (pipeTags string, err error) {
|
||||
|
||||
// mustFields returns the JSON fields in use by opt as a | separated
|
||||
// string. It panics on failure.
|
||||
func mustFields(opt interface{}) string {
|
||||
func mustFields(opt any) string {
|
||||
tags, err := fields(opt)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
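The new for i := range def.NumField() form relies on Go 1.22's range over an integer, which counts from 0 to n-1 and is equivalent to the classic three-clause loop:

package main

import "fmt"

func main() {
	n := 3
	// Go 1.22+: range over an integer yields 0, 1, ..., n-1.
	for i := range n {
		fmt.Println(i)
	}
	// Equivalent classic form.
	for i := 0; i < n; i++ {
		fmt.Println(i)
	}
}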
@@ -351,12 +351,12 @@ type SpaceInfo struct {
|
||||
// DeleteResponse is returned from doDeleteFile
|
||||
type DeleteResponse struct {
|
||||
Status
|
||||
Deleted []string `json:"deleted"`
|
||||
Errors []interface{} `json:"errors"`
|
||||
ID string `json:"fi_id"`
|
||||
BackgroundTask int `json:"backgroundtask"`
|
||||
UsSize string `json:"us_size"`
|
||||
PaSize string `json:"pa_size"`
|
||||
Deleted []string `json:"deleted"`
|
||||
Errors []any `json:"errors"`
|
||||
ID string `json:"fi_id"`
|
||||
BackgroundTask int `json:"backgroundtask"`
|
||||
UsSize string `json:"us_size"`
|
||||
PaSize string `json:"pa_size"`
|
||||
//SpaceInfo SpaceInfo `json:"spaceinfo"`
|
||||
}
|
||||
|
||||
|
||||
@@ -371,7 +371,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
|
||||
}
|
||||
|
||||
// params for rpc
|
||||
type params map[string]interface{}
|
||||
type params map[string]any
|
||||
|
||||
// rpc calls the rpc.php method of the SME file fabric
|
||||
//
|
||||
|
||||
81
backend/filelu/api/types.go
Normal file
@@ -0,0 +1,81 @@
|
||||
// Package api defines types for interacting with the FileLu API.
|
||||
package api
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
// CreateFolderResponse represents the response for creating a folder.
|
||||
type CreateFolderResponse struct {
|
||||
Status int `json:"status"`
|
||||
Msg string `json:"msg"`
|
||||
Result struct {
|
||||
FldID any `json:"fld_id"`
|
||||
} `json:"result"`
|
||||
}
|
||||
|
||||
// DeleteFolderResponse represents the response for deleting a folder.
|
||||
type DeleteFolderResponse struct {
|
||||
Status int `json:"status"`
|
||||
Msg string `json:"msg"`
|
||||
}
|
||||
|
||||
// FolderListResponse represents the response for listing folders.
|
||||
type FolderListResponse struct {
|
||||
Status int `json:"status"`
|
||||
Msg string `json:"msg"`
|
||||
Result struct {
|
||||
Files []struct {
|
||||
Name string `json:"name"`
|
||||
FldID json.Number `json:"fld_id"`
|
||||
Path string `json:"path"`
|
||||
FileCode string `json:"file_code"`
|
||||
Size int64 `json:"size"`
|
||||
} `json:"files"`
|
||||
Folders []struct {
|
||||
Name string `json:"name"`
|
||||
FldID json.Number `json:"fld_id"`
|
||||
Path string `json:"path"`
|
||||
} `json:"folders"`
|
||||
} `json:"result"`
|
||||
}
|
||||
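FldID is declared as json.Number so the same field decodes whether the API sends the id as a bare number or as a numeric string, and FldID.String() then works as a map key either way. A small sketch:

package main

import (
	"encoding/json"
	"fmt"
)

type folder struct {
	FldID json.Number `json:"fld_id"`
}

func main() {
	var a, b folder
	// The same struct decodes a numeric id ...
	_ = json.Unmarshal([]byte(`{"fld_id": 42}`), &a)
	// ... and a quoted numeric id.
	_ = json.Unmarshal([]byte(`{"fld_id": "42"}`), &b)
	fmt.Println(a.FldID.String(), b.FldID.String()) // 42 42
}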
|
||||
// FileDirectLinkResponse represents the response for a direct link to a file.
|
||||
type FileDirectLinkResponse struct {
|
||||
Status int `json:"status"`
|
||||
Msg string `json:"msg"`
|
||||
Result struct {
|
||||
URL string `json:"url"`
|
||||
Size int64 `json:"size"`
|
||||
} `json:"result"`
|
||||
}
|
||||
|
||||
// FileInfoResponse represents the response for file information.
|
||||
type FileInfoResponse struct {
|
||||
Status int `json:"status"`
|
||||
Msg string `json:"msg"`
|
||||
Result []struct {
|
||||
Size string `json:"size"`
|
||||
Name string `json:"name"`
|
||||
FileCode string `json:"filecode"`
|
||||
Hash string `json:"hash"`
|
||||
Status int `json:"status"`
|
||||
} `json:"result"`
|
||||
}
|
||||
|
||||
// DeleteFileResponse represents the response for deleting a file.
|
||||
type DeleteFileResponse struct {
|
||||
Status int `json:"status"`
|
||||
Msg string `json:"msg"`
|
||||
}
|
||||
|
||||
// AccountInfoResponse represents the response for account information.
|
||||
type AccountInfoResponse struct {
|
||||
Status int `json:"status"` // HTTP status code of the response.
|
||||
Msg string `json:"msg"` // Message describing the response.
|
||||
Result struct {
|
||||
PremiumExpire string `json:"premium_expire"` // Expiration date of premium access.
|
||||
Email string `json:"email"` // User's email address.
|
||||
UType string `json:"utype"` // User type (e.g., premium or free).
|
||||
Storage string `json:"storage"` // Total storage available to the user.
|
||||
StorageUsed string `json:"storage_used"` // Amount of storage used.
|
||||
} `json:"result"` // Nested result structure containing account details.
|
||||
}
|
||||
366
backend/filelu/filelu.go
Normal file
@@ -0,0 +1,366 @@
|
||||
// Package filelu provides an interface to the FileLu storage system.
|
||||
package filelu
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// Register the backend with Rclone
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "filelu",
|
||||
Description: "FileLu Cloud Storage",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "key",
|
||||
Help: "Your FileLu Rclone key from My Account",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
|
||||
encoder.EncodeSlash |
|
||||
encoder.EncodeLtGt |
|
||||
encoder.EncodeExclamation |
|
||||
encoder.EncodeDoubleQuote |
|
||||
encoder.EncodeSingleQuote |
|
||||
encoder.EncodeBackQuote |
|
||||
encoder.EncodeQuestion |
|
||||
encoder.EncodeDollar |
|
||||
encoder.EncodeColon |
|
||||
encoder.EncodeAsterisk |
|
||||
encoder.EncodePipe |
|
||||
encoder.EncodeHash |
|
||||
encoder.EncodePercent |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeCrLf |
|
||||
encoder.EncodeDel |
|
||||
encoder.EncodeCtl |
|
||||
encoder.EncodeLeftSpace |
|
||||
encoder.EncodeLeftPeriod |
|
||||
encoder.EncodeLeftTilde |
|
||||
encoder.EncodeLeftCrLfHtVt |
|
||||
encoder.EncodeRightPeriod |
|
||||
encoder.EncodeRightCrLfHtVt |
|
||||
encoder.EncodeSquareBracket |
|
||||
encoder.EncodeSemicolon |
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeInvalidUtf8 |
|
||||
encoder.EncodeDot),
|
||||
},
|
||||
}})
|
||||
}
|
||||
|
||||
// Options defines the configuration for the FileLu backend
|
||||
type Options struct {
|
||||
Key string `config:"key"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents the FileLu file system
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
endpoint string
|
||||
pacer *pacer.Pacer
|
||||
srv *rest.Client
|
||||
client *http.Client
|
||||
targetFile string
|
||||
}
|
||||
|
||||
// NewFs creates a new Fs object for FileLu
|
||||
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse config: %w", err)
|
||||
}
|
||||
|
||||
if opt.Key == "" {
|
||||
return nil, fmt.Errorf("FileLu Rclone Key is required")
|
||||
}
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
|
||||
if strings.TrimSpace(root) == "" {
|
||||
root = ""
|
||||
}
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
filename := ""
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
endpoint: "https://filelu.com/rclone",
|
||||
client: client,
|
||||
srv: rest.NewClient(client).SetRoot("https://filelu.com/rclone"),
|
||||
pacer: pacer.New(),
|
||||
targetFile: filename,
|
||||
root: root,
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
WriteMetadata: false,
|
||||
SlowHash: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
rootContainer, rootDirectory := rootSplit(f.root)
|
||||
if rootContainer != "" && rootDirectory != "" {
|
||||
// Check to see if the (container,directory) is actually an existing file
|
||||
oldRoot := f.root
|
||||
newRoot, leaf := path.Split(oldRoot)
|
||||
f.root = strings.Trim(newRoot, "/")
|
||||
_, err := f.NewObject(ctx, leaf)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
|
||||
// File doesn't exist or is a directory so return old f
|
||||
f.root = strings.Trim(oldRoot, "/")
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Mkdir creates a directory on the remote server.
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
fullPath := path.Clean(f.root + "/" + dir)
|
||||
_, err := f.createFolder(ctx, fullPath)
|
||||
return err
|
||||
}
|
||||
|
||||
// About provides usage statistics for the remote
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
accountInfo, err := f.getAccountInfo(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
totalStorage, err := parseStorageToBytes(accountInfo.Result.Storage)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse total storage: %w", err)
|
||||
}
|
||||
|
||||
usedStorage, err := parseStorageToBytes(accountInfo.Result.StorageUsed)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse used storage: %w", err)
|
||||
}
|
||||
|
||||
return &fs.Usage{
|
||||
Total: fs.NewUsageValue(totalStorage), // Total bytes available
|
||||
Used: fs.NewUsageValue(usedStorage), // Total bytes used
|
||||
Free: fs.NewUsageValue(totalStorage - usedStorage),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Purge deletes the directory and all its contents
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
fullPath := path.Join(f.root, dir)
|
||||
if fullPath != "" {
|
||||
fullPath = "/" + strings.Trim(fullPath, "/")
|
||||
}
|
||||
return f.deleteFolder(ctx, fullPath)
|
||||
}
|
||||
|
||||
// List returns a list of files and folders for the given directory
|
||||
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
// Compose full path for API call
|
||||
fullPath := path.Join(f.root, dir)
|
||||
fullPath = "/" + strings.Trim(fullPath, "/")
|
||||
if fullPath == "/" {
|
||||
fullPath = ""
|
||||
}
|
||||
|
||||
var entries fs.DirEntries
|
||||
result, err := f.getFolderList(ctx, fullPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fldMap := map[string]bool{}
|
||||
for _, folder := range result.Result.Folders {
|
||||
fldMap[folder.FldID.String()] = true
|
||||
if f.root == "" && dir == "" && strings.Contains(folder.Path, "/") {
|
||||
continue
|
||||
}
|
||||
|
||||
paths := strings.Split(folder.Path, fullPath+"/")
|
||||
remote := paths[0]
|
||||
if len(paths) > 1 {
|
||||
remote = paths[1]
|
||||
}
|
||||
|
||||
if strings.Contains(remote, "/") {
|
||||
continue
|
||||
}
|
||||
|
||||
pathsWithoutRoot := strings.Split(folder.Path, "/"+f.root+"/")
|
||||
remotePathWithoutRoot := pathsWithoutRoot[0]
|
||||
if len(pathsWithoutRoot) > 1 {
|
||||
remotePathWithoutRoot = pathsWithoutRoot[1]
|
||||
}
|
||||
remotePathWithoutRoot = strings.TrimPrefix(remotePathWithoutRoot, "/")
|
||||
entries = append(entries, fs.NewDir(remotePathWithoutRoot, time.Now()))
|
||||
}
|
||||
for _, file := range result.Result.Files {
|
||||
if _, ok := fldMap[file.FldID.String()]; ok {
|
||||
continue
|
||||
}
|
||||
remote := path.Join(dir, file.Name)
|
||||
// trim leading slashes
|
||||
remote = strings.TrimPrefix(remote, "/")
|
||||
obj := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: file.Size,
|
||||
modTime: time.Now(),
|
||||
}
|
||||
entries = append(entries, obj)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Put uploads a file directly to the destination folder in the FileLu storage system.
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if src.Size() == 0 {
|
||||
return nil, fs.ErrorCantUploadEmptyFiles
|
||||
}
|
||||
|
||||
err := f.uploadFile(ctx, in, src.Remote())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newObject := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
size: src.Size(),
|
||||
modTime: src.ModTime(ctx),
|
||||
}
|
||||
fs.Infof(f, "Put: Successfully uploaded new file %q", src.Remote())
|
||||
return newObject, nil
|
||||
}
|
||||
|
||||
// Move moves the file to the specified location
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, destinationPath string) (fs.Object, error) {
|
||||
|
||||
if strings.HasPrefix(destinationPath, "/") || strings.Contains(destinationPath, ":\\") {
|
||||
dir := path.Dir(destinationPath)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("failed to create destination directory: %w", err)
|
||||
}
|
||||
|
||||
reader, err := src.Open(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open source file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := reader.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close file body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
dest, err := os.Create(destinationPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create destination file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := dest.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close file body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if _, err := io.Copy(dest, reader); err != nil {
|
||||
return nil, fmt.Errorf("failed to copy file content: %w", err)
|
||||
}
|
||||
|
||||
if err := src.Remove(ctx); err != nil {
|
||||
return nil, fmt.Errorf("failed to remove source file: %w", err)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
reader, err := src.Open(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open source object: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := reader.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close file body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
err = f.uploadFile(ctx, reader, destinationPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to upload file to destination: %w", err)
|
||||
}
|
||||
|
||||
if err := src.Remove(ctx); err != nil {
|
||||
return nil, fmt.Errorf("failed to delete source file: %w", err)
|
||||
}
|
||||
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: destinationPath,
|
||||
size: src.Size(),
|
||||
modTime: src.ModTime(ctx),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Rmdir removes a directory
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
fullPath := path.Join(f.root, dir)
|
||||
if fullPath != "" {
|
||||
fullPath = "/" + strings.Trim(fullPath, "/")
|
||||
}
|
||||
|
||||
// Step 1: Check if folder is empty
|
||||
listResp, err := f.getFolderList(ctx, fullPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(listResp.Result.Files) > 0 || len(listResp.Result.Folders) > 0 {
|
||||
return fmt.Errorf("Rmdir: directory %q is not empty", fullPath)
|
||||
}
|
||||
|
||||
// Step 2: Delete the folder
|
||||
return f.deleteFolder(ctx, fullPath)
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
324
backend/filelu/filelu_client.go
Normal file
@@ -0,0 +1,324 @@
|
||||
package filelu
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/backend/filelu/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// createFolder creates a folder at the specified path.
|
||||
func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolderResponse, error) {
|
||||
encodedDir := f.fromStandardPath(dirPath)
|
||||
apiURL := fmt.Sprintf("%s/folder/create?folder_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(encodedDir),
|
||||
url.QueryEscape(f.opt.Key), // assuming f.opt.Key is the correct field
|
||||
)
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
result := api.CreateFolderResponse{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var innerErr error
|
||||
resp, innerErr = f.client.Do(req)
|
||||
return fserrors.ShouldRetry(innerErr), innerErr
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("request failed: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
err = json.NewDecoder(resp.Body).Decode(&result)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
if result.Status != 200 {
|
||||
return nil, fmt.Errorf("error: %s", result.Msg)
|
||||
}
|
||||
|
||||
fs.Infof(f, "Successfully created folder %q with ID %v", dirPath, result.Result.FldID)
|
||||
return &result, nil
|
||||
}
|
||||
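Every FileLu call here follows the same pacer.Call shape: the closure makes one HTTP attempt and returns (retry, err), and the pacer re-invokes it with backoff while retry is true. A reduced, generic sketch of that contract (not the real rclone pacer API):

package main

import (
	"errors"
	"fmt"
	"time"
)

// call retries fn with a fixed delay while it reports retry=true,
// standing in for the backend's pacer.
func call(fn func() (retry bool, err error), maxTries int) error {
	var err error
	for range maxTries {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
		time.Sleep(10 * time.Millisecond)
	}
	return err
}

func main() {
	attempts := 0
	err := call(func() (bool, error) {
		attempts++
		if attempts < 3 {
			return true, errors.New("temporarily unavailable") // ask for another try
		}
		return false, nil // done, stop retrying
	}, 5)
	fmt.Println(attempts, err) // 3 <nil>
}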
|
||||
// getFolderList lists both files and folders in a directory.
|
||||
func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListResponse, error) {
|
||||
encodedDir := f.fromStandardPath(path)
|
||||
apiURL := fmt.Sprintf("%s/folder/list?folder_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(encodedDir),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
var body []byte
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to list directory: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
body, err = io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error reading response body: %w", err)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var response api.FolderListResponse
|
||||
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&response); err != nil {
|
||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
if response.Status != 200 {
|
||||
if strings.Contains(response.Msg, "Folder not found") {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return nil, fmt.Errorf("API error: %s", response.Msg)
|
||||
}
|
||||
|
||||
for index := range response.Result.Folders {
|
||||
response.Result.Folders[index].Path = f.toStandardPath(response.Result.Folders[index].Path)
|
||||
}
|
||||
|
||||
for index := range response.Result.Files {
|
||||
response.Result.Files[index].Name = f.toStandardPath(response.Result.Files[index].Name)
|
||||
}
|
||||
|
||||
return &response, nil
|
||||
|
||||
}
|
||||
|
||||
// deleteFolder deletes a folder at the specified path.
|
||||
func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error {
|
||||
fullPath = f.fromStandardPath(fullPath)
|
||||
deleteURL := fmt.Sprintf("%s/folder/delete?folder_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(fullPath),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
delResp := api.DeleteFolderResponse{}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", deleteURL, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &delResp); err != nil {
|
||||
return false, fmt.Errorf("error decoding delete response: %w", err)
|
||||
}
|
||||
if delResp.Status != 200 {
|
||||
return false, fmt.Errorf("delete error: %s", delResp.Msg)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fs.Infof(f, "Rmdir: successfully deleted %q", fullPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getDirectLink gets a direct download link for a file from FileLu.
|
||||
func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64, error) {
|
||||
filePath = f.fromStandardPath(filePath)
|
||||
apiURL := fmt.Sprintf("%s/file/direct_link?file_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(filePath),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
result := api.FileDirectLinkResponse{}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return false, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != 200 {
|
||||
return false, fmt.Errorf("API error: %s", result.Msg)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
|
||||
return result.Result.URL, result.Result.Size, nil
|
||||
}
|
||||
|
||||
// deleteFile deletes a file based on filePath
|
||||
func (f *Fs) deleteFile(ctx context.Context, filePath string) error {
|
||||
filePath = f.fromStandardPath(filePath)
|
||||
apiURL := fmt.Sprintf("%s/file/remove?file_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(filePath),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
result := api.DeleteFileResponse{}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return false, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != 200 {
|
||||
return false, fmt.Errorf("API error: %s", result.Msg)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// getAccountInfo retrieves account information
|
||||
func (f *Fs) getAccountInfo(ctx context.Context) (*api.AccountInfoResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/account/info",
|
||||
Parameters: url.Values{
|
||||
"key": {f.opt.Key},
|
||||
},
|
||||
}
|
||||
|
||||
var result api.AccountInfoResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, callErr := f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return fserrors.ShouldRetry(callErr), callErr
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if result.Status != 200 {
|
||||
return nil, fmt.Errorf("error: %s", result.Msg)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// getFileInfo retrieves file information based on file code
|
||||
func (f *Fs) getFileInfo(ctx context.Context, fileCode string) (*api.FileInfoResponse, error) {
|
||||
u, _ := url.Parse(f.endpoint + "/file/info2")
|
||||
q := u.Query()
|
||||
q.Set("file_code", fileCode) // raw path — Go handles escaping properly here
|
||||
q.Set("key", f.opt.Key)
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
apiURL := f.endpoint + "/file/info2?" + u.RawQuery
|
||||
|
||||
var body []byte
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to fetch file info: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
body, err = io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error reading response body: %w", err)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := api.FileInfoResponse{}
|
||||
|
||||
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&result); err != nil {
|
||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != 200 || len(result.Result) == 0 {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
193
backend/filelu/filelu_file_uploader.go
Normal file
@@ -0,0 +1,193 @@
|
||||
package filelu
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// uploadFile uploads a file to FileLu
|
||||
func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath string) error {
|
||||
directory := path.Dir(fileFullPath)
|
||||
fileName := path.Base(fileFullPath)
|
||||
if directory == "." {
|
||||
directory = ""
|
||||
}
|
||||
destinationFolderPath := path.Join(f.root, directory)
|
||||
if destinationFolderPath != "" {
|
||||
destinationFolderPath = "/" + strings.Trim(destinationFolderPath, "/")
|
||||
}
|
||||
|
||||
existingEntries, err := f.List(ctx, path.Dir(fileFullPath))
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrorDirNotFound) {
|
||||
err = f.Mkdir(ctx, path.Dir(fileFullPath))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create directory: %w", err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("failed to list existing files: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, entry := range existingEntries {
|
||||
if entry.Remote() == fileFullPath {
|
||||
_, ok := entry.(fs.Object)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// If the file exists but is different, remove it
|
||||
filePath := "/" + strings.Trim(destinationFolderPath+"/"+fileName, "/")
|
||||
err = f.deleteFile(ctx, filePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete existing file: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
uploadURL, sessID, err := f.getUploadServer(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to retrieve upload server: %w", err)
|
||||
}
|
||||
|
||||
// Since the fileCode isn't used, just handle the error
|
||||
if _, err := f.uploadFileWithDestination(ctx, uploadURL, sessID, fileName, fileContent, destinationFolderPath); err != nil {
|
||||
return fmt.Errorf("failed to upload file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getUploadServer gets the upload server URL with proper key authentication
func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
	apiURL := fmt.Sprintf("%s/upload/server?key=%s", f.endpoint, url.QueryEscape(f.opt.Key))

	var result struct {
		Status int    `json:"status"`
		SessID string `json:"sess_id"`
		Result string `json:"result"`
		Msg    string `json:"msg"`
	}

	err := f.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
		if err != nil {
			return false, fmt.Errorf("failed to create request: %w", err)
		}

		resp, err := f.client.Do(req)
		if err != nil {
			return shouldRetry(err), fmt.Errorf("failed to get upload server: %w", err)
		}
		defer func() {
			if err := resp.Body.Close(); err != nil {
				fs.Logf(nil, "Failed to close response body: %v", err)
			}
		}()

		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			return false, fmt.Errorf("error decoding response: %w", err)
		}

		if result.Status != 200 {
			return false, fmt.Errorf("API error: %s", result.Msg)
		}

		return shouldRetryHTTP(resp.StatusCode), nil
	})

	if err != nil {
		return "", "", err
	}

	return result.Result, result.SessID, nil
}

// uploadFileWithDestination uploads a file directly to a specified folder using a file content reader.
func (f *Fs) uploadFileWithDestination(ctx context.Context, uploadURL, sessID, fileName string, fileContent io.Reader, dirPath string) (string, error) {
	destinationPath := f.fromStandardPath(dirPath)
	encodedFileName := f.fromStandardPath(fileName)
	pr, pw := io.Pipe()
	writer := multipart.NewWriter(pw)
	isDeletionRequired := false
	go func() {
		defer func() {
			if err := pw.Close(); err != nil {
				fs.Logf(nil, "Failed to close: %v", err)
			}
		}()
		_ = writer.WriteField("sess_id", sessID)
		_ = writer.WriteField("utype", "prem")
		_ = writer.WriteField("fld_path", destinationPath)

		part, err := writer.CreateFormFile("file_0", encodedFileName)
		if err != nil {
			pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
			return
		}

		if _, err := io.Copy(part, fileContent); err != nil {
			isDeletionRequired = true
			pw.CloseWithError(fmt.Errorf("failed to copy file content: %w", err))
			return
		}

		if err := writer.Close(); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
		}
	}()

	var fileCode string
	err := f.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "POST", uploadURL, pr)
		if err != nil {
			return false, fmt.Errorf("failed to create upload request: %w", err)
		}
		req.Header.Set("Content-Type", writer.FormDataContentType())

		resp, err := f.client.Do(req)
		if err != nil {
			return shouldRetry(err), fmt.Errorf("failed to send upload request: %w", err)
		}
		defer respBodyClose(resp.Body)

		var result []struct {
			FileCode   string `json:"file_code"`
			FileStatus string `json:"file_status"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			return false, fmt.Errorf("failed to parse upload response: %w", err)
		}

		// Guard against an empty result before indexing into it
		if len(result) == 0 {
			return false, errors.New("upload failed: empty response from server")
		}
		if result[0].FileStatus != "OK" {
			return false, fmt.Errorf("upload failed with status: %s", result[0].FileStatus)
		}

		fileCode = result[0].FileCode
		return shouldRetryHTTP(resp.StatusCode), nil
	})

	if err != nil && isDeletionRequired {
		// Attempt to delete the file if upload fails
		_ = f.deleteFile(ctx, destinationPath+"/"+fileName)
	}

	return fileCode, err
}

// respBodyClose closes a response body and reports any error.
func respBodyClose(responseBody io.Closer) {
	if cerr := responseBody.Close(); cerr != nil {
		fmt.Printf("Error closing response body: %v\n", cerr)
	}
}
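The uploader above streams the multipart body through an io.Pipe: a goroutine writes the form fields and file content into the pipe while the HTTP client reads from it, so the file never has to be buffered fully in memory. A minimal, self-contained sketch of that pattern follows; the URL, form field name and plain net/http client are placeholders for illustration, not the FileLu API or rclone's pacer/retry machinery.

package main

import (
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"strings"
)

// streamUpload POSTs fileContent as a multipart form without buffering it all in memory.
func streamUpload(uploadURL, fileName string, fileContent io.Reader) error {
	pr, pw := io.Pipe()
	writer := multipart.NewWriter(pw)

	go func() {
		// Any error closes the pipe so the HTTP client sees it as a failed read.
		part, err := writer.CreateFormFile("file_0", fileName) // field name is illustrative
		if err != nil {
			pw.CloseWithError(err)
			return
		}
		if _, err := io.Copy(part, fileContent); err != nil {
			pw.CloseWithError(err)
			return
		}
		// Closing the writer emits the final multipart boundary.
		pw.CloseWithError(writer.Close())
	}()

	req, err := http.NewRequest("POST", uploadURL, pr)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer func() { _ = resp.Body.Close() }()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("status %s: %s\n", resp.Status, body)
	return nil
}

func main() {
	// Placeholder endpoint; a real upload server URL would come from the API.
	_ = streamUpload("https://example.com/upload", "hello.txt", strings.NewReader("hello world"))
}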
112 backend/filelu/filelu_helper.go Normal file
@@ -0,0 +1,112 @@
package filelu

import (
	"context"
	"errors"
	"fmt"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
)

// errFileNotFound represents a file not found error
var errFileNotFound = errors.New("file not found")

// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
	// Prepare parent directory
	parentDir := path.Dir(filePath)

	// Call List to get all the files
	result, err := f.getFolderList(ctx, parentDir)
	if err != nil {
		return "", err
	}

	for _, file := range result.Result.Files {
		filePathFromServer := parentDir + "/" + file.Name
		if parentDir == "/" {
			filePathFromServer = "/" + file.Name
		}
		if filePath == filePathFromServer {
			return file.FileCode, nil
		}
	}

	return "", errFileNotFound
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// fromStandardPath encodes a path for the FileLu API
func (f *Fs) fromStandardPath(remote string) string {
	return f.opt.Enc.FromStandardPath(remote)
}

// toStandardPath decodes a path returned by the FileLu API
func (f *Fs) toStandardPath(remote string) string {
	return f.opt.Enc.ToStandardPath(remote)
}

// Hashes returns an empty hash set, indicating no hash support
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet() // Properly creates an empty hash set
}

// Name returns the remote name
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root path
func (f *Fs) Root() string {
	return f.root
}

// Precision returns the precision of the remote
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// String returns a description of the Fs
func (f *Fs) String() string {
	return fmt.Sprintf("FileLu root '%s'", f.root)
}

// isFileCode checks if a string looks like a file code
func isFileCode(s string) bool {
	if len(s) != 12 {
		return false
	}
	for _, c := range s {
		if !((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
			return false
		}
	}
	return true
}

// shouldRetry reports whether an error is worth retrying
func shouldRetry(err error) bool {
	return fserrors.ShouldRetry(err)
}

// shouldRetryHTTP reports whether an HTTP status code is worth retrying
func shouldRetryHTTP(code int) bool {
	return code == 429 || code >= 500
}

// rootSplit splits an absolute path into its bucket and bucket path parts
func rootSplit(absPath string) (bucket, bucketPath string) {
	// No bucket
	if absPath == "" {
		return "", ""
	}
	slash := strings.IndexRune(absPath, '/')
	// Bucket but no path
	if slash < 0 {
		return absPath, ""
	}
	return absPath[:slash], absPath[slash+1:]
}
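Worked examples for the helpers above, derived directly from their code: isFileCode returns true only for a 12-character string of lowercase letters and digits, so isFileCode("abc123def456") is true while isFileCode("ABC123DEF456") is false; rootSplit("bucket/dir/file.txt") returns ("bucket", "dir/file.txt"), and rootSplit("bucket") returns ("bucket", "").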
259 backend/filelu/filelu_object.go Normal file
@@ -0,0 +1,259 @@
package filelu

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
)

// Object describes a FileLu object
type Object struct {
	fs      *Fs
	remote  string
	size    int64
	modTime time.Time
}

// NewObject creates a new Object for the given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	filePath := path.Join(f.root, remote)
	filePath = "/" + strings.Trim(filePath, "/")

	// Get file code
	fileCode, err := f.getFileCode(ctx, filePath)
	if err != nil {
		return nil, fs.ErrorObjectNotFound
	}

	// Get file info
	fileInfos, err := f.getFileInfo(ctx, fileCode)
	if err != nil {
		return nil, fmt.Errorf("failed to get file info: %w", err)
	}

	fileInfo := fileInfos.Result[0]
	size, _ := strconv.ParseInt(fileInfo.Size, 10, 64)

	return &Object{
		fs:      f,
		remote:  remote,
		size:    size,
		modTime: time.Now(),
	}, nil
}

// Open opens the object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	filePath := path.Join(o.fs.root, o.remote)
	// Get direct link
	directLink, size, err := o.fs.getDirectLink(ctx, filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to get direct link: %w", err)
	}

	o.size = size

	// Offset and Count for range download
	var offset int64
	var count int64
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
		case *fs.SeekOption:
			offset = x.Offset
			count = o.size
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	var reader io.ReadCloser
	err = o.fs.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", directLink, nil)
		if err != nil {
			return false, fmt.Errorf("failed to create download request: %w", err)
		}

		resp, err := o.fs.client.Do(req)
		if err != nil {
			return shouldRetry(err), fmt.Errorf("failed to download file: %w", err)
		}
		// Close the body on every path, not only on a non-200 status
		defer func() {
			if err := resp.Body.Close(); err != nil {
				fs.Logf(nil, "Failed to close response body: %v", err)
			}
		}()

		if resp.StatusCode != http.StatusOK {
			return false, fmt.Errorf("failed to download file: HTTP %d", resp.StatusCode)
		}

		// Wrap the response body to handle offset and count
		currentContents, err := io.ReadAll(resp.Body)
		if err != nil {
			return false, fmt.Errorf("failed to read response body: %w", err)
		}

		if offset > 0 {
			if offset > int64(len(currentContents)) {
				return false, fmt.Errorf("offset %d exceeds file size %d", offset, len(currentContents))
			}
			currentContents = currentContents[offset:]
		}
		if count > 0 && count < int64(len(currentContents)) {
			currentContents = currentContents[:count]
		}
		reader = io.NopCloser(bytes.NewReader(currentContents))

		return false, nil
	})
	if err != nil {
		return nil, err
	}

	return reader, nil
}

// Update updates the object with new data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if src.Size() <= 0 {
		return fs.ErrorCantUploadEmptyFiles
	}

	err := o.fs.uploadFile(ctx, in, o.remote)
	if err != nil {
		return fmt.Errorf("failed to upload file: %w", err)
	}
	o.size = src.Size()
	return nil
}

// Remove deletes the object from FileLu
func (o *Object) Remove(ctx context.Context) error {
	fullPath := "/" + strings.Trim(path.Join(o.fs.root, o.remote), "/")

	err := o.fs.deleteFile(ctx, fullPath)
	if err != nil {
		return err
	}
	fs.Infof(o.fs, "Successfully deleted file: %s", fullPath)
	return nil
}

// Hash returns the MD5 hash of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}

	var fileCode string
	if isFileCode(o.fs.root) {
		fileCode = o.fs.root
	} else {
		matches := regexp.MustCompile(`\((.*?)\)`).FindAllStringSubmatch(o.remote, -1)
		for _, match := range matches {
			if len(match) > 1 && len(match[1]) == 12 {
				fileCode = match[1]
				break
			}
		}
	}
	if fileCode == "" {
		return "", fmt.Errorf("no valid file code found in the remote path")
	}

	apiURL := fmt.Sprintf("%s/file/info?file_code=%s&key=%s",
		o.fs.endpoint, url.QueryEscape(fileCode), url.QueryEscape(o.fs.opt.Key))

	var result struct {
		Status int    `json:"status"`
		Msg    string `json:"msg"`
		Result []struct {
			Hash string `json:"hash"`
		} `json:"result"`
	}
	err := o.fs.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
		if err != nil {
			return false, err
		}
		resp, err := o.fs.client.Do(req)
		if err != nil {
			return shouldRetry(err), err
		}
		defer func() {
			if err := resp.Body.Close(); err != nil {
				fs.Logf(nil, "Failed to close response body: %v", err)
			}
		}()

		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			return false, err
		}
		return shouldRetryHTTP(resp.StatusCode), nil
	})
	if err != nil {
		return "", err
	}
	if result.Status != 200 || len(result.Result) == 0 {
		return "", fmt.Errorf("error: unable to fetch hash: %s", result.Msg)
	}

	return result.Result[0].Hash, nil
}

// String returns a string representation of the object
func (o *Object) String() string {
	return o.remote
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of the object
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

// Storable indicates whether the object is storable
func (o *Object) Storable() bool {
	return true
}
16 backend/filelu/filelu_test.go Normal file
@@ -0,0 +1,16 @@
package filelu_test

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests for the FileLu backend
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestFileLu:",
		NilObject:       nil,
		SkipInvalidUTF8: true,
	})
}
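Like other rclone backends, this suite is driven by the fstests framework against a configured remote; the customary invocation would be something along the lines of go test -v -remote TestFileLu: from the backend/filelu directory (shown here as the usual rclone pattern, not a command verified for this backend).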
15 backend/filelu/utils.go Normal file
@@ -0,0 +1,15 @@
package filelu

import (
	"fmt"
)

// parseStorageToBytes converts a storage string given in GB (e.g. "10") to bytes
func parseStorageToBytes(storage string) (int64, error) {
	var gb float64
	_, err := fmt.Sscanf(storage, "%f", &gb)
	if err != nil {
		return 0, fmt.Errorf("failed to parse storage: %w", err)
	}
	return int64(gb * 1024 * 1024 * 1024), nil
}
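For example, parseStorageToBytes("10") reads the value as 10 GB and returns 10 * 1024 * 1024 * 1024 = 10737418240 bytes; a non-numeric input returns the wrapped Sscanf error.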
@@ -10,6 +10,7 @@ import (
	"net/http"
	"net/url"
	"path"
	"slices"
	"strings"
	"time"

@@ -169,11 +170,9 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
	}

	if apiErr, ok := err.(files_sdk.ResponseError); ok {
		for _, e := range retryErrorCodes {
			if apiErr.HttpCode == e {
				fs.Debugf(nil, "Retrying API error %v", err)
				return true, err
			}
		if slices.Contains(retryErrorCodes, apiErr.HttpCode) {
			fs.Debugf(nil, "Retrying API error %v", err)
			return true, err
		}
	}
@@ -9,6 +9,7 @@ import (
	"io"
	"net"
	"net/textproto"
	"net/url"
	"path"
	"runtime"
	"strings"
@@ -162,6 +163,16 @@ Enabled by default. Use 0 to disable.`,
			Help:     "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
			Default:  false,
			Advanced: true,
		}, {
			Name: "allow_insecure_tls_ciphers",
			Help: `Allow insecure TLS ciphers

Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults:

- TLS_RSA_WITH_AES_128_GCM_SHA256
`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "shut_timeout",
			Help: "Maximum time to wait for data connection closing status.",
@@ -185,6 +196,14 @@ Supports the format user:pass@host:port, user@host:port, host:port.

Example:

    myUser:myPass@localhost:9005
`,
			Advanced: true,
		}, {
			Name:    "http_proxy",
			Default: "",
			Help: `URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
`,
			Advanced: true,
		}, {
@@ -227,28 +246,30 @@ a write only folder.

// Options defines the configuration for this backend
type Options struct {
	Host              string               `config:"host"`
	User              string               `config:"user"`
	Pass              string               `config:"pass"`
	Port              string               `config:"port"`
	TLS               bool                 `config:"tls"`
	ExplicitTLS       bool                 `config:"explicit_tls"`
	TLSCacheSize      int                  `config:"tls_cache_size"`
	DisableTLS13      bool                 `config:"disable_tls13"`
	Concurrency       int                  `config:"concurrency"`
	SkipVerifyTLSCert bool                 `config:"no_check_certificate"`
	DisableEPSV       bool                 `config:"disable_epsv"`
	DisableMLSD       bool                 `config:"disable_mlsd"`
	DisableUTF8       bool                 `config:"disable_utf8"`
	WritingMDTM       bool                 `config:"writing_mdtm"`
	ForceListHidden   bool                 `config:"force_list_hidden"`
	IdleTimeout       fs.Duration          `config:"idle_timeout"`
	CloseTimeout      fs.Duration          `config:"close_timeout"`
	ShutTimeout       fs.Duration          `config:"shut_timeout"`
	AskPassword       bool                 `config:"ask_password"`
	Enc               encoder.MultiEncoder `config:"encoding"`
	SocksProxy        string               `config:"socks_proxy"`
	NoCheckUpload     bool                 `config:"no_check_upload"`
	Host                    string               `config:"host"`
	User                    string               `config:"user"`
	Pass                    string               `config:"pass"`
	Port                    string               `config:"port"`
	TLS                     bool                 `config:"tls"`
	ExplicitTLS             bool                 `config:"explicit_tls"`
	TLSCacheSize            int                  `config:"tls_cache_size"`
	DisableTLS13            bool                 `config:"disable_tls13"`
	AllowInsecureTLSCiphers bool                 `config:"allow_insecure_tls_ciphers"`
	Concurrency             int                  `config:"concurrency"`
	SkipVerifyTLSCert       bool                 `config:"no_check_certificate"`
	DisableEPSV             bool                 `config:"disable_epsv"`
	DisableMLSD             bool                 `config:"disable_mlsd"`
	DisableUTF8             bool                 `config:"disable_utf8"`
	WritingMDTM             bool                 `config:"writing_mdtm"`
	ForceListHidden         bool                 `config:"force_list_hidden"`
	IdleTimeout             fs.Duration          `config:"idle_timeout"`
	CloseTimeout            fs.Duration          `config:"close_timeout"`
	ShutTimeout             fs.Duration          `config:"shut_timeout"`
	AskPassword             bool                 `config:"ask_password"`
	Enc                     encoder.MultiEncoder `config:"encoding"`
	SocksProxy              string               `config:"socks_proxy"`
	HTTPProxy               string               `config:"http_proxy"`
	NoCheckUpload           bool                 `config:"no_check_upload"`
}
// Fs represents a remote FTP server
@@ -262,10 +283,12 @@ type Fs struct {
	user     string
	pass     string
	dialAddr string
	tlsConf  *tls.Config // default TLS client config
	poolMu   sync.Mutex
	pool     []*ftp.ServerConn
	drain    *time.Timer // used to drain the pool when we stop using the connections
	tokens   *pacer.TokenDispenser
	proxyURL *url.URL  // address of HTTP proxy read from environment
	pacer    *fs.Pacer // pacer for FTP connections
	fGetTime bool      // true if the ftp library accepts GetTime
	fSetTime bool      // true if the ftp library accepts SetTime
@@ -386,9 +409,14 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
func (f *Fs) tlsConfig() *tls.Config {
	var tlsConfig *tls.Config
	if f.opt.TLS || f.opt.ExplicitTLS {
		tlsConfig = &tls.Config{
			ServerName:         f.opt.Host,
			InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
		if f.tlsConf != nil {
			tlsConfig = f.tlsConf.Clone()
		} else {
			tlsConfig = new(tls.Config)
		}
		tlsConfig.ServerName = f.opt.Host
		if f.opt.SkipVerifyTLSCert {
			tlsConfig.InsecureSkipVerify = true
		}
		if f.opt.TLSCacheSize > 0 {
			tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
@@ -396,6 +424,14 @@ func (f *Fs) tlsConfig() *tls.Config {
		if f.opt.DisableTLS13 {
			tlsConfig.MaxVersion = tls.VersionTLS12
		}
		if f.opt.AllowInsecureTLSCiphers {
			var ids []uint16
			// Read default ciphers
			for _, cs := range tls.CipherSuites() {
				ids = append(ids, cs.ID)
			}
			tlsConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
		}
	}
	return tlsConfig
}
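In effect, when the new allow_insecure_tls_ciphers option is enabled the cipher list starts from Go's secure defaults (tls.CipherSuites()) and appends TLS_RSA_WITH_AES_128_GCM_SHA256; left at its default of false, the TLS configuration is unchanged.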
@@ -413,11 +449,28 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
	dial := func(network, address string) (conn net.Conn, err error) {
		fs.Debugf(f, "dial(%q,%q)", network, address)
		defer func() {
			fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
			if err != nil {
				fs.Debugf(f, "> dial: conn=%v, err=%v", conn, err)
			} else {
				fs.Debugf(f, "> dial: conn=%s->%s, err=%v", conn.LocalAddr(), conn.RemoteAddr(), err)
			}
		}()
		baseDialer := fshttp.NewDialer(ctx)
		if f.opt.SocksProxy != "" {
			conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
		if f.opt.SocksProxy != "" || f.proxyURL != nil {
			// We need to make the onward connection to f.opt.Host. However the FTP
			// library sets the host to the proxy IP after using EPSV or PASV so we need
			// to correct that here.
			var dialPort string
			_, dialPort, err = net.SplitHostPort(address)
			if err != nil {
				return nil, err
			}
			dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
			if f.opt.SocksProxy != "" {
				conn, err = proxy.SOCKS5Dial(network, dialAddress, f.opt.SocksProxy, baseDialer)
			} else {
				conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
			}
		} else {
			conn, err = baseDialer.Dial(network, address)
		}
@@ -626,11 +679,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
		dialAddr: dialAddr,
		tokens:   pacer.NewTokenDispenser(opt.Concurrency),
		pacer:    fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		tlsConf:  fshttp.NewTransport(ctx).TLSClientConfig,
	}
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
		PartialUploads:          true,
	}).Fill(ctx, f)
	// get proxy URL if set
	if opt.HTTPProxy != "" {
		proxyURL, err := url.Parse(opt.HTTPProxy)
		if err != nil {
			return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
		}
		f.proxyURL = proxyURL
	}
	// set the pool drainer timer going
	if f.opt.IdleTimeout > 0 {
		f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
@@ -17,7 +17,7 @@ import (
	"github.com/stretchr/testify/require"
)

type settings map[string]interface{}
type settings map[string]any

func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
@@ -52,7 +52,7 @@ func (f *Fs) testUploadTimeout(t *testing.T) {
		ci.Timeout = saveTimeout
	}()
	ci.LowLevelRetries = 1
	ci.Timeout = idleTimeout
	ci.Timeout = fs.Duration(idleTimeout)

	upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
		fixFs := deriveFs(ctx, t, f, settings{
@@ -194,33 +194,9 @@ type DeleteResponse struct {
	Data map[string]Error
}

// Server is an upload server
type Server struct {
	Name string `json:"name"`
	Zone string `json:"zone"`
}

// String returns a string representation of the Server
func (s *Server) String() string {
	return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}

// Root returns the root URL for the server
func (s *Server) Root() string {
	return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}

// URL returns the upload URL for the server
func (s *Server) URL() string {
	return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}

// ServersResponse is the output from /servers
type ServersResponse struct {
	Error
	Data struct {
		Servers []Server `json:"servers"`
	} `json:"data"`
// DirectUploadURL returns the direct upload URL for Gofile
func DirectUploadURL() string {
	return "https://upload.gofile.io/uploadfile"
}

// UploadResponse is returned by POST /contents/uploadfile
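The net effect of this hunk is that Gofile uploads now target the fixed endpoint returned by DirectUploadURL() (https://upload.gofile.io/uploadfile) rather than per-server URLs built from the removed Server type.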
Some files were not shown because too many files have changed in this diff.