Mirror of https://github.com/rclone/rclone.git — synced 2025-12-06 00:03:32 +00:00

Compare commits: fix-5224-s...feat/cache (859 commits)
(Commit table elided: the compare view listed the 859 commits in this range, but only each row's abbreviated SHA1 survived extraction; the Author, Date, and message cells were empty.)
.github/workflows/build.yml (vendored, 64 lines changed)
@@ -17,22 +17,24 @@ on:
       manual:
         description: Manual run (bypass default conditions)
         type: boolean
         required: true
         default: true
 
 jobs:
   build:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 60
     defaults:
       run:
         shell: bash
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.20', 'go1.21']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -43,14 +45,14 @@ jobs:
 
           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true
 
           - job_name: mac_amd64
             os: macos-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -59,14 +61,14 @@ jobs:
 
           - job_name: mac_arm64
             os: macos-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
 
           - job_name: windows
             os: windows-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,20 +78,14 @@ jobs:
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.20
+          - job_name: go1.23
             os: ubuntu-latest
-            go: '1.20'
-            quicktest: true
-            racequicktest: true
-
-          - job_name: go1.21
-            os: ubuntu-latest
-            go: '1.21'
+            go: '1.23'
             quicktest: true
             racequicktest: true
 
@@ -110,7 +106,6 @@ jobs:
           check-latest: true
 
       - name: Set environment variables
-        shell: bash
         run: |
           echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
           echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
@@ -119,16 +114,15 @@ jobs:
           if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
 
       - name: Install Libraries on Linux
-        shell: bash
         run: |
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone
+          sudo apt-get update
+          sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
         if: matrix.os == 'ubuntu-latest'
 
       - name: Install Libraries on macOS
-        shell: bash
         run: |
           # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
           # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
@@ -157,7 +151,6 @@ jobs:
         if: matrix.os == 'windows-latest'
 
       - name: Print Go version and environment
-        shell: bash
         run: |
           printf "Using go at: $(which go)\n"
           printf "Go version: $(go version)\n"
@@ -169,29 +162,24 @@ jobs:
           env
 
       - name: Build rclone
-        shell: bash
         run: |
           make
 
       - name: Rclone version
-        shell: bash
         run: |
           rclone version
 
       - name: Run tests
-        shell: bash
         run: |
           make quicktest
         if: matrix.quicktest
 
       - name: Race test
-        shell: bash
         run: |
           make racequicktest
         if: matrix.racequicktest
 
       - name: Run librclone tests
-        shell: bash
         run: |
           make -C librclone/ctest test
           make -C librclone/ctest clean
@@ -199,14 +187,12 @@ jobs:
         if: matrix.librclonetest
 
       - name: Compile all architectures test
-        shell: bash
         run: |
           make
           make compile_all
         if: matrix.compile_all
 
       - name: Deploy built binaries
-        shell: bash
         run: |
           if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
           make ci_beta
@@ -217,7 +203,7 @@ jobs:
         if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
 
   lint:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest
@@ -225,19 +211,20 @@ jobs:
     steps:
       - name: Get runner parameters
         id: get-runner-parameters
         shell: bash
         run: |
           echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
           echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
 
       - name: Checkout
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
       - name: Install Go
         id: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.22.0-rc.1'
+          go-version: '>=1.23.0-rc.1'
           check-latest: true
           cache: false
 
@@ -295,8 +282,12 @@ jobs:
       - name: Scan for vulnerabilities
         run: govulncheck ./...
 
+      - name: Scan edits of autogenerated files
+        run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
+        if: github.event_name == 'pull_request'
 
   android:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
@@ -311,10 +302,9 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.22.0-rc.1'
+          go-version: '>=1.24.0-rc.1'
 
       - name: Set global environment variables
-        shell: bash
         run: |
           echo "VERSION=$(make version)" >> $GITHUB_ENV
 
@@ -333,7 +323,6 @@ jobs:
         run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
 
       - name: arm-v7a Set environment variables
-        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -347,7 +336,6 @@ jobs:
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
 
       - name: arm64-v8a Set environment variables
-        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -360,7 +348,6 @@ jobs:
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
 
       - name: x86 Set environment variables
-        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -373,7 +360,6 @@ jobs:
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
 
       - name: x64 Set environment variables
-        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
 
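Note on the repeated `if:` rewrite above: with `workflow_dispatch`, values reached via `github.event.inputs.*` are always strings, which is why the old expression had to compare against the string 'true'. The `inputs` context instead preserves the declared `type: boolean`, so the new expression can use the value directly. A minimal sketch of the pattern (a hypothetical `demo` workflow, not part of this diff):

name: demo
on:
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true
jobs:
  demo:
    # inputs.manual is a real boolean here; on push/pull_request events the
    # inputs context is empty (falsy), so the right-hand side of || still
    # gates normal runs.
    if: inputs.manual || github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    steps:
      - run: echo "manual=${{ inputs.manual }}"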
.github/workflows/build_android.yml (vendored, new file, 212 lines)
@@ -0,0 +1,212 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build_android.yml" -*-

name: Build & Push Android Builds

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
  cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  android:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        include:
          - job_name: android-all
            platform: linux/amd64/android/go1.24
            os: ubuntu-latest
            go: '>=1.24.0-rc.1'

    name: ${{ matrix.job_name }}
    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      # Upgrade together with NDK version
      - name: Install Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
          check-latest: true
          cache: false

      - name: Set Environment Variables
        shell: bash
        run: |
          echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_ENV
          echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_ENV
          echo "VERSION=$(make version)" >> $GITHUB_ENV

      - name: Set PLATFORM Variable
        shell: bash
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Get ImageOS
        # There's no way around this, because "ImageOS" is only available to
        # processes, but the setup-go action uses it in its key.
        id: imageos
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            return process.env.ImageOS

      - name: Set CACHE_PREFIX Variable
        shell: bash
        run: |
          cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}
          echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV

      - name: Load Go Module Cache
        uses: actions/cache@v4
        with:
          path: |
            ${{ env.GOMODCACHE }}
          key: ${{ env.CACHE_PREFIX }}-modcache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-modcache

      # Both load & update the cache when on default branch
      - name: Load Go Build & Test Cache
        id: go-cache
        uses: actions/cache@v4
        if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request'
        with:
          path: |
            ${{ env.GOCACHE }}
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      # Only load the cache when not on default branch
      - name: Load Go Build & Test Cache
        id: go-cache-restore
        uses: actions/cache/restore@v4
        if: github.ref_name != github.event.repository.default_branch || github.event_name == 'pull_request'
        with:
          path: |
            ${{ env.GOCACHE }}
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      - name: Build Native rclone
        shell: bash
        run: |
          make

      - name: Install gomobile
        shell: bash
        run: |
          go install golang.org/x/mobile/cmd/gobind@latest
          go install golang.org/x/mobile/cmd/gomobile@latest
          env PATH=$PATH:~/go/bin gomobile init
          echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV

      - name: arm-v7a - gomobile build
        shell: bash
        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

      - name: arm-v7a - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm' >> $GITHUB_ENV
          echo 'GOARM=7' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm-v7a - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .

      - name: arm64-v8a - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm64-v8a - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .

      - name: x86 - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=386' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x86 - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .

      - name: x64 - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=amd64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x64 - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .

      - name: Delete Existing Cache
        continue-on-error: true
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
          for cache_id in "${cache_ids[@]}"; do
            echo "Deleting Cache: $cache_id"
            gh cache delete "$cache_id"
          done
        if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request' && steps.go-cache.outputs.cache-hit != 'true'

      - name: Deploy Built Binaries
        shell: bash
        run: |
          make ci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
        if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
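The `Set PLATFORM Variable` step above uses bash pattern substitution: `${platform//\//-}` replaces every `/` with `-`, turning the matrix platform string into something safe for cache keys and artifact names. A quick illustration (value taken from the matrix above):

platform='linux/amd64/android/go1.24'
echo "${platform//\//-}"    # prints: linux-amd64-android-go1.24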
Deleted workflow: Docker beta build (77 lines removed)

@@ -1,77 +0,0 @@
name: Docker beta build

on:
  push:
    branches:
      - master
jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
          # GitHub Actions at the start of a workflow run to identify the job.
          # This is used to authenticate against GitHub Container Registry.
          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
          # for more detailed information.
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          push: true  # push the image to ghcr
          tags: |
            ghcr.io/rclone/rclone:beta
            rclone/rclone:beta
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          cache-from: type=gha, scope=${{ github.workflow }}
          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
          provenance: false
          # Eventually cache will need to be cleared if builds more frequent than once a week
          # https://github.com/docker/build-push-action/issues/252
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
.github/workflows/build_publish_docker_image.yml (vendored, new file, 329 lines)
@@ -0,0 +1,329 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-

name: Build & Push Docker Images

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
  cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build-image:
    if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        include:
          - platform: linux/amd64
            runs-on: ubuntu-24.04
          - platform: linux/386
            runs-on: ubuntu-24.04
          - platform: linux/arm64
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v7
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v6
            runs-on: ubuntu-24.04-arm

    name: Build Docker Image for ${{ matrix.platform }}
    runs-on: ${{ matrix.runs-on }}

    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set REPO_NAME Variable
        shell: bash
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Set PLATFORM Variable
        shell: bash
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Set CACHE_NAME Variable
        shell: python
        env:
          GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
        run: |
          import os, re

          def slugify(input_string, max_length=63):
              slug = input_string.lower()
              slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
              slug = slug.strip()
              slug = re.sub(r'\s+', '-', slug)
              slug = re.sub(r'-+', '-', slug)
              slug = slug[:max_length]
              slug = re.sub(r'[-]+$', '', slug)
              return slug

          ref_name_slug = "cache"

          if os.environ.get("GITHUB_REF_NAME"):
              if os.environ['GITHUB_EVENT_NAME'] == "pull_request":
                  ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
              elif os.environ['GITHUB_REF_NAME'] != os.environ['GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH']:
                  ref_name_slug += "-ref-" + slugify(os.environ['GITHUB_REF_NAME'])

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"CACHE_NAME={ref_name_slug}\n")

      - name: Get ImageOS
        # There's no way around this, because "ImageOS" is only available to
        # processes, but the setup-go action uses it in its key.
        id: imageos
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            return process.env.ImageOS

      - name: Set CACHE_PREFIX Variable
        shell: bash
        run: |
          cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}-docker-go
          echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor  # Important for digest annotation (used by Github packages)
        with:
          images: |
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Setup QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Load Go Build Cache for Docker
        id: go-cache
        uses: actions/cache@v4
        if: github.ref_name == github.event.repository.default_branch
        with:
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            /tmp/go-build-cache
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      - name: Load Go Build Cache for Docker
        id: go-cache-restore
        uses: actions/cache/restore@v4
        if: github.ref_name != github.event.repository.default_branch
        with:
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            /tmp/go-build-cache
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      - name: Inject Go Build Cache into Docker
        uses: reproducible-containers/buildkit-cache-dance@v3
        with:
          cache-map: |
            {
              "/tmp/go-build-cache": "/root/.cache/go-build"
            }
          skip-extraction: ${{ steps.go-cache.outputs.cache-hit || steps.go-cache-restore.outputs.cache-hit }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and Publish Image Digest
        id: build
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          provenance: false
          # don't specify 'tags' here (error "get can't push tagged ref by digest")
          # tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          annotations: ${{ steps.meta.outputs.annotations }}
          platforms: ${{ matrix.platform }}
          outputs: |
            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
          cache-from: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }}
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-cache
          cache-to: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }},image-manifest=true,mode=max,compression=zstd

      - name: Export Image Digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

      - name: Upload Image Digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM }}
          path: /tmp/digests/*
          retention-days: 1
          if-no-files-found: error

      - name: Delete Existing Cache
        if: github.ref_name == github.event.repository.default_branch && steps.go-cache.outputs.cache-hit != 'true'
        continue-on-error: true
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
          for cache_id in "${cache_ids[@]}"; do
            echo "Deleting Cache: $cache_id"
            gh cache delete "$cache_id"
          done

  merge-image:
    name: Merge & Push Final Docker Image
    runs-on: ubuntu-24.04
    needs:
      - build-image

    steps:
      - name: Download Image Digests
        uses: actions/download-artifact@v4
        with:
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true

      - name: Set REPO_NAME Variable
        shell: bash
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
        with:
          images: |
            ${{ env.REPO_NAME }}
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Extract Tags
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
          tags_string = " ".join(tags)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"TAGS={tags_string}\n")

      - name: Extract Annotations
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
          annotations_string = " ".join(annotations)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"ANNOTATIONS={annotations_string}\n")

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Create & Push Manifest List
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create \
            ${{ env.TAGS }} \
            ${{ env.ANNOTATIONS }} \
            $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)

      - name: Inspect and Run Multi-Platform Image
        run: |
          docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
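In the `Create & Push Manifest List` step above, each `build-image` job uploaded an empty file named after its image digest, so after `download-artifact` the /tmp/digests directory holds one file per platform. The trailing `$(printf ... *)` expansion turns those file names into by-digest image references for `imagetools create`. Roughly, with two hypothetical digests:

cd /tmp/digests    # e.g. contains files aaa111... and bbb222...
printf 'ghcr.io/rclone/rclone@sha256:%s ' *
# prints: ghcr.io/rclone/rclone@sha256:aaa111... ghcr.io/rclone/rclone@sha256:bbb222...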
.github/workflows/build_publish_docker_plugin.yml (vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-

name: Release Build for Docker Plugin

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build_docker_volume_plugin:
    if: inputs.manual || github.repository == 'rclone/rclone'
    name: Build docker plugin job
    runs-on: ubuntu-latest
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
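The release script above leans on a few more bash expansions: `${GITHUB_REF#refs/tags/}` strips the leading ref prefix, `${VER#v}` strips the leading `v`, and `${PLUGIN_ARCH/\//-}` is the single-substitution form (one `/` is enough in `arm/v7`). For a hypothetical release tag v1.2.3:

GITHUB_REF=refs/tags/v1.2.3     # hypothetical tag
VER=${GITHUB_REF#refs/tags/}    # v1.2.3
echo "${VER#v}"                 # 1.2.3
PLUGIN_ARCH=arm/v7
echo "${PLUGIN_ARCH/\//-}"      # arm-v7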
Deleted workflow: Docker release build (77 lines removed)

@@ -1,77 +0,0 @@
name: Docker release build

on:
  release:
    types: [published]

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Get actual patch version
        id: actual_patch_version
        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
      - name: Get actual minor version
        id: actual_minor_version
        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
      - name: Get actual major version
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Build and publish image
        uses: ilteoood/docker_buildx@1.1.0
        with:
          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
          imageName: rclone/rclone
          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          publish: true
          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
.github/workflows/lint.yml (vendored, new file, 104 lines)
@@ -0,0 +1,104 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable lint.yml" -*-

name: Lint & Vulnerability Check

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
  cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  lint:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 30
    name: "lint"
    runs-on: ubuntu-latest

    steps:
      - name: Get runner parameters
        id: get-runner-parameters
        shell: bash
        run: |
          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT

      - name: Checkout
        uses: actions/checkout@v4

      - name: Install Go
        id: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: '>=1.23.0-rc.1'
          check-latest: true
          cache: false

      - name: Cache
        uses: actions/cache@v4
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
            ~/.cache/golangci-lint
          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

      - name: Code quality test (Linux)
        uses: golangci/golangci-lint-action@v6
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (Windows)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "windows"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (macOS)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "darwin"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (FreeBSD)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "freebsd"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (OpenBSD)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "openbsd"
        with:
          version: latest
          skip-cache: true

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

      - name: Scan for vulnerabilities
        run: govulncheck ./...
@@ -13,6 +13,7 @@ linters:
- stylecheck
- unused
- misspell
- gocritic
#- prealloc
#- maligned
disable-all: true
@@ -98,3 +99,46 @@ linters-settings:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
gocritic:
# Enable all default checks with some exceptions and some additions (commented).
# Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
disable-all: true
enabled-checks:
#- appendAssign # Enabled by default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Enabled by default
- caseOrder
- codegenComment
#- commentFormatting # Enabled by default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Enabled by default
- flagDeref
- flagName
#- ifElseChain # Enabled by default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Not enabled by default
#- singleCaseSwitch # Enabled by default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: "${configDir}/bin/rules.go"

@@ -209,7 +209,7 @@ altogether with an HTML report and test retries then from the
project root:

go install github.com/rclone/rclone/fstest/test_all
test_all -backend drive
test_all -backends drive

### Full integration testing

@@ -490,7 +490,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
@@ -508,7 +508,7 @@ You'll need to modify the following files
- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from genric config questions (eg `region` and `endpoint`).
- Exclude your provider from generic config questions (eg `region` and `endpoint`).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.
@@ -571,4 +571,18 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.

[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)

[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
## Keeping a backend or command out of tree

Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.

So for example if you had a backend which accessed your proprietary
systems or a command which was specialised for your needs you could
add them out of tree.

This may be easier than using a plugin and is supported on all
platforms not just macOS and Linux.

This is explained further in https://github.com/rclone/rclone_out_of_tree_example
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).
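A minimal sketch of the `main.go` such an out-of-tree build might use, assuming the layout of the example repository above (the `github.com/yourorg/yourbackend` import path is hypothetical):

```go
package main

import (
	_ "github.com/rclone/rclone/backend/all" // register the in-tree backends
	"github.com/rclone/rclone/cmd"
	_ "github.com/rclone/rclone/cmd/all" // register the in-tree commands

	// Register your out-of-tree backend (hypothetical import path)
	_ "github.com/yourorg/yourbackend"
)

func main() {
	cmd.Main()
}
```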
44 Dockerfile
@@ -1,19 +1,47 @@
FROM golang:alpine AS builder

COPY . /go/src/github.com/rclone/rclone/
ARG CGO_ENABLED=0

WORKDIR /go/src/github.com/rclone/rclone/

RUN apk add --no-cache make bash gawk git
RUN \
CGO_ENABLED=0 \
make
RUN ./rclone version
RUN echo "**** Set Go Environment Variables ****" && \
go env -w GOCACHE=/root/.cache/go-build

RUN echo "**** Install Dependencies ****" && \
apk add --no-cache \
make \
bash \
gawk \
git

COPY go.mod .
COPY go.sum .

RUN echo "**** Download Go Dependencies ****" && \
go mod download -x

RUN echo "**** Verify Go Dependencies ****" && \
go mod verify

COPY . .

RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
echo "**** Build Binary ****" && \
make

RUN echo "**** Print Version Binary ****" && \
./rclone version

# Begin final image
FROM alpine:latest

RUN apk --no-cache add ca-certificates fuse3 tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
RUN echo "**** Install Dependencies ****" && \
apk add --no-cache \
ca-certificates \
fuse3 \
tzdata && \
echo "Enable user_allow_other in fuse" && \
echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

@@ -21,6 +21,8 @@ Current active maintainers of rclone are:
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |

**This is a work in progress Draft**

8532 MANUAL.html generated
File diff suppressed because it is too large
9445 MANUAL.txt generated
File diff suppressed because it is too large
12 Makefile
@@ -88,13 +88,13 @@ test: rclone test_all

# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...

racequicktest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...

compiletest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...

# Do source code quality checks
check: rclone
@@ -144,10 +144,14 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

backenddocs: rclone bin/make_backend_docs.py
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

rcdocs: rclone
bin/make_rc_docs.sh

33 README.md
@@ -1,20 +1,4 @@
<div align="center">
<sup>Special thanks to our sponsor:</sup>
<br>
<br>
<a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
<div>
<img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
</div>
<b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
<div>
<sup>Visit warp.dev to learn more.</sup>
</div>
</a>
<br>
<hr>
</div>
<br>

[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
@@ -55,14 +39,20 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FileLu [:page_facing_up:](https://rclone.org/filelu/)
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
* FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -76,7 +66,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* MEGA [:page_facing_up:](https://rclone.org/mega/)
* MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
@@ -89,10 +80,12 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
@@ -101,9 +94,12 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
@@ -116,6 +112,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)

Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

27 RELEASE.md
@@ -47,13 +47,20 @@ Early in the next release cycle update the dependencies.
* `git commit -a -v -m "build: update all dependencies"`

If the `make updatedirect` upgrades the version of go in the `go.mod`
then go to manual mode. `go1.20` here is the lowest supported version

go 1.22.0

then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.

If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.

```
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.20 -compat=1.20
go mod tidy -go=1.22 -compat=1.22
```

If the `go mod tidy` fails use the output from it to remove the
@@ -86,6 +93,16 @@ build.
Once it compiles locally, push it on a test branch and commit fixes
until the tests pass.

### Major versions

The above procedure will not upgrade major versions, so v2 to v3.
However this tool can show which major versions might need to be
upgraded:

go run github.com/icholy/gomajor@latest list -major

Expect API breakage when updating major versions.

## Tidy beta

At some point after the release run
@@ -114,8 +131,8 @@ Now

* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* Do the steps as above
* make startstable
* Do the steps as above
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
@@ -168,6 +185,8 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a

To make a full build then set the tags correctly and add `--push`

Note that you can't only build one architecture - you need to build them all.

```
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```

@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
configfile.Install()

// Configure the remote
config.FileSet(remoteName, "type", "alias")
config.FileSet(remoteName, "remote", root)
config.FileSetValue(remoteName, "type", "alias")
config.FileSetValue(remoteName, "remote", root)
}

func TestNewFS(t *testing.T) {

@@ -10,20 +10,26 @@ import (
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/cloudinary"
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/doi"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/filelu"
_ "github.com/rclone/rclone/backend/filescom"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/gofile"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/iclouddrive"
_ "github.com/rclone/rclone/backend/imagekit"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
@@ -39,6 +45,7 @@ import (
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/pixeldrain"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"

File diff suppressed because it is too large
@@ -3,16 +3,149 @@
package azureblob

import (
"context"
"encoding/base64"
"strings"
"testing"

"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
func TestBlockIDCreator(t *testing.T) {
// Check creation and random number
bic, err := newBlockIDCreator()
require.NoError(t, err)
bic2, err := newBlockIDCreator()
require.NoError(t, err)
assert.NotEqual(t, bic.random, bic2.random)
assert.NotEqual(t, bic.random, [8]byte{})

// Set random to known value for tests
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
chunkNumber := uint64(0xFEDCBA9876543210)

// Check creation of ID
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
got := bic.newBlockID(chunkNumber)
assert.Equal(t, want, got)
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)

// Test checkID is working
assert.NoError(t, bic.checkID(chunkNumber, got))
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}
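For reference, the encoding these assertions pin down: a block ID is the base64 of the 8-byte big-endian chunk number followed by the creator's 8 random bytes. A minimal sketch (the real `newBlockID` lives in the backend; this only mirrors what the test checks):

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// sketchBlockID mirrors the layout the test asserts: 8-byte big-endian
// chunk number, then the creator's 8 random bytes, base64-encoded.
func sketchBlockID(chunkNumber uint64, random [8]byte) string {
	var buf [16]byte
	binary.BigEndian.PutUint64(buf[:8], chunkNumber)
	copy(buf[8:], random[:])
	return base64.StdEncoding.EncodeToString(buf[:])
}

func main() {
	// Reproduces the expected value from the test above
	fmt.Println(sketchBlockID(0xFEDCBA9876543210, [8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
	// Output: /ty6mHZUMhABAgMEBQYHCA==
}
```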

func (f *Fs) testFeatures(t *testing.T) {
// Check first feature flags are set on this remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}

type ReadSeekCloser struct {
*strings.Reader
}

func (r *ReadSeekCloser) Close() error {
return nil
}

// Stage a block at remote but don't commit it
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
var (
containerName, blobPath = f.split(remote)
containerClient = f.cntSVC(containerName)
blobClient = containerClient.NewBlockBlobClient(blobPath)
data = "uncommitted data"
blockID = "1"
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
)
r := &ReadSeekCloser{strings.NewReader(data)}
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
require.NoError(t, err)

// Verify the block is staged but not committed
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
require.NoError(t, err)
found := false
for _, block := range blockList.UncommittedBlocks {
if *block.Name == blockIDBase64 {
found = true
break
}
}
require.True(t, found, "Block ID not found in uncommitted blocks")
}

// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
var (
ctx = context.Background()
remote = "testBlob"
)

// Multipart copy the blob please
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
f.opt.UseCopyBlob = false
f.opt.CopyCutoff = f.opt.ChunkSize
defer func() {
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
}()

// Create a blob with uncommitted blocks
f.stageBlockWithoutCommit(ctx, t, remote)

// Now attempt to overwrite the block with a different sized block ID to provoke this error

// Check the object does not exist
_, err := f.NewObject(ctx, remote)
require.Equal(t, fs.ErrorObjectNotFound, err)

// Upload a multipart file over the block with uncommitted chunks of a different ID size
size := 4*int(f.opt.ChunkSize) - 1
contents := random.String(size)
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)

// Check size
assert.Equal(t, int64(size), o.Size())

// Create a new blob with uncommitted blocks
newRemote := "testBlob2"
f.stageBlockWithoutCommit(ctx, t, newRemote)

// Copy over that block
dst, err := f.Copy(ctx, o, newRemote)
require.NoError(t, err)

// Check basics
assert.Equal(t, int64(size), dst.Size())
assert.Equal(t, newRemote, dst.Remote())

// Check contents
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)

// Remove the object
require.NoError(t, dst.Remove(ctx))
}

func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
}

@@ -15,13 +15,17 @@ import (

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
RemoteName: name + ":",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}

@@ -40,6 +44,7 @@ func TestIntegration2(t *testing.T) {
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
@@ -48,8 +53,13 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}

var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)

func TestValidateAccessTier(t *testing.T) {

@@ -237,6 +237,30 @@ msi_client_id, or msi_mi_res_id parameters.`,
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
Sensitive: true,
}, {
Name: "disable_instance_discovery",
Help: `Skip requesting Microsoft Entra instance metadata
This should be set true only by applications authenticating in
disconnected clouds, or private clouds such as Azure Stack.
It determines whether rclone requests Microsoft Entra instance
metadata from ` + "`https://login.microsoft.com/`" + ` before
authenticating.
Setting this to true will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
`,
Default: false,
Advanced: true,
}, {
Name: "use_az",
Help: `Use Azure CLI tool az for authentication
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.
Don't set env_auth at the same time.
`,
Default: false,
Advanced: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -319,10 +343,12 @@ type Options struct {
Username string `config:"username"`
Password string `config:"password"`
ServicePrincipalFile string `config:"service_principal_file"`
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
UseMSI bool `config:"use_msi"`
MSIObjectID string `config:"msi_object_id"`
MSIClientID string `config:"msi_client_id"`
MSIResourceID string `config:"msi_mi_res_id"`
UseAZ bool `config:"use_az"`
Endpoint string `config:"endpoint"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`
@@ -393,8 +419,10 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
policyClientOptions := policy.ClientOptions{
Transport: newTransporter(ctx),
}
backup := service.ShareTokenIntentBackup
clientOpt := service.ClientOptions{
ClientOptions: policyClientOptions,
ClientOptions: policyClientOptions,
FileRequestIntent: &backup,
}

// Here we auth by setting one of cred, sharedKeyCred or f.client
@@ -412,7 +440,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
}
// Read credentials from the environment
options := azidentity.DefaultAzureCredentialOptions{
ClientOptions: policyClientOptions,
ClientOptions: policyClientOptions,
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
}
cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil {
@@ -423,6 +452,13 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
if err != nil {
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
}
case opt.UseAZ:
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
}
case opt.SASURL != "":
client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
if err != nil {
@@ -480,6 +516,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
}
case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
// User with username and password
//nolint:staticcheck // this is deprecated due to Azure policy
options := azidentity.UsernamePasswordCredentialOptions{
ClientOptions: policyClientOptions,
}
@@ -532,6 +569,38 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
}
case opt.ClientID != "" && opt.Tenant != "" && opt.MSIClientID != "":
// Workload Identity based authentication
var options azidentity.ManagedIdentityCredentialOptions
options.ID = azidentity.ClientID(opt.MSIClientID)

msiCred, err := azidentity.NewManagedIdentityCredential(&options)
if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
}

getClientAssertions := func(context.Context) (string, error) {
token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
Scopes: []string{"api://AzureADTokenExchange"},
})

if err != nil {
return "", fmt.Errorf("failed to acquire MSI token: %w", err)
}

return token.Token, nil
}

assertOpts := &azidentity.ClientAssertionCredentialOptions{}
cred, err = azidentity.NewClientAssertionCredential(
opt.Tenant,
opt.ClientID,
getClientAssertions,
assertOpts)

if err != nil {
return nil, fmt.Errorf("failed to acquire client assertion token: %w", err)
}
default:
return nil, errors.New("no authentication method configured")
}

@@ -885,7 +954,7 @@ func (o *Object) setMetadata(resp *file.GetPropertiesResponse) {
}
}

// readMetaData gets the metadata if it hasn't already been fetched
// getMetadata gets the metadata if it hasn't already been fetched
func (o *Object) getMetadata(ctx context.Context) error {
resp, err := o.fileClient().GetProperties(ctx, nil)
if err != nil {
@@ -897,7 +966,7 @@ func (o *Object) getMetadata(ctx context.Context) error {

// Hash returns the MD5 of an object returning a lowercase hex string
//
// May make a network request becaue the [fs.List] method does not
// May make a network request because the [fs.List] method does not
// return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
@@ -1035,12 +1104,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if _, createErr := fc.Create(ctx, size, nil); createErr != nil {
return fmt.Errorf("update: unable to create file: %w", createErr)
}
} else {
} else if size != o.Size() {
// Resize the file if needed
if size != o.Size() {
if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
}
if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
}
}

@@ -61,7 +61,7 @@ const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"

func randomString(charCount int) string {
strBldr := strings.Builder{}
for i := 0; i < charCount; i++ {
for range charCount {
randPos := rand.Int63n(52)
strBldr.WriteByte(chars[randPos])
}
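The `for range charCount` form is Go 1.22's range-over-int, which drops the unused index variable. A small sketch of the equivalence:

```go
package main

import "fmt"

func main() {
	// Since Go 1.22 these two loops are equivalent: ranging over an
	// integer n iterates exactly n times.
	n := 3
	for i := 0; i < n; i++ {
		fmt.Println("classic loop, iteration", i)
	}
	for range n {
		fmt.Println("range-over-int, no index needed")
	}
}
```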

@@ -42,9 +42,10 @@ type Bucket struct {

// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
FileNamePrefix string `json:"fileNamePrefix"`
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
FileNamePrefix string `json:"fileNamePrefix"`
}

// Timestamp is a UTC time when this file was uploaded. It is a base
@@ -129,10 +130,10 @@ type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.

@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
}

func TestTimestampEqual(t *testing.T) {
assert.False(t, emptyT.Equal(emptyT))
assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
assert.True(t, t0.Equal(t0))
assert.True(t, t1.Equal(t1))
assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
}

134 backend/b2/b2.go
@@ -16,6 +16,7 @@ import (
"io"
"net/http"
"path"
"slices"
"strconv"
"strings"
"sync"
@@ -30,7 +31,8 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
@@ -588,12 +590,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {

// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
for _, capability := range f.info.Allowed.Capabilities {
if capability == permission {
return true
}
}
return false
return slices.Contains(f.info.Allowed.Capabilities, permission)
}
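The rewritten `hasPermission` uses `slices.Contains` from the standard library (Go 1.21+), matching the new `"slices"` import above. A minimal sketch, with assumed example capability names:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	// slices.Contains replaces the hand-written linear search the old
	// hasPermission used; the capability names here are illustrative.
	caps := []string{"listBuckets", "readFiles"}
	fmt.Println(slices.Contains(caps, "readFiles"))   // true
	fmt.Println(slices.Contains(caps, "deleteFiles")) // false
}
```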

// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -921,7 +918,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
last := ""
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
@@ -1274,7 +1271,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
toBeDeleted := make(chan *api.File, f.ci.Transfers)
var wg sync.WaitGroup
wg.Add(f.ci.Transfers)
for i := 0; i < f.ci.Transfers; i++ {
for range f.ci.Transfers {
go func() {
defer wg.Done()
for object := range toBeDeleted {
@@ -1317,16 +1314,22 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
// Check current version of the file
if deleteHidden && object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
toBeDeleted <- object
}
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
toBeDeleted <- object
if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
toBeDeleted <- object
}
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
}
} else {
fs.Debugf(remote, "Deleting (id %q)", object.ID)
toBeDeleted <- object
if !operations.SkipDestructive(ctx, object.Name, "delete") {
toBeDeleted <- object
}
}
last = remote
tr.Done(ctx, nil)
@@ -1593,7 +1596,11 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
o.size = Size
// Use the UploadTimestamp if can't get file info
o.modTime = time.Time(UploadTimestamp)
return o.parseTimeString(Info[timeKey])
err = o.parseTimeString(Info[timeKey])
if err != nil {
return err
}
return nil
}

// decodeMetaData sets the metadata in the object from an api.File
@@ -1666,6 +1673,21 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
return o.getMetaDataListing(ctx)
}
}

// If using versionAt we need to list to find the correct version.
if o.fs.opt.VersionAt.IsSet() {
info, err := o.getMetaDataListing(ctx)
if err != nil {
return nil, err
}

if info.Action == "hide" {
// Return object not found error if the current version is deleted.
return nil, fs.ErrorObjectNotFound
}
return info, nil
}

_, info, err = o.getOrHead(ctx, "HEAD", nil)
return info, err
}
@@ -1695,6 +1717,16 @@ func timeString(modTime time.Time) string {
return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
}

// parseTimeStringHelper converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time
func parseTimeStringHelper(timeString string) (time.Time, error) {
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
return time.Time{}, err
}
return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
}
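A small standalone sketch of the conversion the helper performs (the timestamp value is an assumed example, not taken from the tests):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseMillis mirrors parseTimeStringHelper above: a decimal string of
// milliseconds since the Unix epoch becomes a UTC time.Time.
func parseMillis(s string) (time.Time, error) {
	ms, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(ms/1e3, (ms%1e3)*1e6).UTC(), nil
}

func main() {
	t, err := parseMillis("1500000000000")
	fmt.Println(t, err) // 2017-07-14 02:40:00 +0000 UTC <nil>
}
```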

// parseTimeString converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable.
@@ -1702,12 +1734,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
if timeString == "" {
return nil
}
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
modTime, err := parseTimeStringHelper(timeString)
if err != nil {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return nil
}
o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
o.modTime = modTime
return nil
}

@@ -1861,13 +1893,19 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
ContentType: resp.Header.Get("Content-Type"),
Info: Info,
}

// When reading files from B2 via cloudflare using
// --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old
// length read from the listing.
// Additionally, the official examples return S3 headers
// instead of native, i.e. no file ID, use ones from listing.
if info.Size < 0 {
info.Size = o.size
}
if info.ID == "" {
info.ID = o.id
}
return resp, info, nil
}

@@ -1917,7 +1955,7 @@ func init() {
// urlEncode encodes in with % encoding
func urlEncode(in string) string {
var out bytes.Buffer
for i := 0; i < len(in); i++ {
for i := range len(in) {
c := in[i]
if noNeedToEncode[c] {
_ = out.WriteByte(c)
@@ -1958,7 +1996,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

if err == nil {
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...)
if err != nil {
o.fs.putRW(rw)
return err
@@ -1990,7 +2028,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(up.info)
}

modTime := src.ModTime(ctx)
modTime, err := o.getModTime(ctx, src, options)
if err != nil {
return err
}

calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
if calculatedSha1 == "" {
@@ -2095,6 +2136,36 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(&response)
}

// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
// When metadata support is added to b2, this method will need a more generic name
func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
modTime := src.ModTime(ctx)

// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
}
// merge metadata into request and user metadata
for k, v := range meta {
k = strings.ToLower(k)
// For now, the only metadata we're concerned with is "mtime"
switch k {
case "mtime":
// mtime in meta overrides source ModTime
metaModTime, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
} else {
modTime = metaModTime
}
default:
// Do nothing for now
}
}
return modTime, nil
}

// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
@@ -2126,7 +2197,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
return info, up, err
}

@@ -2172,6 +2243,7 @@ This will dump something like this showing the lifecycle rules.
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
"fileNamePrefix": ""
}
]
@@ -2198,12 +2270,13 @@ overwrites will still cause versions to be made.
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
},
}

func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
var newRule api.LifecycleRule
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
@@ -2219,14 +2292,23 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
}
newRule.DaysFromUploadingToHiding = &days
}
if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
}
newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
}
bucketName, _ := f.split("")
if bucketName == "" {
return nil, errors.New("bucket required")

}

skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")

var bucket *api.Bucket
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err
@@ -2283,7 +2365,7 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
},
}

func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
maxAge := defaultMaxAge
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
@@ -2306,7 +2388,7 @@ it would do.
`,
}

func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
return nil, f.cleanUp(ctx, true, false, 0)
}

@@ -2325,7 +2407,7 @@ var commandHelp = []fs.CommandHelp{
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt)

@@ -5,6 +5,7 @@ import (
"crypto/sha1"
"fmt"
"path"
"sort"
"strings"
"testing"
"time"
@@ -13,6 +14,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
@@ -184,57 +186,120 @@ func TestParseTimeString(t *testing.T) {

}

// This is adapted from the s3 equivalent.
func (f *Fs) InternalTestMetadata(t *testing.T) {
ctx := context.Background()
original := random.String(1000)
contents := fstest.Gz(t, original)
mimeType := "text/html"

item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
btime := time.Now()
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, mimeType, nil)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.getMetaData(ctx)
require.NoError(t, err)

// We currently have a limited amount of metadata to test with B2
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")

// Modification time from the x-bz-info-src_last_modified_millis header
var mtime api.Timestamp
err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
if err != nil {
fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
var headers = make(map[string]string)
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
headers[k[len(headerPrefix):]] = v
}
}
assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")

// Upload time
gotBtime := time.Time(gotMetadata.UploadTimestamp)
dt := gotBtime.Sub(btime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
return headers
}
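A hedged usage sketch of the new `OpenOptionToMetaData`, built from the same options the rewritten test below constructs, and assuming `headerPrefix` is `"x-bz-info-"` as the comment implies:

```go
// Keys are lower-cased and the "x-bz-info-" prefix is stripped; options
// without that prefix are ignored.
options := []fs.OpenOption{
	&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
	&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
}
meta := OpenOptionToMetaData(options)
// meta == map[string]string{"a": "1", "b": "2"}
```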
|
||||
|
||||
t.Run("GzipEncoding", func(t *testing.T) {
|
||||
// Test that the gzipped file we uploaded can be
|
||||
// downloaded
|
||||
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
|
||||
gotContents := fstests.ReadObject(ctx, t, o, -1)
|
||||
assert.Equal(t, wantContents, gotContents)
|
||||
assert.Equal(t, wantSize, o.Size())
|
||||
gotHash, err := o.Hash(ctx, hash.SHA1)
|
||||
func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
	what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
	t.Run(what, func(t *testing.T) {
		ctx := context.Background()

		ss := fs.SizeSuffix(0)
		err := ss.Set(size)
		require.NoError(t, err)
		original := random.String(int(ss))

		contents := fstest.Gz(t, original)
		mimeType := "text/html"

		if chunkSize != "" {
			ss := fs.SizeSuffix(0)
			err := ss.Set(chunkSize)
			require.NoError(t, err)
			_, err = f.SetUploadChunkSize(ss)
			require.NoError(t, err)
		}

		if uploadCutoff != "" {
			ss := fs.SizeSuffix(0)
			err := ss.Set(uploadCutoff)
			require.NoError(t, err)
			_, err = f.SetUploadCutoff(ss)
			require.NoError(t, err)
		}

		item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
		btime := time.Now()
		metadata := fs.Metadata{
			// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any
			"mtime": "2009-05-06T04:05:06.499Z",
		}

		// Need to specify HTTP options with the header prefix since they are passed as-is
		options := []fs.OpenOption{
			&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
			&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
		}

		obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
		defer func() {
			assert.NoError(t, obj.Remove(ctx))
		}()
		o := obj.(*Object)
		gotMetadata, err := o.getMetaData(ctx)
		require.NoError(t, err)

		// X-Bz-Info-a & X-Bz-Info-b
		optMetadata := OpenOptionToMetaData(options)
		for k, v := range optMetadata {
			got := gotMetadata.Info[k]
			assert.Equal(t, v, got, k)
		}

		// We currently have a limited amount of metadata to test with B2
		assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")

		// Modification time from the x-bz-info-src_last_modified_millis header
		var mtime api.Timestamp
		err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
		if err != nil {
			fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
		}
		assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")

		// Upload time
		gotBtime := time.Time(gotMetadata.UploadTimestamp)
		dt := gotBtime.Sub(btime)
		assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))

		t.Run("GzipEncoding", func(t *testing.T) {
			// Test that the gzipped file we uploaded can be
			// downloaded
			checkDownload := func(wantContents string, wantSize int64, wantHash string) {
				gotContents := fstests.ReadObject(ctx, t, o, -1)
				assert.Equal(t, wantContents, gotContents)
				assert.Equal(t, wantSize, o.Size())
				gotHash, err := o.Hash(ctx, hash.SHA1)
				require.NoError(t, err)
				assert.Equal(t, wantHash, gotHash)
			}

			t.Run("NoDecompress", func(t *testing.T) {
				checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
			})
		})
	})
}

func (f *Fs) InternalTestMetadata(t *testing.T) {
	// 1 kiB regular file
	f.internalTestMetadata(t, "1kiB", "", "")

	// 10 MiB large file
	f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
}

func sha1Sum(t *testing.T, s string) string {
	hash := sha1.Sum([]byte(s))
	return fmt.Sprintf("%x", hash)
}
@@ -381,37 +446,174 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
			t.Run("List", func(t *testing.T) {
				fstest.CheckListing(t, f, test.want)
			})
			// b2 NewObject doesn't work with VersionAt
			//t.Run("NewObject", func(t *testing.T) {
			//	gotObj, gotErr := f.NewObject(ctx, fileName)
			//	assert.Equal(t, test.wantErr, gotErr)
			//	if gotErr == nil {
			//		assert.Equal(t, test.wantSize, gotObj.Size())
			//	}
			//})

			t.Run("NewObject", func(t *testing.T) {
				gotObj, gotErr := f.NewObject(ctx, fileName)
				assert.Equal(t, test.wantErr, gotErr)
				if gotErr == nil {
					assert.Equal(t, test.wantSize, gotObj.Size())
				}
			})
		})
	}
})

t.Run("Cleanup", func(t *testing.T) {
	require.NoError(t, f.cleanUp(ctx, true, false, 0))
	items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
	fstest.CheckListing(t, f, items)
	// Set --b2-versions for this test
	f.opt.Versions = true
	defer func() {
		f.opt.Versions = false
	}()
	fstest.CheckListing(t, f, items)
	t.Run("DryRun", func(t *testing.T) {
		f.opt.Versions = true
		defer func() {
			f.opt.Versions = false
		}()
		// Listing should be unchanged after dry run
		before := listAllFiles(ctx, t, f, dirName)
		ctx, ci := fs.AddConfig(ctx)
		ci.DryRun = true
		require.NoError(t, f.cleanUp(ctx, true, false, 0))
		after := listAllFiles(ctx, t, f, dirName)
		assert.Equal(t, before, after)
	})

	t.Run("RealThing", func(t *testing.T) {
		f.opt.Versions = true
		defer func() {
			f.opt.Versions = false
		}()
		// Listing should reflect current state after cleanup
		require.NoError(t, f.cleanUp(ctx, true, false, 0))
		items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
		fstest.CheckListing(t, f, items)
	})
})

// Purge gets tested later
}

func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
	ctx := context.Background()

	// B2CleanupHidden tests cleaning up hidden files
	t.Run("CleanupUnfinished", func(t *testing.T) {
		dirName := "unfinished"
		fileCount := 5
		expectedFiles := []string{}
		for i := 1; i < fileCount; i++ {
			fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
			expectedFiles = append(expectedFiles, fileName)
			obj := &Object{
				fs:     f,
				remote: fileName,
			}
			objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
			_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
			require.NoError(t, err)
		}
		checkListing(ctx, t, f, dirName, expectedFiles)

		t.Run("DryRun", func(t *testing.T) {
			// Listing should not change after dry run
			ctx, ci := fs.AddConfig(ctx)
			ci.DryRun = true
			require.NoError(t, f.cleanUp(ctx, false, true, 0))
			checkListing(ctx, t, f, dirName, expectedFiles)
		})

		t.Run("RealThing", func(t *testing.T) {
			// Listing should be empty after real cleanup
			require.NoError(t, f.cleanUp(ctx, false, true, 0))
			checkListing(ctx, t, f, dirName, []string{})
		})
	})
}

func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
	bucket, directory := f.split(dirName)
	foundFiles := []string{}
	require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
		if !isDirectory {
			foundFiles = append(foundFiles, object.Name)
		}
		return nil
	}))
	sort.Strings(foundFiles)
	return foundFiles
}

func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
	foundFiles := listAllFiles(ctx, t, f, dirName)
	sort.Strings(expectedFiles)
	assert.Equal(t, expectedFiles, foundFiles)
}

func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
	ctx := context.Background()

	opt := map[string]string{}

	t.Run("InitState", func(t *testing.T) {
		// There should be no lifecycle rules at the outset
		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 0, len(lifecycleRules))
	})

	t.Run("DryRun", func(t *testing.T) {
		// There should still be no lifecycle rules after each dry run operation
		ctx, ci := fs.AddConfig(ctx)
		ci.DryRun = true

		opt["daysFromHidingToDeleting"] = "30"
		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 0, len(lifecycleRules))

		delete(opt, "daysFromHidingToDeleting")
		opt["daysFromUploadingToHiding"] = "40"
		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 0, len(lifecycleRules))

		opt["daysFromHidingToDeleting"] = "30"
		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 0, len(lifecycleRules))
	})

	t.Run("RealThing", func(t *testing.T) {
		opt["daysFromHidingToDeleting"] = "30"
		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 1, len(lifecycleRules))
		assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)

		delete(opt, "daysFromHidingToDeleting")
		opt["daysFromUploadingToHiding"] = "40"
		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 1, len(lifecycleRules))
		assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)

		opt["daysFromHidingToDeleting"] = "30"
		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
		require.NoError(t, err)
		assert.Equal(t, 1, len(lifecycleRules))
		assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
		assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
	})
}

// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
	t.Run("Metadata", f.InternalTestMetadata)
	t.Run("Versions", f.InternalTestVersions)
	t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
	t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}

var _ fstests.InternalTester = (*Fs)(nil)
@@ -91,7 +91,7 @@ type largeUpload struct {
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
	size := src.Size()
	parts := 0
	chunkSize := defaultChunkSize
@@ -104,11 +104,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
			parts++
		}
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_start_large_file",
	}
	bucket, bucketPath := o.split()
	bucketID, err := f.getBucketID(ctx, bucket)
	if err != nil {
@@ -118,12 +113,27 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
		BucketID: bucketID,
		Name:     f.opt.Enc.FromStandardPath(bucketPath),
	}
	optionsToSend := make([]fs.OpenOption, 0, len(options))
	if newInfo == nil {
		modTime := src.ModTime(ctx)
		modTime, err := o.getModTime(ctx, src, options)
		if err != nil {
			return nil, err
		}

		request.ContentType = fs.MimeType(ctx, src)
		request.Info = map[string]string{
			timeKey: timeString(modTime),
		}
		// Custom upload headers - remove header prefix since they are sent in the body
		for _, option := range options {
			k, v := option.Header()
			k = strings.ToLower(k)
			if strings.HasPrefix(k, headerPrefix) {
				request.Info[k[len(headerPrefix):]] = v
			} else {
				optionsToSend = append(optionsToSend, option)
			}
		}
		// Set the SHA1 if known
		if !o.fs.opt.DisableCheckSum || doCopy {
			if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
@@ -134,6 +144,11 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
		request.ContentType = newInfo.ContentType
		request.Info = newInfo.Info
	}
	opts := rest.Opts{
		Method:  "POST",
		Path:    "/b2_start_large_file",
		Options: optionsToSend,
	}
	var response api.StartLargeFileResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -463,17 +478,14 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) {
		remaining = up.size
	)
	g.SetLimit(up.f.opt.UploadConcurrency)
	for part := 0; part < up.parts; part++ {
	for part := range up.parts {
		// Fail fast, in case an errgroup managed function returns an error
		// gCtx is cancelled. There is no point in copying all the other parts.
		if gCtx.Err() != nil {
			break
		}

		reqSize := remaining
		if reqSize >= up.chunkSize {
			reqSize = up.chunkSize
		}
		reqSize := min(remaining, up.chunkSize)

		part := part // for the closure
		g.Go(func() (err error) {
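The reqSize rewrite above leans on Go's generic `min` builtin (available since Go 1.21), which collapses the old three-line clamp into one expression. A minimal standalone sketch of the equivalence, with arbitrary values:

	package main

	import "fmt"

	func main() {
		remaining, chunkSize := int64(100), int64(64)

		// old style clamp
		reqSize := remaining
		if reqSize >= chunkSize {
			reqSize = chunkSize
		}

		// new style - the min builtin from Go 1.21
		fmt.Println(reqSize == min(remaining, chunkSize)) // true
	}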
@@ -43,9 +43,9 @@ import (
	"github.com/rclone/rclone/lib/jwtutil"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/rest"
	"github.com/youmark/pkcs8"
	"golang.org/x/oauth2"
)

const (
@@ -64,12 +64,10 @@ const (
// Globals
var (
	// Description of how to auth for this app
	oauthConfig = &oauth2.Config{
		Scopes: nil,
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://app.box.com/api/oauth2/authorize",
			TokenURL: "https://app.box.com/api/oauth2/token",
		},
	oauthConfig = &oauthutil.Config{
		Scopes:       nil,
		AuthURL:      "https://app.box.com/api/oauth2/authorize",
		TokenURL:     "https://app.box.com/api/oauth2/token",
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
@@ -239,8 +237,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
	return claims, nil
}

func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
	signingHeaders := map[string]interface{}{
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any {
	signingHeaders := map[string]any{
		"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
	}
	return signingHeaders
@@ -256,8 +254,10 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
}

func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {

	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
	if block == nil {
		return nil, errors.New("box: failed to PEM decode private key")
	}
	if len(rest) > 0 {
		return nil, fmt.Errorf("box: extra data included in private key: %w", err)
	}
@@ -619,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)
		// fmt.Printf("...Error %v\n", err)
		return "", err
	}
	// fmt.Printf("...Id %q\n", *info.Id)
@@ -966,6 +966,26 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		return nil, err
	}

	// check if dest already exists
	item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
	if err != nil {
		return nil, err
	}
	if item != nil { // dest already exists, need to copy to temp name and then move
		tempSuffix := "-rclone-copy-" + random.String(8)
		fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
		tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
		if err != nil {
			return nil, err
		}
		fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
		err = f.deleteObject(ctx, item.ID)
		if err != nil {
			return nil, err
		}
		return f.Move(ctx, tempObj, remote)
	}

	// Copy the object
	opts := rest.Opts{
		Method: "POST",
@@ -1323,12 +1343,8 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
	nextStreamPosition = streamPosition

	for {
		limit := f.opt.ListChunk

		// box only allows a max of 500 events
		if limit > 500 {
			limit = 500
		}
		limit := min(f.opt.ListChunk, 500)

		opts := rest.Opts{
			Method: "GET",

@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
	const defaultDelay = 10
	var tries int
outer:
	for tries = 0; tries < maxTries; tries++ {
	for tries = range maxTries {
		err = o.fs.pacer.Call(func() (bool, error) {
			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
			if err != nil {
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
	errs := make(chan error, 1)
	var wg sync.WaitGroup
outer:
	for part := 0; part < session.TotalParts; part++ {
	for part := range session.TotalParts {
		// Check any errors
		select {
		case err = <-errs:
@@ -211,10 +211,7 @@ outer:
		default:
		}

		reqSize := remaining
		if reqSize >= chunkSize {
			reqSize = chunkSize
		}
		reqSize := min(remaining, chunkSize)

		// Make a block of memory
		buf := make([]byte, reqSize)
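The loop rewrites in these hunks (`for part := range up.parts`, `for tries = range maxTries`) use Go 1.22's range-over-int form, which iterates 0..n-1 exactly like the classic counter loop. A tiny sketch:

	package main

	import "fmt"

	func main() {
		n := 3
		for i := range n { // Go 1.22+: same as for i := 0; i < n; i++
			fmt.Println(i) // prints 0, 1, 2
		}
		for range n { // the index can be dropped entirely when unused
			// runs n times
		}
	}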
backend/cache/cache.go
@@ -29,6 +29,7 @@ import (
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/atexit"
@@ -409,18 +410,16 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
		if err != nil {
			return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
		}
	} else {
		if opt.PlexPassword != "" && opt.PlexUsername != "" {
			decPass, err := obscure.Reveal(opt.PlexPassword)
			if err != nil {
				decPass = opt.PlexPassword
			}
			f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
				m.Set("plex_token", token)
			})
			if err != nil {
				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
			}
	} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
		decPass, err := obscure.Reveal(opt.PlexPassword)
		if err != nil {
			decPass = opt.PlexPassword
		}
		f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
			m.Set("plex_token", token)
		})
		if err != nil {
			return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
		}
	}
}
@@ -1088,13 +1087,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	return cachedEntries, nil
}

func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
	entries, err := f.List(ctx, dir)
	if err != nil {
		return err
	}

	for i := 0; i < len(entries); i++ {
	for i := range entries {
		innerDir, ok := entries[i].(fs.Directory)
		if ok {
			err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1140,7 +1139,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
	}

	// if we're here, we're gonna do a standard recursive traversal and cache everything
	list := walk.NewListRHelper(callback)
	list := list.NewHelper(callback)
	err = f.recurse(ctx, dir, list)
	if err != nil {
		return err
@@ -1430,7 +1429,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
	}()

	// wait until both are done
	for c := 0; c < 2; c++ {
	for range 2 {
		<-done
	}
}
@@ -1755,7 +1754,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}

// Stats returns stats about the cache storage
func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
func (f *Fs) Stats() (map[string]map[string]any, error) {
	return f.cache.Stats()
}

@@ -1935,7 +1934,7 @@ var commandHelp = []fs.CommandHelp{
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
	switch name {
	case "stats":
		return f.Stats()
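The signature changes in this file (`interface{}` to `any`) are purely cosmetic: `any` has been a predeclared alias for `interface{}` since Go 1.18, so the two spellings are interchangeable at the type level. A sketch:

	package main

	import "fmt"

	func describe(v any) string { // identical to func describe(v interface{}) string
		return fmt.Sprintf("%T: %v", v, v)
	}

	func main() {
		var m map[string]any = map[string]interface{}{"n": 1} // assignable both ways
		fmt.Println(describe(m))
	}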
backend/cache/cache_internal_test.go
@@ -10,7 +10,6 @@ import (
	goflag "flag"
	"fmt"
	"io"
	"log"
	"math/rand"
	"os"
	"path"
@@ -33,7 +32,7 @@ import (
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/testy"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/vfs/vfsflags"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/stretchr/testify/require"
)

@@ -93,7 +92,7 @@ func TestMain(m *testing.M) {
	goflag.Parse()
	var rc int

	log.Printf("Running with the following params: \n remote: %v", remoteName)
	fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
	runInstance = newRun()
	rc = m.Run()
	os.Exit(rc)
@@ -123,10 +122,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {

/* TODO: is this testing something?
func TestInternalVfsCache(t *testing.T) {
	vfsflags.Opt.DirCacheTime = time.Second * 30
	vfscommon.Opt.DirCacheTime = time.Second * 30
	testSize := int64(524288000)

	vfsflags.Opt.CacheMode = vfs.CacheModeWrites
	vfscommon.Opt.CacheMode = vfs.CacheModeWrites
	id := "tiuufo"
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -338,7 +337,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {

func TestInternalWrappedWrittenContentMatches(t *testing.T) {
	id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
	vfsflags.Opt.DirCacheTime = time.Second
	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
	if runInstance.rootIsCrypt {
		t.Skip("test skipped with crypt remote")
@@ -361,14 +360,14 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
	require.NoError(t, err)
	require.Equal(t, int64(len(checkSample)), o.Size())

	for i := 0; i < len(checkSample); i++ {
	for i := range checkSample {
		require.Equal(t, testData[i], checkSample[i])
	}
}

func TestInternalLargeWrittenContentMatches(t *testing.T) {
	id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
	vfsflags.Opt.DirCacheTime = time.Second
	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
	if runInstance.rootIsCrypt {
		t.Skip("test skipped with crypt remote")
@@ -388,7 +387,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {

	readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
	require.NoError(t, err)
	for i := 0; i < len(readData); i++ {
	for i := range readData {
		require.Equalf(t, testData[i], readData[i], "at byte %v", i)
	}
}
@@ -408,7 +407,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
	// update in the wrapped fs
	originalSize, err := runInstance.size(t, rootFs, "data.bin")
	require.NoError(t, err)
	log.Printf("original size: %v", originalSize)
	fs.Logf(nil, "original size: %v", originalSize)

	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
	require.NoError(t, err)
@@ -417,7 +416,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
	if runInstance.rootIsCrypt {
		data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
		require.NoError(t, err)
		expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
		expectedSize++ // FIXME newline gets in, likely test data issue
	} else {
		data2 = []byte("test content")
	}
@@ -425,7 +424,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
	err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
	require.NoError(t, err)
	require.Equal(t, int64(len(data2)), o.Size())
	log.Printf("updated size: %v", len(data2))
	fs.Logf(nil, "updated size: %v", len(data2))

	// get a new instance from the cache
	if runInstance.wrappedIsExternal {
@@ -485,49 +484,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
	err = runInstance.retryBlock(func() error {
		li, err := runInstance.list(t, rootFs, "test")
		if err != nil {
			log.Printf("err: %v", err)
			fs.Logf(nil, "err: %v", err)
			return err
		}
		if len(li) != 2 {
			log.Printf("not expected listing /test: %v", li)
			fs.Logf(nil, "not expected listing /test: %v", li)
			return fmt.Errorf("not expected listing /test: %v", li)
		}

		li, err = runInstance.list(t, rootFs, "test/one")
		if err != nil {
			log.Printf("err: %v", err)
			fs.Logf(nil, "err: %v", err)
			return err
		}
		if len(li) != 0 {
			log.Printf("not expected listing /test/one: %v", li)
			fs.Logf(nil, "not expected listing /test/one: %v", li)
			return fmt.Errorf("not expected listing /test/one: %v", li)
		}

		li, err = runInstance.list(t, rootFs, "test/second")
		if err != nil {
			log.Printf("err: %v", err)
			fs.Logf(nil, "err: %v", err)
			return err
		}
		if len(li) != 1 {
			log.Printf("not expected listing /test/second: %v", li)
			fs.Logf(nil, "not expected listing /test/second: %v", li)
			return fmt.Errorf("not expected listing /test/second: %v", li)
		}
		if fi, ok := li[0].(os.FileInfo); ok {
			if fi.Name() != "data.bin" {
				log.Printf("not expected name: %v", fi.Name())
				fs.Logf(nil, "not expected name: %v", fi.Name())
				return fmt.Errorf("not expected name: %v", fi.Name())
			}
		} else if di, ok := li[0].(fs.DirEntry); ok {
			if di.Remote() != "test/second/data.bin" {
				log.Printf("not expected remote: %v", di.Remote())
				fs.Logf(nil, "not expected remote: %v", di.Remote())
				return fmt.Errorf("not expected remote: %v", di.Remote())
			}
		} else {
			log.Printf("unexpected listing: %v", li)
			fs.Logf(nil, "unexpected listing: %v", li)
			return fmt.Errorf("unexpected listing: %v", li)
		}

		log.Printf("complete listing: %v", li)
		fs.Logf(nil, "complete listing: %v", li)
		return nil
	}, 12, time.Second*10)
	require.NoError(t, err)
@@ -577,43 +576,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
	err = runInstance.retryBlock(func() error {
		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
		if !found {
			log.Printf("not found /test")
			fs.Logf(nil, "not found /test")
			return fmt.Errorf("not found /test")
		}
		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
		if !found {
			log.Printf("not found /test/one")
			fs.Logf(nil, "not found /test/one")
			return fmt.Errorf("not found /test/one")
		}
		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
		if !found {
			log.Printf("not found /test/one/test2")
			fs.Logf(nil, "not found /test/one/test2")
			return fmt.Errorf("not found /test/one/test2")
		}
		li, err := runInstance.list(t, rootFs, "test/one")
		if err != nil {
			log.Printf("err: %v", err)
			fs.Logf(nil, "err: %v", err)
			return err
		}
		if len(li) != 1 {
			log.Printf("not expected listing /test/one: %v", li)
			fs.Logf(nil, "not expected listing /test/one: %v", li)
			return fmt.Errorf("not expected listing /test/one: %v", li)
		}
		if fi, ok := li[0].(os.FileInfo); ok {
			if fi.Name() != "test2" {
				log.Printf("not expected name: %v", fi.Name())
				fs.Logf(nil, "not expected name: %v", fi.Name())
				return fmt.Errorf("not expected name: %v", fi.Name())
			}
		} else if di, ok := li[0].(fs.DirEntry); ok {
			if di.Remote() != "test/one/test2" {
				log.Printf("not expected remote: %v", di.Remote())
				fs.Logf(nil, "not expected remote: %v", di.Remote())
				return fmt.Errorf("not expected remote: %v", di.Remote())
			}
		} else {
			log.Printf("unexpected listing: %v", li)
			fs.Logf(nil, "unexpected listing: %v", li)
			return fmt.Errorf("unexpected listing: %v", li)
		}
		log.Printf("complete listing /test/one/test2")
		fs.Logf(nil, "complete listing /test/one/test2")
		return nil
	}, 12, time.Second*10)
	require.NoError(t, err)
@@ -689,7 +688,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
	co, ok := o.(*cache.Object)
	require.True(t, ok)

	for i := 0; i < 4; i++ { // read first 4
	for i := range 4 { // read first 4
		_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
	}
	cfs.CleanUpCache(true)
@@ -708,7 +707,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {

func TestInternalExpiredEntriesRemoved(t *testing.T) {
	id := fmt.Sprintf("tieer%v", time.Now().Unix())
	vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
	cfs, err := runInstance.getCacheFs(rootFs)
	require.NoError(t, err)
@@ -743,7 +742,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
}

func TestInternalBug2117(t *testing.T) {
	vfsflags.Opt.DirCacheTime = time.Second * 10
	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)

	id := fmt.Sprintf("tib2117%v", time.Now().Unix())
	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
@@ -771,24 +770,24 @@ func TestInternalBug2117(t *testing.T) {

	di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
	require.NoError(t, err)
	log.Printf("len: %v", len(di))
	fs.Logf(nil, "len: %v", len(di))
	require.Len(t, di, 1)

	time.Sleep(time.Second * 30)

	di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
	require.NoError(t, err)
	log.Printf("len: %v", len(di))
	fs.Logf(nil, "len: %v", len(di))
	require.Len(t, di, 1)

	di, err = runInstance.list(t, rootFs, "test/dir1")
	require.NoError(t, err)
	log.Printf("len: %v", len(di))
	fs.Logf(nil, "len: %v", len(di))
	require.Len(t, di, 4)

	di, err = runInstance.list(t, rootFs, "test")
	require.NoError(t, err)
	log.Printf("len: %v", len(di))
	fs.Logf(nil, "len: %v", len(di))
	require.Len(t, di, 4)
}

@@ -829,7 +828,7 @@ func newRun() *run {
	} else {
		r.tmpUploadDir = uploadDir
	}
	log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
	fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)

	return r
}
@@ -850,8 +849,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
	fstest.Initialise()
	remoteExists := false
	for _, s := range config.FileSections() {
		if s == remote {
	for _, s := range config.GetRemotes() {
		if s.Name == remote {
			remoteExists = true
		}
	}
@@ -875,12 +874,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
	cacheRemote := remote
	if !remoteExists {
		localRemote := remote + "-local"
		config.FileSet(localRemote, "type", "local")
		config.FileSet(localRemote, "nounc", "true")
		config.FileSetValue(localRemote, "type", "local")
		config.FileSetValue(localRemote, "nounc", "true")
		m.Set("type", "cache")
		m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
	} else {
		remoteType := config.FileGet(remote, "type")
		remoteType := config.GetValue(remote, "type")
		if remoteType == "" {
			t.Skipf("skipped due to invalid remote type for %v", remote)
			return nil, nil
@@ -891,14 +890,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
			m.Set("password", cryptPassword1)
			m.Set("password2", cryptPassword2)
		}
		remoteRemote := config.FileGet(remote, "remote")
		remoteRemote := config.GetValue(remote, "remote")
		if remoteRemote == "" {
			t.Skipf("skipped due to invalid remote wrapper for %v", remote)
			return nil, nil
		}
		remoteRemoteParts := strings.Split(remoteRemote, ":")
		remoteWrapping := remoteRemoteParts[0]
		remoteType := config.FileGet(remoteWrapping, "type")
		remoteType := config.GetValue(remoteWrapping, "type")
		if remoteType != "cache" {
			t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
			return nil, nil
@@ -972,7 +971,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
	f, err := os.CreateTemp("", "rclonecache-tempfile")
	require.NoError(t, err)

	for i := 0; i < int(cnt); i++ {
	for range int(cnt) {
		data := randStringBytes(int(chunk))
		_, _ = f.Write(data)
	}
@@ -1086,9 +1085,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
	return err
}

func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
	var err error
	var l []interface{}
	var l []any
	var list fs.DirEntries
	list, err = f.List(context.Background(), remote)
	for _, ll := range list {
@@ -1192,7 +1191,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
func (r *run) cleanSize(t *testing.T, size int64) int64 {
	if r.rootIsCrypt {
		denominator := int64(65536 + 16)
		size = size - 32
		size -= 32
		quotient := size / denominator
		remainder := size % denominator
		return (quotient*65536 + remainder - 16)
@@ -1216,7 +1215,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
	var err error
	var state cache.BackgroundUploadState

	for i := 0; i < 2; i++ {
	for range 2 {
		select {
		case state = <-buCh:
			// continue
@@ -1294,7 +1293,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str

func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
	var err error
	for i := 0; i < maxRetries; i++ {
	for range maxRetries {
		err = block()
		if err == nil {
			return nil
backend/cache/cache_test.go
@@ -17,7 +17,7 @@ func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestCache:",
		NilObject:  (*cache.Object)(nil),
		UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
		UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
		UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
		SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
backend/cache/cache_upload_test.go
@@ -162,7 +162,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
	randInstance := rand.New(rand.NewSource(time.Now().Unix()))

	lastFile := ""
	for i := 0; i < totalFiles; i++ {
	for i := range totalFiles {
		size := int64(randInstance.Intn(maxSize-minSize) + minSize)
		testReader := runInstance.randomReader(t, size)
		remote := "test/" + strconv.Itoa(i) + ".bin"
backend/cache/handle.go
@@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) {
		}
	}

	for i := 0; i < r.workers; i++ {
	for i := range r.workers {
		o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
		if o < 0 || o >= r.cachedObject.Size() {
			continue
@@ -208,7 +208,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
	offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)

	// we align the start offset of the first chunk to a likely chunk in the storage
	chunkStart = chunkStart - offset
	chunkStart -= offset
	r.queueOffset(chunkStart)
	found := false

@@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
	if !found {
		// we're gonna give the workers a chance to pickup the chunk
		// and retry a couple of times
		for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
		for i := range r.cacheFs().opt.ReadRetries * 8 {
			data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
			if err == nil {
				found = true
@@ -327,7 +327,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {

	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
	if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
		chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
		chunkStart -= int64(r.cacheFs().opt.ChunkSize)
	}
	r.queueOffset(chunkStart)

@@ -415,10 +415,8 @@ func (w *worker) run() {
				continue
			}
		}
		} else {
			if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
				continue
			}
		} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
			continue
		}

		chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
backend/cache/plex.go
@@ -209,7 +209,7 @@ func (p *plexConnector) authenticate() error {
	if err != nil {
		return err
	}
	var data map[string]interface{}
	var data map[string]any
	err = json.NewDecoder(resp.Body).Decode(&data)
	if err != nil {
		return fmt.Errorf("failed to obtain token: %w", err)
@@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
}

// adapted from: https://stackoverflow.com/a/28878037 (credit)
func get(m interface{}, path ...interface{}) (interface{}, bool) {
func get(m any, path ...any) (any, bool) {
	for _, p := range path {
		switch idx := p.(type) {
		case string:
			if mm, ok := m.(map[string]interface{}); ok {
			if mm, ok := m.(map[string]any); ok {
				if val, found := mm[idx]; found {
					m = val
					continue
@@ -285,7 +285,7 @@ func get(m interface{}, path ...interface{}) (interface{}, bool) {
			}
			return nil, false
		case int:
			if mm, ok := m.([]interface{}); ok {
			if mm, ok := m.([]any); ok {
				if len(mm) > idx {
					m = mm[idx]
					continue
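For illustration only: the `get` helper above walks a decoded JSON structure with a mixed string/int path, where strings index maps and ints index slices. A hypothetical call against the kind of `map[string]any` the Plex responses decode into (the shape here is made up for the example):

	// Hypothetical data shaped like a decoded JSON response.
	data := map[string]any{
		"MediaContainer": map[string]any{
			"Metadata": []any{
				map[string]any{"title": "some movie"},
			},
		},
	}
	title, ok := get(data, "MediaContainer", "Metadata", 0, "title")
	// ok == true, title.(string) == "some movie"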
backend/cache/storage_persistent.go
@@ -18,6 +18,7 @@ import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/walk"
	bolt "go.etcd.io/bbolt"
	"go.etcd.io/bbolt/errors"
)

// Constants
@@ -597,7 +598,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
	})

	if err != nil {
		if err == bolt.ErrDatabaseNotOpen {
		if err == errors.ErrDatabaseNotOpen {
			// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
			return
		}
@@ -606,16 +607,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
}

// Stats returns a go map with the stats key values
func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
	r := make(map[string]map[string]interface{})
	r["data"] = make(map[string]interface{})
func (b *Persistent) Stats() (map[string]map[string]any, error) {
	r := make(map[string]map[string]any)
	r["data"] = make(map[string]any)
	r["data"]["oldest-ts"] = time.Now()
	r["data"]["oldest-file"] = ""
	r["data"]["newest-ts"] = time.Now()
	r["data"]["newest-file"] = ""
	r["data"]["total-chunks"] = 0
	r["data"]["total-size"] = int64(0)
	r["files"] = make(map[string]interface{})
	r["files"] = make(map[string]any)
	r["files"]["oldest-ts"] = time.Now()
	r["files"]["oldest-name"] = ""
	r["files"]["newest-ts"] = time.Now()
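A note on the `bolt.ErrDatabaseNotOpen` to `errors.ErrDatabaseNotOpen` swap: judging from the import change in the same hunk, this tracks upstream bbolt, which now exposes its sentinel errors from the dedicated `go.etcd.io/bbolt/errors` package, so comparing against that package keeps the late-janitor check working against current bbolt versions.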
@@ -356,7 +356,8 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
		DirModTimeUpdatesOnWrite: true,
	}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)

	f.features.Disable("ListR") // Recursive listing may cause chunker skip files
	f.features.ListR = nil // Recursive listing may cause chunker skip files
	f.features.ListP = nil // ListP not supported yet

	return f, err
}
@@ -632,7 +633,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct

// forbidChunk prints error message or raises error if file is chunk.
// First argument sets log prefix, use `false` to suppress message.
func (f *Fs) forbidChunk(o interface{}, filePath string) error {
func (f *Fs) forbidChunk(o any, filePath string) error {
	if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
		if f.opt.FailHard {
			return fmt.Errorf("chunk overlap with %q", parentPath)
@@ -680,7 +681,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
	circleSec := unixSec % closestPrimeZzzzSeconds
	first4chars := strconv.FormatInt(circleSec, 36)

	for tries := 0; tries < maxTransactionProbes; tries++ {
	for range maxTransactionProbes {
		f.xactIDMutex.Lock()
		randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
		f.xactIDMutex.Unlock()
@@ -987,7 +988,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
		}
	}

	if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
	if o.main == nil && len(o.chunks) == 0 {
		// Scanning hasn't found data chunks with conforming names.
		if f.useMeta || quickScan {
			// Metadata is required but absent and there are no chunks.
@@ -1189,10 +1190,7 @@ func (f *Fs) put(
	}

	tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
	size := c.sizeLeft
	if size > c.chunkSize {
		size = c.chunkSize
	}
	size := min(c.sizeLeft, c.chunkSize)
	savedReadCount := c.readCount

	// If a single chunk is expected, avoid the extra rename operation
@@ -1477,10 +1475,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
	const bufLen = 1048576 // 1 MiB
	buf := make([]byte, bufLen)
	for size > 0 {
		n := size
		if n > bufLen {
			n = bufLen
		}
		n := min(size, bufLen)
		if _, err := io.ReadFull(in, buf[0:n]); err != nil {
			return err
		}
@@ -1866,6 +1861,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,

// baseMove chains to the wrapped Move or simulates it by Copy+Delete
func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
	ctx, ci := fs.AddConfig(ctx)
	ci.NameTransform = nil // ensure operations.Move does not double-transform here
	var (
		dest fs.Object
		err  error
@@ -2480,7 +2477,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
	if len(data) > maxMetadataSizeWritten {
		return nil, false, ErrMetaTooBig
	}
	if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
	if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
		return nil, false, errors.New("invalid json")
	}
	var metadata metaSimpleJSON

@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
	})
}

type settings map[string]interface{}
type settings map[string]any

func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
	fsName := strings.Split(f.Name(), "{")[0] // strip off hash

@@ -46,6 +46,7 @@ func TestIntegration(t *testing.T) {
			"DirCacheFlush",
			"UserInfo",
			"Disconnect",
			"ListP",
		},
	}
	if *fstest.RemoteName == "" {
backend/cloudinary/api/types.go
@@ -0,0 +1,48 @@
// Package api has type definitions for cloudinary
package api

import (
	"fmt"
)

// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
	// FromStandardPath takes a / separated path in Standard encoding
	// and converts it to a / separated path in this encoding.
	FromStandardPath(string) string
	// FromStandardName takes name in Standard encoding and converts
	// it in this encoding.
	FromStandardName(string) string
	// ToStandardPath takes a / separated path in this encoding
	// and converts it to a / separated path in Standard encoding.
	ToStandardPath(string) string
	// ToStandardName takes name in this encoding and converts
	// it in Standard encoding.
	ToStandardName(string, string) string
	// Encoded root of the remote (as passed into NewFs)
	FromStandardFullPath(string) string
}

// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
	PublicID     string
	ResourceType string
	DeliveryType string
	AssetFolder  string
	DisplayName  string
}

// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
	return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
	return false
}

// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
	return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
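A note on the pattern: with `Header`, `Mandatory`, and `String`, `UpdateOptions` satisfies rclone's `fs.OpenOption` interface, which appears to be how the backend threads the fully qualified public ID from Update into Put. A hypothetical construction showing just the `Header()` encoding:

	// Hypothetical values - shows the Header() encoding only.
	opt := &api.UpdateOptions{
		PublicID:     "folder/asset",
		ResourceType: "image",
		DeliveryType: "upload",
	}
	k, v := opt.Header()
	// k == "UpdateOption", v == "image/upload/folder/asset"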
backend/cloudinary/cloudinary.go
@@ -0,0 +1,754 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary

import (
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"slices"
	"strconv"
	"strings"
	"time"

	"github.com/cloudinary/cloudinary-go/v2"
	SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
	"github.com/cloudinary/cloudinary-go/v2/api/admin"
	"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
	"github.com/cloudinary/cloudinary-go/v2/api/uploader"
	"github.com/rclone/rclone/backend/cloudinary/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
	"github.com/zeebo/blake3"
)

// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
	if somePath == "" || somePath == "." {
		return somePath
	}
	dir := path.Dir(somePath)
	if dir == "." {
		return ""
	}
	return dir
}
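A quick behavioral sketch of `cldPathDir` with illustrative inputs: unlike `path.Dir`, it returns "" rather than "." for a top-level name, which keeps Cloudinary folder paths free of the trailing-dot artifact the comment mentions.

	cldPathDir("a/b/c") // "a/b"
	cldPathDir("file")  // ""  (path.Dir would return ".")
	cldPathDir(".")     // "."
	cldPathDir("")      // ""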
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "cloudinary",
|
||||
Description: "Cloudinary",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{
|
||||
{
|
||||
Name: "cloud_name",
|
||||
Help: "Cloudinary Environment Name",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "api_key",
|
||||
Help: "Cloudinary API Key",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "api_secret",
|
||||
Help: "Cloudinary API Secret",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "upload_prefix",
|
||||
Help: "Specify the API endpoint for environments out of the US",
|
||||
},
|
||||
{
|
||||
Name: "upload_preset",
|
||||
Help: "Upload Preset to select asset manipulation on upload",
|
||||
},
|
||||
{
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
|
||||
encoder.EncodeSlash |
|
||||
encoder.EncodeLtGt |
|
||||
encoder.EncodeDoubleQuote |
|
||||
encoder.EncodeQuestion |
|
||||
encoder.EncodeAsterisk |
|
||||
encoder.EncodePipe |
|
||||
encoder.EncodeHash |
|
||||
encoder.EncodePercent |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeDel |
|
||||
encoder.EncodeCtl |
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeInvalidUtf8 |
|
||||
encoder.EncodeDot),
|
||||
},
|
||||
{
|
||||
Name: "eventually_consistent_delay",
|
||||
Default: fs.Duration(0),
|
||||
Advanced: true,
|
||||
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
|
||||
},
|
||||
{
|
||||
Name: "adjust_media_files_extensions",
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
|
||||
},
|
||||
{
|
||||
Name: "media_extensions",
|
||||
Default: []string{
|
||||
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
|
||||
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
|
||||
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
|
||||
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
|
||||
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
|
||||
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
|
||||
Advanced: true,
|
||||
Help: "Cloudinary supported media extensions",
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
CloudName string `config:"cloud_name"`
|
||||
APIKey string `config:"api_key"`
|
||||
APISecret string `config:"api_secret"`
|
||||
UploadPrefix string `config:"upload_prefix"`
|
||||
UploadPreset string `config:"upload_preset"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
|
||||
MediaExtensions []string `config:"media_extensions"`
|
||||
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
|
||||
}

// Fs represents a remote cloudinary server
type Fs struct {
	name     string
	root     string
	opt      Options
	features *fs.Features
	pacer    *fs.Pacer
	srv      *rest.Client           // For downloading assets via the Cloudinary CDN
	cld      *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
	lastCRUD time.Time
}

// Object describes a cloudinary object
type Object struct {
	fs           *Fs
	remote       string
	size         int64
	modTime      time.Time
	url          string
	md5sum       string
	publicID     string
	resourceType string
	deliveryType string
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Initialize the Cloudinary client
	cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
	if err != nil {
		return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
	}
	cld.Admin.Client = *fshttp.NewClient(ctx)
	cld.Upload.Client = *fshttp.NewClient(ctx)
	if opt.UploadPrefix != "" {
		cld.Config.API.UploadPrefix = opt.UploadPrefix
	}
	client := fshttp.NewClient(ctx)
	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		cld:   cld,
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
		srv:   rest.NewClient(client),
	}

	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	if root != "" {
		// Check to see if the root is actually an existing file
		remote := path.Base(root)
		f.root = cldPathDir(root)
		_, err := f.NewObject(ctx, remote)
		if err != nil {
			if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
				// File doesn't exist so return the previous root
				f.root = root
				return f, nil
			}
			return nil, err
		}
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// ------------------------------------------------------------

// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
	return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}

// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
	if f.opt.AdjustMediaFilesExtensions {
		parsedURL, err := url.Parse(s)
		ext := ""
		if err != nil {
			fs.Logf(nil, "Error parsing URL: %v", err)
		} else {
			ext = path.Ext(parsedURL.Path)
			if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
				s = strings.TrimSuffix(parsedURL.Path, ext)
			}
		}
	}
	return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}

// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
	return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}

// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string, assetURL string) string {
	ext := ""
	if f.opt.AdjustMediaFilesExtensions {
		parsedURL, err := url.Parse(assetURL)
		if err != nil {
			fs.Logf(nil, "Error parsing URL: %v", err)
		} else {
			ext = path.Ext(parsedURL.Path)
			if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
				ext = ""
			}
		}
	}
	return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
}

// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
	return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}

// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
	return strings.ReplaceAll(dir, "%", "%25")
}

// ToDisplayNameElastic encodes a special case of elasticsearch
func (f *Fs) ToDisplayNameElastic(dir string) string {
	return strings.ReplaceAll(dir, "!", "\\!")
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// WaitEventuallyConsistent waits till the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
	if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
		return
	}
	delay := time.Duration(f.opt.EventuallyConsistentDelay)
	timeSinceLastCRUD := time.Since(f.lastCRUD)
	if timeSinceLastCRUD < delay {
		time.Sleep(delay - timeSinceLastCRUD)
	}
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("Cloudinary root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	remotePrefix := f.FromStandardFullPath(dir)
	if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
		remotePrefix += "/"
	}

	var entries fs.DirEntries
	dirs := make(map[string]struct{})
	nextCursor := ""
	f.WaitEventuallyConsistent()
	for {
		// Use the folders API to list folders.
		folderParams := admin.SubFoldersParams{
			Folder:     f.ToAssetFolderAPI(remotePrefix),
			MaxResults: 500,
		}
		if nextCursor != "" {
			folderParams.NextCursor = nextCursor
		}

		results, err := f.cld.Admin.SubFolders(ctx, folderParams)
		if err != nil {
			return nil, fmt.Errorf("failed to list sub-folders: %w", err)
		}
		if results.Error.Message != "" {
			if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
				return nil, fs.ErrorDirNotFound
			}

			return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
		}

		for _, folder := range results.Folders {
			relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
			parts := strings.Split(relativePath, "/")

			// It's a directory
			dirName := parts[len(parts)-1]
			if _, found := dirs[dirName]; !found {
				d := fs.NewDir(path.Join(dir, dirName), time.Time{})
				entries = append(entries, d)
				dirs[dirName] = struct{}{}
			}
		}
		// Break if there are no more results
		if results.NextCursor == "" {
			break
		}
		nextCursor = results.NextCursor
	}

	for {
		// Use the assets.AssetsByAssetFolder API to list assets
		assetsParams := admin.AssetsByAssetFolderParams{
			AssetFolder: remotePrefix,
			MaxResults:  500,
		}
		if nextCursor != "" {
			assetsParams.NextCursor = nextCursor
		}

		results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
		if err != nil {
			return nil, fmt.Errorf("failed to list assets: %w", err)
		}

		for _, asset := range results.Assets {
			remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
			o := &Object{
				fs:           f,
				remote:       remote,
				size:         int64(asset.Bytes),
				modTime:      asset.CreatedAt,
				url:          asset.SecureURL,
				publicID:     asset.PublicID,
				resourceType: asset.AssetType,
				deliveryType: asset.Type,
			}
			entries = append(entries, o)
		}

		// Break if there are no more results
		if results.NextCursor == "" {
			break
		}
		nextCursor = results.NextCursor
	}

	return entries, nil
}

// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	searchParams := search.Query{
		Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
			f.FromStandardFullPath(cldPathDir(remote)),
			f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
		SortBy:     []search.SortByField{{"uploaded_at": "desc"}},
		MaxResults: 2,
	}
	var results *admin.SearchResult
	f.WaitEventuallyConsistent()
	err := f.pacer.Call(func() (bool, error) {
		var err1 error
		results, err1 = f.cld.Admin.Search(ctx, searchParams)
		if err1 == nil && results.TotalCount != len(results.Assets) {
			err1 = errors.New("partial response so waiting for eventual consistency")
		}
		return shouldRetry(ctx, nil, err1)
	})
	if err != nil {
		return nil, fs.ErrorObjectNotFound
	}
	if results.TotalCount == 0 || len(results.Assets) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	asset := results.Assets[0]

	o := &Object{
		fs:           f,
		remote:       remote,
		size:         int64(asset.Bytes),
		modTime:      asset.UploadedAt,
		url:          asset.SecureURL,
		md5sum:       asset.Etag,
		publicID:     asset.PublicID,
		resourceType: asset.ResourceType,
		deliveryType: asset.Type,
	}

	return o, nil
}

func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
	payload := []byte(path.Join(assetFolder, displayName))
	hash := blake3.Sum256(payload)
	return hex.EncodeToString(hash[:])
}
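
getSuggestedPublicID derives a stable public ID from the asset folder and display name alone; note that the modTime parameter does not currently enter the hash. A standalone sketch of the same derivation (assuming the lukechampine.com/blake3 module; folder and file names are made up):

	package main

	import (
		"encoding/hex"
		"fmt"
		"path"

		"lukechampine.com/blake3"
	)

	func main() {
		// Same payload shape as getSuggestedPublicID: folder joined with name.
		payload := []byte(path.Join("photos/2024", "sunset.jpg"))
		sum := blake3.Sum256(payload)
		fmt.Println(hex.EncodeToString(sum[:])) // deterministic 64-char hex ID
	}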

// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	if src.Size() == 0 {
		return nil, fs.ErrorCantUploadEmptyFiles
	}

	params := uploader.UploadParams{
		UploadPreset: f.opt.UploadPreset,
	}

	updateObject := false
	var modTime time.Time
	for _, option := range options {
		if updateOptions, ok := option.(*api.UpdateOptions); ok {
			if updateOptions.PublicID != "" {
				updateObject = true
				params.Overwrite = SDKApi.Bool(true)
				params.Invalidate = SDKApi.Bool(true)
				params.PublicID = updateOptions.PublicID
				params.ResourceType = updateOptions.ResourceType
				params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
				params.AssetFolder = updateOptions.AssetFolder
				params.DisplayName = updateOptions.DisplayName
				modTime = src.ModTime(ctx)
			}
		}
	}
	if !updateObject {
		params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
		params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
		// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
		// We also want to let customers choose their own public_id when duplicate names are not a crucial use case.
		// Upload presets that apply randomness to the public ID would not work well with rclone's duplicate assets support.
		params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
	}
	uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
	f.lastCRUD = time.Now()
	if err != nil {
		return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
	}
	if !updateObject {
		modTime = uploadResult.CreatedAt
	}
	if uploadResult.Error.Message != "" {
		return nil, errors.New(uploadResult.Error.Message)
	}

	o := &Object{
		fs:           f,
		remote:       src.Remote(),
		size:         int64(uploadResult.Bytes),
		modTime:      modTime,
		url:          uploadResult.SecureURL,
		md5sum:       uploadResult.Etag,
		publicID:     uploadResult.PublicID,
		resourceType: uploadResult.ResourceType,
		deliveryType: uploadResult.Type,
	}
	return o, nil
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
	res, err := f.cld.Admin.CreateFolder(ctx, params)
	f.lastCRUD = time.Now()
	if err != nil {
		return err
	}
	if res.Error.Message != "" {
		return errors.New(res.Error.Message)
	}

	return nil
}

// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	// Additional test because Cloudinary will delete folders without
	// assets, regardless of empty sub-folders
	folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
	folderParams := admin.SubFoldersParams{
		Folder:     folder,
		MaxResults: 1,
	}
	results, err := f.cld.Admin.SubFolders(ctx, folderParams)
	if err != nil {
		return err
	}
	if results.TotalCount > 0 {
		return fs.ErrorDirectoryNotEmpty
	}

	params := admin.DeleteFolderParams{Folder: folder}
	res, err := f.cld.Admin.DeleteFolder(ctx, params)
	f.lastCRUD = time.Now()
	if err != nil {
		return err
	}
	if res.Error.Message != "" {
		if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
			return fs.ErrorDirNotFound
		}

		return errors.New(res.Error.Message)
	}

	return nil
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	420, // Too Many Requests (legacy)
	429, // Too Many Requests
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if err != nil {
		tryAgain := "Try again on "
		if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
			layout := "2006-01-02 15:04:05 UTC"
			dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
			timestamp, err2 := time.Parse(layout, dateStr)
			if err2 == nil {
				return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
			}
		}

		fs.Debugf(nil, "Retrying API error %v", err)
		return true, err
	}

	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
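
The "Try again on <timestamp>" branch above turns Cloudinary's textual rate-limit message into a precise retry delay. A self-contained sketch of that parsing, using a fabricated error message of the shape the code expects:

	package main

	import (
		"errors"
		"fmt"
		"strings"
		"time"
	)

	func main() {
		err := errors.New("Rate Limit Exceeded. Try again on 2025-01-02 15:04:05 UTC.")
		const tryAgain = "Try again on "
		const layout = "2006-01-02 15:04:05 UTC" // layout and date are the same length
		if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
			dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
			if ts, err2 := time.Parse(layout, dateStr); err2 == nil {
				fmt.Println("retry after:", time.Until(ts).Round(time.Second))
			}
		}
	}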

// ------------------------------------------------------------

// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	if ty != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// Size of object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: o.url,
		Options: options,
	}
	var offset int64
	var count int64
	var key string
	var value string
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
			key, value = option.Header()
		case *fs.SeekOption:
			offset = x.Offset
			count = o.size - offset
			key, value = option.Header()
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	if key != "" && value != "" {
		opts.ExtraHeaders = make(map[string]string)
		opts.ExtraHeaders[key] = value
	}
	// Make sure that the asset is fully available
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		if err == nil {
			cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
			if clErr == nil && count == int64(cl) {
				return false, nil
			}
		}
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
	}
	return resp.Body, err
}

// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	options = append(options, &api.UpdateOptions{
		PublicID:     o.publicID,
		ResourceType: o.resourceType,
		DeliveryType: o.deliveryType,
		DisplayName:  api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
		AssetFolder:  o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
	})
	updatedObj, err := o.fs.Put(ctx, in, src, options...)
	if err != nil {
		return err
	}
	if uo, ok := updatedObj.(*Object); ok {
		o.size = uo.size
		o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
		o.url = uo.url
		o.md5sum = uo.md5sum
		o.publicID = uo.publicID
		o.resourceType = uo.resourceType
		o.deliveryType = uo.deliveryType
	}
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	params := uploader.DestroyParams{
		PublicID:     o.publicID,
		ResourceType: o.resourceType,
		Type:         o.deliveryType,
	}
	res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
	o.fs.lastCRUD = time.Now()
	if dErr != nil {
		return dErr
	}

	if res.Error.Message != "" {
		return errors.New(res.Error.Message)
	}

	if res.Result != "ok" {
		return errors.New(res.Result)
	}

	return nil
}

backend/cloudinary/cloudinary_test.go (new file, 23 lines)
@@ -0,0 +1,23 @@
// Test Cloudinary filesystem interface

package cloudinary_test

import (
	"testing"

	"github.com/rclone/rclone/backend/cloudinary"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	name := "TestCloudinary"
	fstests.Run(t, &fstests.Opt{
		RemoteName:      name + ":",
		NilObject:       (*cloudinary.Object)(nil),
		SkipInvalidUTF8: true,
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "eventually_consistent_delay", Value: "7"},
		},
	})
}
@@ -20,6 +20,7 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
	"golang.org/x/sync/errgroup"
@@ -265,6 +266,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
		}
	}

+	// Enable ListP always
+	features.ListP = f.ListP
+
	// Enable Purge when any upstreams support it
	if features.Purge == nil {
		for _, u := range f.upstreams {
@@ -809,24 +813,52 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
	if f.root == "" && dir == "" {
-		entries = make(fs.DirEntries, 0, len(f.upstreams))
+		entries := make(fs.DirEntries, 0, len(f.upstreams))
		for combineDir := range f.upstreams {
			d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
			entries = append(entries, d)
		}
-		return entries, nil
+		return callback(entries)
	}
	u, uRemote, err := f.findUpstream(dir)
	if err != nil {
-		return nil, err
+		return err
	}
-	entries, err = u.f.List(ctx, uRemote)
-	if err != nil {
-		return nil, err
+	wrappedCallback := func(entries fs.DirEntries) error {
+		entries, err := u.wrapEntries(ctx, entries)
+		if err != nil {
+			return err
+		}
+		return callback(entries)
	}
-	return u.wrapEntries(ctx, entries)
+	listP := u.f.Features().ListP
+	if listP == nil {
+		entries, err := u.f.List(ctx, uRemote)
+		if err != nil {
+			return err
+		}
+		return wrappedCallback(entries)
+	}
+	return listP(ctx, uRemote, wrappedCallback)
}
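
The List/ListP split above follows a pattern repeated across the backends in this changeset: ListP streams entries to a callback in tranches, and list.WithListP adapts it back into a one-shot List. A minimal, self-contained sketch of that adapter pattern (illustrative types, not rclone's own):

	package main

	import "fmt"

	type entry string

	// listP simulates a paged lister that delivers entries in tranches.
	func listP(callback func([]entry) error) error {
		for _, tranche := range [][]entry{{"a", "b"}, {"c"}} {
			if err := callback(tranche); err != nil {
				return err // a callback error stops the listing immediately
			}
		}
		return nil
	}

	// withListP adapts listP back into a plain "return everything" call.
	func withListP() ([]entry, error) {
		var all []entry
		err := listP(func(tranche []entry) error {
			all = append(all, tranche...)
			return nil
		})
		return all, err
	}

	func main() {
		entries, _ := withListP()
		fmt.Println(entries) // [a b c]
	}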

// ListR lists the objects and directories of the Fs starting

@@ -29,6 +29,7 @@ import (
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fs/operations"
@@ -38,6 +39,7 @@ import (
const (
	initialChunkSize = 262144  // Initial and max sizes of chunks when reading parts of the file. Currently
	maxChunkSize     = 8388608 // at 256 KiB and 8 MiB.
+	chunkStreams     = 0       // Streams to use for reading

	bufferSize     = 8388608
	heuristicBytes = 1048576
@@ -207,6 +209,8 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
	if !operations.CanServerSideMove(wrappedFs) {
		f.features.Disable("PutStream")
	}
+	// Enable ListP always
+	f.features.ListP = f.ListP

	return f, err
}
@@ -351,11 +355,39 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
// found.
// List entries and process them
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	entries, err = f.Fs.List(ctx, dir)
-	if err != nil {
-		return nil, err
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	wrappedCallback := func(entries fs.DirEntries) error {
+		entries, err := f.processEntries(entries)
+		if err != nil {
+			return err
+		}
+		return callback(entries)
	}
-	return f.processEntries(entries)
+	listP := f.Fs.Features().ListP
+	if listP == nil {
+		entries, err := f.Fs.List(ctx, dir)
+		if err != nil {
+			return err
+		}
+		return wrappedCallback(entries)
+	}
+	return listP(ctx, dir, wrappedCallback)
}

// ListR lists the objects and directories of the Fs starting

@@ -1362,7 +1394,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
		}
	}
	// Get a chunkedreader for the wrapped object
-	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
+	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
	// Get file handle
	var file io.Reader
	if offset != 0 {

@@ -192,7 +192,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
		dirNameEncrypt:  dirNameEncrypt,
		encryptedSuffix: ".bin",
	}
-	c.buffers.New = func() interface{} {
+	c.buffers.New = func() any {
		return new([blockSize]byte)
	}
	err := c.Key(password, salt)
@@ -329,14 +329,14 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
	for _, runeValue := range plaintext {
		dir += int(runeValue)
	}
-	dir = dir % 256
+	dir %= 256

	// We'll use this number to store in the result filename...
	var result bytes.Buffer
	_, _ = result.WriteString(strconv.Itoa(dir) + ".")

	// but we'll augment it with the nameKey for real calculation
-	for i := 0; i < len(c.nameKey); i++ {
+	for i := range len(c.nameKey) {
		dir += int(c.nameKey[i])
	}

@@ -418,7 +418,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
	}

	// add the nameKey to get the real rotate distance
-	for i := 0; i < len(c.nameKey); i++ {
+	for i := range len(c.nameKey) {
		dir += int(c.nameKey[i])
	}

@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
			if pos >= 26 {
				pos -= 6
			}
-			pos = pos - thisdir
+			pos -= thisdir
			if pos < 0 {
				pos += 52
			}
@@ -664,7 +664,7 @@ func (n *nonce) increment() {
// add a uint64 to the nonce
func (n *nonce) add(x uint64) {
	carry := uint16(0)
-	for i := 0; i < 8; i++ {
+	for i := range 8 {
		digit := (*n)[i]
		xDigit := byte(x)
		x >>= 8
@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
		fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
		// Zero out the bad block and continue
		for i := range (*fh.buf)[:n] {
-			(*fh.buf)[i] = 0
+			fh.buf[i] = 0
		}
	}
	fh.bufIndex = 0

@@ -1307,10 +1307,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
	open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
		end := len(ciphertext)
		if underlyingLimit >= 0 {
-			end = int(underlyingOffset + underlyingLimit)
-			if end > len(ciphertext) {
-				end = len(ciphertext)
-			}
+			end = min(int(underlyingOffset+underlyingLimit), len(ciphertext))
		}
		reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
		return reader, nil
@@ -1490,7 +1487,7 @@ func TestDecrypterRead(t *testing.T) {
	assert.NoError(t, err)

	// Test truncating the file at each possible point
-	for i := 0; i < len(file16)-1; i++ {
+	for i := range len(file16) - 1 {
		what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
		cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
		fh, err := c.newDecrypter(cd)

@@ -18,6 +18,7 @@ import (
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/list"
)

// Globals
@@ -293,6 +294,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
		PartialUploads:          true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

+	// Enable ListP always
+	f.features.ListP = f.ListP
+
	return f, err
}

@@ -416,11 +420,40 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
-	if err != nil {
-		return nil, err
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	wrappedCallback := func(entries fs.DirEntries) error {
+		entries, err := f.encryptEntries(ctx, entries)
+		if err != nil {
+			return err
+		}
+		return callback(entries)
	}
-	return f.encryptEntries(ctx, entries)
+	listP := f.Fs.Features().ListP
+	encryptedDir := f.cipher.EncryptDirName(dir)
+	if listP == nil {
+		entries, err := f.Fs.List(ctx, encryptedDir)
+		if err != nil {
+			return err
+		}
+		return wrappedCallback(entries)
+	}
+	return listP(ctx, encryptedDir, wrappedCallback)
}

// ListR lists the objects and directories of the Fs starting

@@ -924,7 +957,7 @@ Usage Example:
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
	switch name {
	case "decode":
		out := make([]string, 0, len(arg))

@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
	}
	length := len(buf)
	padding := n - (length % n)
-	for i := 0; i < padding; i++ {
+	for range padding {
		buf = append(buf, byte(padding))
	}
	if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
	if padding == 0 {
		return nil, ErrorPaddingTooShort
	}
-	for i := 0; i < padding; i++ {
+	for i := range padding {
		if buf[length-1-i] != byte(padding) {
			return nil, ErrorPaddingNotAllTheSame
		}
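
Pad and Unpad above implement PKCS#7-style padding, which these loop modernizations touch. A tiny self-contained round trip (simplified sketch with no error handling; requires Go 1.22+ for range-over-int and assumes 1 <= n <= 255):

	package main

	import "fmt"

	func pad(n int, buf []byte) []byte {
		padding := n - (len(buf) % n)
		for range padding {
			buf = append(buf, byte(padding)) // each pad byte holds the pad length
		}
		return buf
	}

	func unpad(buf []byte) []byte {
		padding := int(buf[len(buf)-1])
		return buf[:len(buf)-padding]
	}

	func main() {
		b := pad(16, []byte("hello"))
		fmt.Println(len(b), b[len(b)-1]) // 16 11
		fmt.Println(string(unpad(b)))    // hello
	}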

backend/doi/api/dataversetypes.go (new file, 38 lines)
@@ -0,0 +1,38 @@
// Type definitions specific to Dataverse

package api

// DataverseDatasetResponse is returned by the Dataverse dataset API
type DataverseDatasetResponse struct {
	Status string           `json:"status"`
	Data   DataverseDataset `json:"data"`
}

// DataverseDataset is the representation of a dataset
type DataverseDataset struct {
	LatestVersion DataverseDatasetVersion `json:"latestVersion"`
}

// DataverseDatasetVersion is the representation of a dataset version
type DataverseDatasetVersion struct {
	LastUpdateTime string          `json:"lastUpdateTime"`
	Files          []DataverseFile `json:"files"`
}

// DataverseFile is the representation of a file found in a dataset
type DataverseFile struct {
	DirectoryLabel string            `json:"directoryLabel"`
	DataFile       DataverseDataFile `json:"dataFile"`
}

// DataverseDataFile represents file metadata details
type DataverseDataFile struct {
	ID                 int64  `json:"id"`
	Filename           string `json:"filename"`
	ContentType        string `json:"contentType"`
	FileSize           int64  `json:"filesize"`
	OriginalFileFormat string `json:"originalFileFormat"`
	OriginalFileSize   int64  `json:"originalFileSize"`
	OriginalFileName   string `json:"originalFileName"`
	MD5                string `json:"md5"`
}

backend/doi/api/inveniotypes.go (new file, 33 lines)
@@ -0,0 +1,33 @@
// Type definitions specific to InvenioRDM

package api

// InvenioRecordResponse is the representation of a record stored in InvenioRDM
type InvenioRecordResponse struct {
	Links InvenioRecordResponseLinks `json:"links"`
}

// InvenioRecordResponseLinks represents a record's links
type InvenioRecordResponseLinks struct {
	Self string `json:"self"`
}

// InvenioFilesResponse is the representation of a record's files
type InvenioFilesResponse struct {
	Entries []InvenioFilesResponseEntry `json:"entries"`
}

// InvenioFilesResponseEntry is the representation of a file entry
type InvenioFilesResponseEntry struct {
	Key      string                         `json:"key"`
	Checksum string                         `json:"checksum"`
	Size     int64                          `json:"size"`
	Updated  string                         `json:"updated"`
	MimeType string                         `json:"mimetype"`
	Links    InvenioFilesResponseEntryLinks `json:"links"`
}

// InvenioFilesResponseEntryLinks represents file links details
type InvenioFilesResponseEntryLinks struct {
	Content string `json:"content"`
}

backend/doi/api/types.go (new file, 26 lines)
@@ -0,0 +1,26 @@
// Package api has general type definitions for doi
package api

// DoiResolverResponse is returned by the DOI resolver API
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
type DoiResolverResponse struct {
	ResponseCode int                        `json:"responseCode"`
	Handle       string                     `json:"handle"`
	Values       []DoiResolverResponseValue `json:"values"`
}

// DoiResolverResponseValue is a single handle record value
type DoiResolverResponseValue struct {
	Index     int                          `json:"index"`
	Type      string                       `json:"type"`
	Data      DoiResolverResponseValueData `json:"data"`
	TTL       int                          `json:"ttl"`
	Timestamp string                       `json:"timestamp"`
}

// DoiResolverResponseValueData is the data held in a handle value
type DoiResolverResponseValueData struct {
	Format string `json:"format"`
	Value  any    `json:"value"`
}
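
For context, these types decode the JSON that the doi.org handle resolver returns. A hedged sketch of a response of that shape and how the fields map, using trimmed-down mirrors of the types above (the handle and URL values are illustrative):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type data struct {
		Format string `json:"format"`
		Value  any    `json:"value"`
	}
	type value struct {
		Type string `json:"type"`
		Data data   `json:"data"`
	}
	type response struct {
		ResponseCode int     `json:"responseCode"`
		Handle       string  `json:"handle"`
		Values       []value `json:"values"`
	}

	func main() {
		raw := `{"responseCode":1,"handle":"10.1000/182","values":[{"type":"URL","data":{"format":"string","value":"https://example.org/dataset"}}]}`
		var r response
		if err := json.Unmarshal([]byte(raw), &r); err != nil {
			panic(err)
		}
		// Mirrors the selection logic the backend applies below:
		// pick the string-formatted URL value from the handle record.
		for _, v := range r.Values {
			if v.Type == "URL" && v.Data.Format == "string" {
				fmt.Println("resolved to:", v.Data.Value)
			}
		}
	}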

backend/doi/dataverse.go (new file, 112 lines)
@@ -0,0 +1,112 @@
// Implementation for Dataverse

package doi

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

// Returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
	queryValues := resolvedURL.Query()
	persistentID := queryValues.Get("persistentId")
	return persistentID != ""
}

// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
	queryValues := resolvedURL.Query()
	persistentID := queryValues.Get("persistentId")

	query := url.Values{}
	query.Add("persistentId", persistentID)
	endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})

	return Dataverse, endpointURL, nil
}

// dataverseProvider implements the doiProvider interface for Dataverse installations
type dataverseProvider struct {
	f *Fs
}

// ListEntries returns the full list of entries found at the remote, regardless of root
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
	// Use the cache if populated
	cachedEntries, found := dp.f.cache.GetMaybe("files")
	if found {
		parsedEntries, ok := cachedEntries.([]Object)
		if ok {
			for _, entry := range parsedEntries {
				newEntry := entry
				entries = append(entries, &newEntry)
			}
			return entries, nil
		}
	}

	filesURL := dp.f.endpoint
	var res *http.Response
	var result api.DataverseDatasetResponse
	opts := rest.Opts{
		Method:     "GET",
		Path:       strings.TrimLeft(filesURL.EscapedPath(), "/"),
		Parameters: filesURL.Query(),
	}
	err = dp.f.pacer.Call(func() (bool, error) {
		res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("readDir failed: %w", err)
	}
	modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
	if modTimeErr != nil {
		fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
		modTime = timeUnset
	}
	for _, file := range result.Data.LatestVersion.Files {
		contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
		query := url.Values{}
		query.Add("format", "original")
		contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
		entry := &Object{
			fs:          dp.f,
			remote:      path.Join(file.DirectoryLabel, file.DataFile.Filename),
			contentURL:  contentURL.String(),
			size:        file.DataFile.FileSize,
			modTime:     modTime,
			md5:         file.DataFile.MD5,
			contentType: file.DataFile.ContentType,
		}
		if file.DataFile.OriginalFileName != "" {
			entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
			entry.size = file.DataFile.OriginalFileSize
			entry.contentType = file.DataFile.OriginalFileFormat
		}
		entries = append(entries, entry)
	}
	// Populate the cache
	cacheEntries := []Object{}
	for _, entry := range entries {
		cacheEntries = append(cacheEntries, *entry)
	}
	dp.f.cache.Put("files", cacheEntries)
	return entries, nil
}

func newDataverseProvider(f *Fs) doiProvider {
	return &dataverseProvider{
		f: f,
	}
}

backend/doi/doi.go (new file, 649 lines)
@@ -0,0 +1,649 @@
// Package doi provides a filesystem interface for digital objects identified by DOIs.
//
// See: https://www.doi.org/the-identifier/what-is-a-doi/
package doi

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/cache"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
)

const (
	// the URL of the DOI resolver
	//
	// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
	doiResolverAPIURL = "https://doi.org/api"
	minSleep          = 10 * time.Millisecond
	maxSleep          = 2 * time.Second
	decayConstant     = 2 // bigger for slower decay, exponential
)

var (
	errorReadOnly = errors.New("doi remotes are read only")
	timeUnset     = time.Unix(0, 0)
)

func init() {
	fsi := &fs.RegInfo{
		Name:        "doi",
		Description: "DOI datasets",
		NewFs:       NewFs,
		CommandHelp: commandHelp,
		Options: []fs.Option{{
			Name:     "doi",
			Help:     "The DOI or the doi.org URL.",
			Required: true,
		}, {
			Name: fs.ConfigProvider,
			Help: `DOI provider.

The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
			Examples: []fs.OptionExample{
				{
					Value: "auto",
					Help:  "Auto-detect provider",
				},
				{
					Value: string(Zenodo),
					Help:  "Zenodo",
				}, {
					Value: string(Dataverse),
					Help:  "Dataverse",
				}, {
					Value: string(Invenio),
					Help:  "Invenio",
				}},
			Required: false,
			Advanced: true,
		}, {
			Name: "doi_resolver_api_url",
			Help: `The URL of the DOI resolver API to use.

The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.

Defaults to "https://doi.org/api".`,
			Required: false,
			Advanced: true,
		}},
	}
	fs.Register(fsi)
}
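
With the backend registered under the name "doi", a remote can be created and read from the command line. A hypothetical session (the DOI value is illustrative only):

	rclone config create mydoi doi doi 10.5281/zenodo.1234567
	rclone ls mydoi:
	rclone copy mydoi: /tmp/dataset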

// Provider defines the type of provider hosting the DOI
type Provider string

const (
	// Zenodo provider, see https://zenodo.org
	Zenodo Provider = "zenodo"
	// Dataverse provider, see https://dataverse.harvard.edu
	Dataverse Provider = "dataverse"
	// Invenio provider, see https://inveniordm.docs.cern.ch
	Invenio Provider = "invenio"
)

// Options defines the configuration for this backend
type Options struct {
	Doi               string `config:"doi"`                  // The DOI, a digital identifier of an object, usually a dataset
	Provider          string `config:"provider"`             // The DOI provider
	DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
}

// Fs stores the interface to the remote HTTP files
type Fs struct {
	name        string         // name of this remote
	root        string         // the path we are working on
	provider    Provider       // the DOI provider
	doiProvider doiProvider    // the interface used to interact with the DOI provider
	features    *fs.Features   // optional features
	opt         Options        // options for this backend
	ci          *fs.ConfigInfo // global config
	endpoint    *url.URL       // the main API endpoint for this remote
	endpointURL string         // endpoint as a string
	srv         *rest.Client   // the connection to the server
	pacer       *fs.Pacer      // pacer for API calls
	cache       *cache.Cache   // a cache for the remote metadata
}

// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // the remote path
	contentURL  string    // the URL where the contents of the file can be downloaded
	size        int64     // size of the object
	modTime     time.Time // modification time of the object
	contentType string    // content type of the object
	md5         string    // MD5 hash of the object content
}

// doiProvider is the interface used to list objects in a DOI
type doiProvider interface {
	// ListEntries returns the full list of entries found at the remote, regardless of root
	ListEntries(ctx context.Context) (entries []*Object, err error)
}

// Parse the input string as a DOI
// Examples:
//   10.1000/182                 -> 10.1000/182
//   https://doi.org/10.1000/182 -> 10.1000/182
//   doi:10.1000/182             -> 10.1000/182
func parseDoi(doi string) string {
	doiURL, err := url.Parse(doi)
	if err != nil {
		return doi
	}
	if doiURL.Scheme == "doi" {
		return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
	}
	if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
		return strings.TrimLeft(doiURL.Path, "/")
	}
	return doi
}

// Resolve a DOI to a URL
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
	resolverURL := opt.DoiResolverAPIURL
	if resolverURL == "" {
		resolverURL = doiResolverAPIURL
	}

	var result api.DoiResolverResponse
	params := url.Values{}
	params.Add("index", "1")
	opts := rest.Opts{
		Method:     "GET",
		RootURL:    resolverURL,
		Path:       "/handles/" + opt.Doi,
		Parameters: params,
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, err
	}

	if result.ResponseCode != 1 {
		return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
	}
	resolvedURLStr := ""
	for _, value := range result.Values {
		if value.Type == "URL" && value.Data.Format == "string" {
			valueStr, ok := value.Data.Value.(string)
			if !ok {
				return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
			}
			resolvedURLStr = valueStr
		}
	}
	resolvedURL, err := url.Parse(resolvedURLStr)
	if err != nil {
		return nil, err
	}
	return resolvedURL, nil
}

// Resolve the passed configuration into a provider and endpoint
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
	resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
	if err != nil {
		return "", nil, err
	}

	switch opt.Provider {
	case string(Dataverse):
		return resolveDataverseEndpoint(resolvedURL)
	case string(Invenio):
		return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
	case string(Zenodo):
		return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
	}

	hostname := strings.ToLower(resolvedURL.Hostname())
	if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
		return resolveDataverseEndpoint(resolvedURL)
	}
	if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
		return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
	}
	if activateInvenio(ctx, srv, pacer, resolvedURL) {
		return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
	}

	return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
}

// Make the http connection from the passed options
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
	provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
	if err != nil {
		return false, err
	}

	// Update f with the new parameters
	f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
	f.endpoint = endpoint
	f.endpointURL = endpoint.String()
	f.provider = provider
	f.opt.Provider = string(provider)

	switch f.provider {
	case Dataverse:
		f.doiProvider = newDataverseProvider(f)
	case Invenio, Zenodo:
		f.doiProvider = newInvenioProvider(f)
	default:
		return false, fmt.Errorf("provider type '%s' not supported", f.provider)
	}

	// Determine if the root is a file
	entries, err := f.doiProvider.ListEntries(ctx)
	if err != nil {
		return false, err
	}
	for _, entry := range entries {
		if entry.remote == f.root {
			isFile = true
			break
		}
	}
	return isFile, nil
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	root = strings.Trim(root, "/")

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	opt.Doi = parseDoi(opt.Doi)

	client := fshttp.NewClient(ctx)
	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		ci:    ci,
		srv:   rest.NewClient(client),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		cache: cache.New(),
	}
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	isFile, err := f.httpConnection(ctx, opt)
	if err != nil {
		return nil, err
	}

	if isFile {
		// return an error with an fs which points to the parent
		newRoot := path.Dir(f.root)
		if newRoot == "." {
			newRoot = ""
		}
		f.root = newRoot
		return f, fs.ErrorIsFile
	}

	return f, nil
}

// Name returns the configured name of the file system
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root for the filesystem
func (f *Fs) Root() string {
	return f.root
}

// String returns the URL for the filesystem
func (f *Fs) String() string {
	return fmt.Sprintf("DOI %s", f.opt.Doi)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Hashes returns hash.MD5, the one hash type this remote reports
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return errorReadOnly
}

// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
	return errorReadOnly
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return errorReadOnly
}

// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	entries, err := f.doiProvider.ListEntries(ctx)
	if err != nil {
		return nil, err
	}

	remoteFullPath := remote
	if f.root != "" {
		remoteFullPath = path.Join(f.root, remote)
	}

	for _, entry := range entries {
		if entry.Remote() == remoteFullPath {
			return entry, nil
		}
	}

	return nil, fs.ErrorObjectNotFound
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	fileEntries, err := f.doiProvider.ListEntries(ctx)
	if err != nil {
		return nil, fmt.Errorf("error listing %q: %w", dir, err)
	}

	fullDir := path.Join(f.root, dir)
	if fullDir != "" {
		fullDir += "/"
	}

	dirPaths := map[string]bool{}
	for _, entry := range fileEntries {
		// First, filter out files not in `fullDir`
		if !strings.HasPrefix(entry.remote, fullDir) {
			continue
		}
		// Then, find entries in subfolders
		remotePath := entry.remote
		if fullDir != "" {
			remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
		}
		parts := strings.SplitN(remotePath, "/", 2)
		if len(parts) == 1 {
			newEntry := *entry
			newEntry.remote = path.Join(dir, remotePath)
			entries = append(entries, &newEntry)
		} else {
			dirPaths[path.Join(dir, parts[0])] = true
		}
	}

	for dirPath := range dirPaths {
		entry := fs.NewDir(dirPath, time.Time{})
		entries = append(entries, entry)
	}

	return entries, nil
}
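
List above synthesizes directory entries from a flat list of file paths: anything deeper than one level below the requested dir collapses into a single directory entry. A self-contained sketch of that logic with plain strings (hypothetical helper, not part of the backend):

	package main

	import (
		"fmt"
		"strings"
	)

	func children(paths []string, dir string) []string {
		prefix := ""
		if dir != "" {
			prefix = dir + "/"
		}
		seen := map[string]bool{}
		var out []string
		for _, p := range paths {
			if !strings.HasPrefix(p, prefix) {
				continue // not under dir
			}
			rest := strings.TrimPrefix(p, prefix)
			parts := strings.SplitN(rest, "/", 2)
			if len(parts) == 1 {
				out = append(out, parts[0]) // a file directly in dir
			} else if !seen[parts[0]] {
				seen[parts[0]] = true
				out = append(out, parts[0]+"/") // a synthesized subdirectory
			}
		}
		return out
	}

	func main() {
		paths := []string{"a.txt", "data/b.csv", "data/raw/c.csv"}
		fmt.Println(children(paths, ""))     // [a.txt data/]
		fmt.Println(children(paths, "data")) // [b.csv raw/]
	}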
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}

// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
	return o.fs
}

// String returns the URL to the remote HTTP file
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the MD5 checksum of the remote file if known; other hash
// types return hash.ErrUnsupported
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5, nil
}

// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification and access time to the specified time
//
// It returns errorReadOnly as this backend is read-only
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return errorReadOnly
}

// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
	return true
}

// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	fs.FixRangeOption(options, o.size)
	opts := rest.Opts{
		Method:  "GET",
		RootURL: o.contentURL,
		Options: options,
	}
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("Open failed: %w", err)
	}

	// Handle non-compliant redirects
	if res.Header.Get("Location") != "" {
		newURL, err := res.Location()
		if err == nil {
			opts.RootURL = newURL.String()
			err = o.fs.pacer.Call(func() (bool, error) {
				res, err = o.fs.srv.Call(ctx, &opts)
				return shouldRetry(ctx, res, err)
			})
			if err != nil {
				return nil, fmt.Errorf("Open failed: %w", err)
			}
		}
	}

	return res.Body, nil
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errorReadOnly
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.contentType
}

var commandHelp = []fs.CommandHelp{{
	Name:  "metadata",
	Short: "Show metadata about the DOI.",
	Long: `This command returns a JSON object with metadata about the DOI.

    rclone backend metadata doi:
`,
}, {
	Name:  "set",
	Short: "Set command for updating the config parameters.",
	Long: `This set command can be used to update the config parameters
for a running doi backend.

Usage Examples:

    rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI

The option keys are named as they are in the config file.

This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.

It doesn't return anything.
`,
}}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	switch name {
	case "metadata":
		return f.ShowMetadata(ctx)
	case "set":
		newOpt := f.opt
		err := configstruct.Set(configmap.Simple(opt), &newOpt)
		if err != nil {
			return nil, fmt.Errorf("reading config: %w", err)
		}
		_, err = f.httpConnection(ctx, &newOpt)
		if err != nil {
			return nil, fmt.Errorf("updating session: %w", err)
		}
		f.opt = newOpt
		keys := []string{}
		for k := range opt {
			keys = append(keys, k)
		}
		fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
		return nil, nil
	default:
		return nil, fs.ErrorCommandNotFound
	}
}

// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
	doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
	if err != nil {
		return nil, err
	}

	info := map[string]any{}
	info["DOI"] = f.opt.Doi
	info["URL"] = doiURL.String()
	info["metadataURL"] = f.endpointURL
	info["provider"] = f.provider
	return info, nil
}
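
// Illustrative only: assuming a Zenodo-hosted DOI such as
// 10.5281/zenodo.2600782 (the one used in the tests below), the
// "metadata" command might emit something like:
//
//	{
//	  "DOI": "10.5281/zenodo.2600782",
//	  "URL": "https://doi.org/10.5281/zenodo.2600782",
//	  "metadataURL": "https://zenodo.org/api/records/2600782",
//	  "provider": "zenodo"
//	}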

// Check the interfaces are satisfied
var (
	_ fs.Fs          = (*Fs)(nil)
	_ fs.PutStreamer = (*Fs)(nil)
	_ fs.Commander   = (*Fs)(nil)
	_ fs.Object      = (*Object)(nil)
	_ fs.MimeTyper   = (*Object)(nil)
)
backend/doi/doi_internal_test.go (new file, 260 lines)
@@ -0,0 +1,260 @@
package doi

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/hash"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var remoteName = "TestDoi"

func TestParseDoi(t *testing.T) {
	// 10.1000/182 -> 10.1000/182
	doi := "10.1000/182"
	parsed := parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// https://doi.org/10.1000/182 -> 10.1000/182
	doi = "https://doi.org/10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// https://dx.doi.org/10.1000/182 -> 10.1000/182
	doi = "https://dx.doi.org/10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// doi:10.1000/182 -> 10.1000/182
	doi = "doi:10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// doi://10.1000/182 -> 10.1000/182
	doi = "doi://10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)
}

// prepareMockDoiResolverServer prepares a test server to resolve DOIs
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
	mux := http.NewServeMux()

	// Handle requests for resolving DOIs
	mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are resolving a DOI
		handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
		assert.NotEmpty(t, handle)
		index := r.URL.Query().Get("index")
		assert.Equal(t, "1", index)

		// Return the most basic response
		result := api.DoiResolverResponse{
			ResponseCode: 1,
			Handle:       handle,
			Values: []api.DoiResolverResponseValue{
				{
					Index: 1,
					Type:  "URL",
					Data: api.DoiResolverResponseValueData{
						Format: "string",
						Value:  resolvedURL,
					},
				},
			},
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})

	// Make the test server
	ts := httptest.NewServer(mux)

	// Close the server at the end of the test
	t.Cleanup(ts.Close)

	return ts.URL + "/api"
}

func md5Sum(text string) string {
	hash := md5.Sum([]byte(text))
	return hex.EncodeToString(hash[:])
}

// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
	mux := http.NewServeMux()

	// Handle requests for a single record
	mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are returning data about a single record
		recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
		assert.NotEmpty(t, recordID)

		// Return the most basic response
		selfURL, err := url.Parse("http://" + r.Host)
		require.NoError(t, err)
		selfURL = selfURL.JoinPath(r.URL.String())
		result := api.InvenioRecordResponse{
			Links: api.InvenioRecordResponseLinks{
				Self: selfURL.String(),
			},
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})
	// Handle requests for listing files in a record
	mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
		// Return the most basic response
		filesBaseURL, err := url.Parse("http://" + r.Host)
		require.NoError(t, err)
		filesBaseURL = filesBaseURL.JoinPath("/api/files/")

		entries := []api.InvenioFilesResponseEntry{}
		for filename, contents := range files {
			entries = append(entries,
				api.InvenioFilesResponseEntry{
					Key:      filename,
					Checksum: md5Sum(contents),
					Size:     int64(len(contents)),
					Updated:  time.Now().UTC().Format(time.RFC3339),
					MimeType: "text/plain; charset=utf-8",
					Links: api.InvenioFilesResponseEntryLinks{
						Content: filesBaseURL.JoinPath(filename).String(),
					},
				},
			)
		}

		result := api.InvenioFilesResponse{
			Entries: entries,
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})
	// Handle requests for file contents
	mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are returning the contents of a file
		filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
		assert.NotEmpty(t, filename)
		contents, found := files[filename]
		if !found {
			w.WriteHeader(404)
			return
		}

		// Return the most basic response
		_, err := w.Write([]byte(contents))
		require.NoError(t, err)
	})

	// Make the test server
	ts := httptest.NewServer(mux)

	// Close the server at the end of the test
	t.Cleanup(ts.Close)

	return ts
}

func TestZenodoRemote(t *testing.T) {
	recordID := "2600782"
	doi := "10.5281/zenodo.2600782"

	// The files in the dataset
	files := map[string]string{
		"README.md": "This is a dataset.",
		"data.txt":  "Some data",
	}

	ts := prepareMockZenodoServer(t, files)
	resolvedURL := ts.URL + "/record/" + recordID

	doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)

	testConfig := configmap.Simple{
		"type":                 "doi",
		"doi":                  doi,
		"provider":             "zenodo",
		"doi_resolver_api_url": doiResolverAPIURL,
	}
	f, err := NewFs(context.Background(), remoteName, "", testConfig)
	require.NoError(t, err)

	// Test listing the DOI files
	entries, err := f.List(context.Background(), "")
	require.NoError(t, err)

	sort.Sort(entries)

	require.Equal(t, len(files), len(entries))

	e := entries[0]
	assert.Equal(t, "README.md", e.Remote())
	assert.Equal(t, int64(18), e.Size())
	_, ok := e.(*Object)
	assert.True(t, ok)

	e = entries[1]
	assert.Equal(t, "data.txt", e.Remote())
	assert.Equal(t, int64(9), e.Size())
	_, ok = e.(*Object)
	assert.True(t, ok)

	// Test reading the DOI files
	o, err := f.NewObject(context.Background(), "README.md")
	require.NoError(t, err)
	assert.Equal(t, int64(18), o.Size())
	md5Hash, err := o.Hash(context.Background(), hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
	fd, err := o.Open(context.Background())
	require.NoError(t, err)
	data, err := io.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, []byte(files["README.md"]), data)
	do, ok := o.(fs.MimeTyper)
	require.True(t, ok)
	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))

	o, err = f.NewObject(context.Background(), "data.txt")
	require.NoError(t, err)
	assert.Equal(t, int64(9), o.Size())
	md5Hash, err = o.Hash(context.Background(), hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
	fd, err = o.Open(context.Background())
	require.NoError(t, err)
	data, err = io.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, []byte(files["data.txt"]), data)
	do, ok = o.(fs.MimeTyper)
	require.True(t, ok)
	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}
backend/doi/doi_test.go (new file, 16 lines)
@@ -0,0 +1,16 @@
// Test DOI filesystem interface
package doi

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDoi:",
		NilObject:  (*Object)(nil),
	})
}
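
// Note (assumption, following rclone's usual fstests convention rather
// than anything stated in this diff): the integration test above is
// typically run against the configured remote with something like:
//
//	go test -v ./backend/doi -remote TestDoi: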
backend/doi/invenio.go (new file, 164 lines)
@@ -0,0 +1,164 @@
// Implementation for InvenioRDM

package doi

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"regexp"
	"strings"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)

// Returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
	_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
	return err == nil
}

// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
	var res *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: resolvedURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err = srv.Call(ctx, &opts)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return "", nil, err
	}

	// First, attempt to grab the API URL from the headers
	var linksetURL *url.URL
	links := parseLinkHeader(res.Header.Get("Link"))
	for _, link := range links {
		if link.Rel == "linkset" && link.Type == "application/linkset+json" {
			parsed, err := url.Parse(link.Href)
			if err == nil {
				linksetURL = parsed
				break
			}
		}
	}

	if linksetURL != nil {
		endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
		if err == nil {
			return Invenio, endpoint, nil
		}
		fs.Logf(nil, "using linkset URL failed: %s", err.Error())
	}

	// If there is no linkset header, try to grab the record ID from the URL
	recordID := ""
	resURL := res.Request.URL
	match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
	if match != nil {
		recordID = match[1]
		guessedURL := res.Request.URL.ResolveReference(&url.URL{
			Path: "/api/records/" + recordID,
		})
		endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
		if err == nil {
			return Invenio, endpoint, nil
		}
		fs.Logf(nil, "guessing the URL failed: %s", err.Error())
	}

	return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
}

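// Illustrative example (the header below is the one exercised in
// link_header_internal_test.go): a Zenodo landing page can reply with
//
//	Link: <https://zenodo.org/api/records/15063252> ; rel="linkset" ; type="application/linkset+json"
//
// in which case the linkset branch above resolves the record API
// endpoint directly, with no need to guess from the URL path.
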
func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
	var result api.InvenioRecordResponse
	opts := rest.Opts{
		Method:  "GET",
		RootURL: resolvedURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, err
	}
	if result.Links.Self == "" {
		return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
	}
	return url.Parse(result.Links.Self)
}

// invenioProvider implements the doiProvider interface for InvenioRDM installations
type invenioProvider struct {
	f *Fs
}

// ListEntries returns the full list of entries found at the remote, regardless of root
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
	// Use the cache if populated
	cachedEntries, found := ip.f.cache.GetMaybe("files")
	if found {
		parsedEntries, ok := cachedEntries.([]Object)
		if ok {
			for _, entry := range parsedEntries {
				newEntry := entry
				entries = append(entries, &newEntry)
			}
			return entries, nil
		}
	}

	filesURL := ip.f.endpoint.JoinPath("files")
	var result api.InvenioFilesResponse
	opts := rest.Opts{
		Method: "GET",
		Path:   strings.TrimLeft(filesURL.EscapedPath(), "/"),
	}
	err = ip.f.pacer.Call(func() (bool, error) {
		res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("readDir failed: %w", err)
	}
	for _, file := range result.Entries {
		modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
		if modTimeErr != nil {
			fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
			modTime = timeUnset
		}
		entry := &Object{
			fs:          ip.f,
			remote:      file.Key,
			contentURL:  file.Links.Content,
			size:        file.Size,
			modTime:     modTime,
			contentType: file.MimeType,
			md5:         strings.TrimPrefix(file.Checksum, "md5:"),
		}
		entries = append(entries, entry)
	}
	// Populate the cache
	cacheEntries := []Object{}
	for _, entry := range entries {
		cacheEntries = append(cacheEntries, *entry)
	}
	ip.f.cache.Put("files", cacheEntries)
	return entries, nil
}

func newInvenioProvider(f *Fs) doiProvider {
	return &invenioProvider{
		f: f,
	}
}
backend/doi/link_header.go (new file, 75 lines)
@@ -0,0 +1,75 @@
package doi

import (
	"regexp"
	"strings"
)

var linkRegex = regexp.MustCompile(`^<(.+)>$`)
var valueRegex = regexp.MustCompile(`^"(.+)"$`)

// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
	Href   string
	Rel    string
	Type   string
	Extras map[string]string
}

func parseLinkHeader(header string) (links []headerLink) {
	for _, link := range strings.Split(header, ",") {
		link = strings.TrimSpace(link)
		parsed := parseLink(link)
		if parsed != nil {
			links = append(links, *parsed)
		}
	}
	return links
}

func parseLink(link string) (parsedLink *headerLink) {
	var parts []string
	for _, part := range strings.Split(link, ";") {
		parts = append(parts, strings.TrimSpace(part))
	}

	match := linkRegex.FindStringSubmatch(parts[0])
	if match == nil {
		return nil
	}

	result := &headerLink{
		Href:   match[1],
		Extras: map[string]string{},
	}

	for _, keyValue := range parts[1:] {
		parsed := parseKeyValue(keyValue)
		if parsed != nil {
			key, value := parsed[0], parsed[1]
			switch strings.ToLower(key) {
			case "rel":
				result.Rel = value
			case "type":
				result.Type = value
			default:
				result.Extras[key] = value
			}
		}
	}
	return result
}

func parseKeyValue(keyValue string) []string {
	parts := strings.SplitN(keyValue, "=", 2)
	if parts[0] == "" || len(parts) < 2 {
		return nil
	}
	match := valueRegex.FindStringSubmatch(parts[1])
	if match != nil {
		parts[1] = match[1]
		return parts
	}
	return parts
}
backend/doi/link_header_internal_test.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package doi

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestParseLinkHeader(t *testing.T) {
	header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
	links := parseLinkHeader(header)
	expected := headerLink{
		Href:   "https://zenodo.org/api/records/15063252",
		Rel:    "linkset",
		Type:   "application/linkset+json",
		Extras: map[string]string{},
	}
	assert.Contains(t, links, expected)

	header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
	links = parseLinkHeader(header)
	expectedList := []headerLink{{
		Href:   "https://api.example.com/issues?page=2",
		Rel:    "prev",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=4",
		Rel:    "next",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=10",
		Rel:    "last",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=1",
		Rel:    "first",
		Type:   "",
		Extras: map[string]string{},
	}}
	assert.Equal(t, links, expectedList)
}
backend/doi/zenodo.go (new file, 47 lines)
@@ -0,0 +1,47 @@
// Implementation for Zenodo

package doi

import (
	"context"
	"fmt"
	"net/url"
	"regexp"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)

// Resolve the main API endpoint for a DOI hosted on Zenodo
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
	match := zenodoRecordRegex.FindStringSubmatch(doi)
	if match == nil {
		return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
	}

	recordID := match[1]
	endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})

	var result api.InvenioRecordResponse
	opts := rest.Opts{
		Method:  "GET",
		RootURL: endpointURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return "", nil, err
	}

	endpointURL, err = url.Parse(result.Links.Self)
	if err != nil {
		return "", nil, err
	}

	return Zenodo, endpointURL, nil
}
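
// Illustrative walkthrough (values taken from the tests above): for
// doi = "10.5281/zenodo.2600782", zenodoRecordRegex captures recordID
// "2600782", so the endpoint is first guessed as
// <resolved-host>/api/records/2600782 and then confirmed by fetching it
// and reading links.self from the response.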
@@ -18,6 +18,7 @@ import (
	"net/http"
	"os"
	"path"
	"slices"
	"sort"
	"strconv"
	"strings"
@@ -37,8 +38,8 @@ import (
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/env"
@@ -80,9 +81,10 @@ const (
// Globals
var (
	// Description of how to auth for this app
	driveConfig = &oauth2.Config{
	driveConfig = &oauthutil.Config{
		Scopes:   []string{scopePrefix + "drive"},
		Endpoint: google.Endpoint,
		AuthURL:      google.Endpoint.AuthURL,
		TokenURL:     google.Endpoint.TokenURL,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
@@ -120,6 +122,7 @@ var (
		"text/html":                 ".html",
		"text/plain":                ".txt",
		"text/tab-separated-values": ".tsv",
		"text/markdown":             ".md",
	}
	_mimeTypeToExtensionLinks = map[string]string{
		"application/x-link-desktop": ".desktop",
@@ -197,13 +200,7 @@ func driveScopes(scopesString string) (scopes []string) {

// Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool {
	for _, scope := range scopes {
		if scope == scopePrefix+"drive.appfolder" {
			return true
		}

	}
	return false
	return slices.Contains(scopes, scopePrefix+"drive.appfolder")
}

func driveOAuthOptions() []fs.Option {
@@ -957,12 +954,7 @@ func parseDrivePath(path string) (root string, err error) {
type listFn func(*drive.File) bool

func containsString(slice []string, s string) bool {
	for _, e := range slice {
		if e == s {
			return true
		}
	}
	return false
	return slices.Contains(slice, s)
}

// getFile returns drive.File for the ID passed and fields passed in
@@ -1151,13 +1143,7 @@ OUTER:
			// Check the case of items is correct since
			// the `=` operator is case insensitive.
			if title != "" && title != item.Name {
				found := false
				for _, stem := range stems {
					if stem == item.Name {
						found = true
						break
					}
				}
				found := slices.Contains(stems, item.Name)
				if !found {
					continue
				}
@@ -1210,6 +1196,7 @@ func fixMimeType(mimeTypeIn string) string {
	}
	return mimeTypeOut
}

func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
	out = make(map[string][]string, len(in))
	for k, v := range in {
@@ -1220,9 +1207,11 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
	}
	return out
}

func isInternalMimeType(mimeType string) bool {
	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}

func isLinkMimeType(mimeType string) bool {
	return strings.HasPrefix(mimeType, "application/x-link-")
}
@@ -1557,13 +1546,10 @@ func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) {
func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) {
	// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
	if f.opt.SkipChecksumGphotos {
		for _, space := range info.Spaces {
			if space == "photos" {
				info.Md5Checksum = ""
				info.Sha1Checksum = ""
				info.Sha256Checksum = ""
				break
			}
		if slices.Contains(info.Spaces, "photos") {
			info.Md5Checksum = ""
			info.Sha1Checksum = ""
			info.Sha256Checksum = ""
		}
	}
	o := &Object{
@@ -1655,7 +1641,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
	ctx context.Context, remote string, info *drive.File,
	extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
	extension, exportName, exportMimeType string, isDocument bool,
) (o fs.Object, err error) {
	// Note that resolveShortcut will have been called already if
	// we are being called from a listing. However the drive.Item
	// will have been resolved so this will do nothing.
@@ -1758,7 +1745,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
	}
	var updateMetadata updateMetadataFn
	if len(metadata) > 0 {
		updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
		updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true)
		if err != nil {
			return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
		}
@@ -1789,7 +1776,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
	}
	dirID = actualID(dirID)
	updateInfo := &drive.File{}
	updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
	updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true)
	if err != nil {
		return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
	}
@@ -1846,6 +1833,7 @@ func linkTemplate(mt string) *template.Template {
	})
	return _linkTemplates[mt]
}

func (f *Fs) fetchFormats(ctx context.Context) {
	fetchFormatsOnce.Do(func() {
		var about *drive.About
@@ -1891,7 +1879,8 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
	extension, mimeType string, isDocument bool) {
	extension, mimeType string, isDocument bool,
) {
	exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
	if isDocument {
		for _, _extension := range f.exportExtensions {
@@ -2200,7 +2189,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
	wg := sync.WaitGroup{}
	in := make(chan listREntry, listRInputBuffer)
	out := make(chan error, f.ci.Checkers)
	list := walk.NewListRHelper(callback)
	list := list.NewHelper(callback)
	overflow := []listREntry{}
	listed := 0

@@ -2219,7 +2208,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
		case in <- job:
		default:
			overflow = append(overflow, job)
			wg.Add(-1)
			wg.Done()
		}
	}

@@ -2238,7 +2227,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
	wg.Add(1)
	in <- listREntry{directoryID, dir}

	for i := 0; i < f.ci.Checkers; i++ {
	for range f.ci.Checkers {
		go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
	}
	go func() {
@@ -2247,11 +2236,8 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
		// if the input channel overflowed add the collected entries to the channel now
		for len(overflow) > 0 {
			mu.Lock()
			l := len(overflow)
			// only fill half of the channel to prevent entries being put into overflow again
			if l > listRInputBuffer/2 {
				l = listRInputBuffer / 2
			}
			l := min(len(overflow), listRInputBuffer/2)
			wg.Add(l)
			for _, d := range overflow[:l] {
				in <- d
@@ -2271,7 +2257,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
		mu.Unlock()
	}()
	// wait until all the workers have finished
	for i := 0; i < f.ci.Checkers; i++ {
	for range f.ci.Checkers {
		e := <-out
		mu.Lock()
		// if one worker returns an error early, close the input so all other workers exit
@@ -2687,7 +2673,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	if shortcutID != "" {
		return f.delete(ctx, shortcutID, f.opt.UseTrash)
	}
	var trashedFiles = false
	trashedFiles := false
	if check {
		found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
			if !item.Trashed {
@@ -2924,7 +2910,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})

	if err != nil {
		return err
	}
@@ -3185,6 +3170,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
		}
	}()
}

func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
	var startPageToken *drive.StartPageToken
	err = f.pacer.Call(func() (bool, error) {
@@ -3523,14 +3509,14 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
	return f.unTrash(ctx, dir, directoryID, true)
}

// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
// copy or move file with id to dest
func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string) (err error) {
	info, err := f.getFile(ctx, id, f.getFileFields(ctx))
	if err != nil {
		return fmt.Errorf("couldn't find id: %w", err)
	}
	if info.MimeType == driveFolderType {
		return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
		return fmt.Errorf("can't %s directory use: rclone %s --drive-root-folder-id %s %s %s", operation, operation, id, fs.ConfigString(f), dest)
	}
	info.Name = f.opt.Enc.ToStandardName(info.Name)
	o, err := f.newObjectWithInfo(ctx, info.Name, info)
@@ -3551,14 +3537,21 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
	if err != nil {
		return err
	}
	_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
	if err != nil {
		return fmt.Errorf("copy failed: %w", err)

	var opErr error
	if operation == "moveid" {
		_, opErr = operations.Move(ctx, dstFs, nil, destLeaf, o)
	} else {
		_, opErr = operations.Copy(ctx, dstFs, nil, destLeaf, o)
	}
	if opErr != nil {
		return fmt.Errorf("%s failed: %w", operation, opErr)
	}
	return nil
}

func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
// Run the drive query calling fn on each entry found
func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (err error) {
	list := f.svc.Files.List()
	if query != "" {
		list.Q(query)
@@ -3577,10 +3570,7 @@ func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, er
	if f.rootFolderID == "appDataFolder" {
		list.Spaces("appDataFolder")
	}

	fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))

	var results []*drive.File
	for {
		var files *drive.FileList
		err = f.pacer.Call(func() (bool, error) {
@@ -3588,20 +3578,66 @@ func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, er
			return f.shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, fmt.Errorf("failed to execute query: %w", err)
			return fmt.Errorf("failed to execute query: %w", err)
		}
		if files.IncompleteSearch {
			fs.Errorf(f, "search result INCOMPLETE")
		}
		results = append(results, files.Files...)
		for _, item := range files.Files {
			fn(item)
		}
		if files.NextPageToken == "" {
			break
		}
		list.PageToken(files.NextPageToken)
	}
	return nil
}

// Run the drive query returning the entries found
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
	var results []*drive.File
	err = f.queryFn(ctx, query, func(item *drive.File) {
		results = append(results, item)
	})
	if err != nil {
		return nil, err
	}
	return results, nil
}

// Rescue, list or delete orphaned files
func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error) {
	return f.queryFn(ctx, "'me' in owners and trashed=false", func(item *drive.File) {
		if len(item.Parents) != 0 {
			return
		}
		// Have found an orphaned entry
		if delete {
			fs.Infof(item.Name, "Deleting orphan %q into trash", item.Id)
			err = f.delete(ctx, item.Id, true)
			if err != nil {
				fs.Errorf(item.Name, "Failed to delete orphan %q: %v", item.Id, err)
			}
		} else if dirID == "" {
			operations.SyncPrintf("%q, %q\n", item.Name, item.Id)
		} else {
			fs.Infof(item.Name, "Rescuing orphan %q", item.Id)
			err = f.pacer.Call(func() (bool, error) {
				_, err = f.svc.Files.Update(item.Id, nil).
					AddParents(dirID).
					Fields(f.getFileFields(ctx)).
					SupportsAllDrives(true).
					Context(ctx).Do()
				return f.shouldRetry(ctx, err)
			})
			if err != nil {
				fs.Errorf(item.Name, "Failed to rescue orphan %q: %v", item.Id, err)
			}
		}
	})
}

var commandHelp = []fs.CommandHelp{{
	Name:  "get",
	Short: "Get command for fetching the drive config parameters",
@@ -3746,6 +3782,28 @@ attempted if possible.

Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
`,
}, {
	Name:  "moveid",
	Short: "Move files by ID",
	Long: `This command moves files by ID

Usage:

    rclone backend moveid drive: ID path
    rclone backend moveid drive: ID1 path1 ID2 path2

It moves the drive file with ID given to the path (an rclone path which
will be passed internally to rclone moveto).

The path should end with a / to indicate that the file should be moved
as named to this directory. If it doesn't end with a / then the last
path component will be used as the file name.

If the destination is a drive backend then server-side moving will be
attempted if possible.

Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
`,
}, {
	Name:  "exportformats",
	Short: "Dump the export formats for debug purposes",
@@ -3793,6 +3851,37 @@ The result is a JSON array of matches, for example:
        "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
    }
]`,
}, {
	Name:  "rescue",
	Short: "Rescue or delete any orphaned files",
	Long: `This command rescues or deletes any orphaned files or directories.

Sometimes files can get orphaned in Google Drive. This means that they
are no longer in any folder in Google Drive.

This command finds those files and either rescues them to a directory
you specify or deletes them.

Usage:

This can be used in 3 ways.

First, list all orphaned files

    rclone backend rescue drive:

Second, rescue all orphaned files to the directory indicated

    rclone backend rescue drive: "relative/path/to/rescue/directory"

e.g. to rescue all orphans to a directory called "Orphans" in the top level

    rclone backend rescue drive: Orphans

Third, delete all orphaned files into the trash

    rclone backend rescue drive: -o delete
`,
}}

// Command the backend to run a named command
@@ -3804,7 +3893,7 @@ The result is a JSON array of matches, for example:
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
	switch name {
	case "get":
		out := make(map[string]string)
@@ -3893,16 +3982,16 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
			dir = arg[0]
		}
		return f.unTrashDir(ctx, dir, true)
	case "copyid":
	case "copyid", "moveid":
		if len(arg)%2 != 0 {
			return nil, errors.New("need an even number of arguments")
		}
		for len(arg) > 0 {
			id, dest := arg[0], arg[1]
			arg = arg[2:]
			err = f.copyID(ctx, id, dest)
			err = f.copyOrMoveID(ctx, name, id, dest)
			if err != nil {
				return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
				return nil, fmt.Errorf("failed %s %q to %q: %w", name, id, dest, err)
			}
		}
		return nil, nil
@@ -3913,14 +4002,29 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
	case "query":
		if len(arg) == 1 {
			query := arg[0]
			var results, err = f.query(ctx, query)
			results, err := f.query(ctx, query)
			if err != nil {
				return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
			}
			return results, nil
		} else {
			return nil, errors.New("need a query argument")
		}
		return nil, errors.New("need a query argument")
	case "rescue":
		dirID := ""
		_, delete := opt["delete"]
		if len(arg) == 0 {
			// no arguments - list only
		} else if !delete && len(arg) == 1 {
			dir := arg[0]
			dirID, err = f.dirCache.FindDir(ctx, dir, true)
			if err != nil {
				return nil, fmt.Errorf("failed to find or create rescue directory %q: %w", dir, err)
			}
			fs.Infof(f, "Rescuing orphans into %q", dir)
		} else {
			return nil, errors.New("syntax error: need 0 or 1 args or -o delete")
		}
		return nil, f.rescue(ctx, dirID, delete)
	default:
		return nil, fs.ErrorCommandNotFound
	}
@@ -3964,8 +4068,9 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	}
	return "", hash.ErrUnsupported
}

func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
	if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
		return "", hash.ErrUnsupported
	}
	return "", nil
@@ -3978,7 +4083,8 @@ func (o *baseObject) Size() int64 {

// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
) {
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
@@ -4191,12 +4297,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	}
	return o.baseObject.open(ctx, o.url, options...)
}

func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Update the size with what we are reading as it can change from
	// the HEAD in the listing to this GET. This stops rclone marking
	// the transfer as corrupted.
	var offset, end int64 = 0, -1
	var newOptions = options[:0]
	newOptions := options[:0]
	for _, o := range options {
		// Note that Range requests don't work on Google docs:
		// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
@@ -4223,9 +4330,10 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
	}
	return
}

func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1
	var data = o.content
	data := o.content
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
@@ -4250,7 +4358,8 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
}

func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
	src fs.ObjectInfo) (info *drive.File, err error) {
	src fs.ObjectInfo,
) (info *drive.File, err error) {
	// Make the API request to upload metadata and file data.
	size := src.Size()
	if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
@@ -4328,6 +4437,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

	return nil
}

func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	srcMimeType := fs.MimeType(ctx, src)
	importMimeType := ""
@@ -4423,6 +4533,7 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
func (o *documentObject) ext() string {
	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}

func (o *linkObject) ext() string {
	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}

@@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
		wantErr error
	}{
		{"doc", []string{".doc"}, nil},
		{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
		{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
		{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
		{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
	} {
@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
	require.NoError(t, f.Purge(ctx, "trashDir"))
}

// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
	ctx := context.Background()
	obj, err := f.NewObject(ctx, existingFile)
	require.NoError(t, err)
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
	}

	t.Run("BadID", func(t *testing.T) {
		err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
		err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
		require.Error(t, err)
		assert.Contains(t, err.Error(), "couldn't find id")
	})
@@ -506,19 +506,31 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
	t.Run("Directory", func(t *testing.T) {
		rootID, err := f.dirCache.RootID(ctx, false)
		require.NoError(t, err)
		err = f.copyID(ctx, rootID, dir+"/")
		err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
		require.Error(t, err)
		assert.Contains(t, err.Error(), "can't copy directory")
		assert.Contains(t, err.Error(), "can't moveid directory")
	})

	t.Run("WithoutDestName", func(t *testing.T) {
		err = f.copyID(ctx, o.id, dir+"/")
	t.Run("MoveWithoutDestName", func(t *testing.T) {
		err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
		require.NoError(t, err)
		checkFile(path.Base(existingFile))
	})

	t.Run("WithDestName", func(t *testing.T) {
		err = f.copyID(ctx, o.id, dir+"/potato.txt")
	t.Run("CopyWithoutDestName", func(t *testing.T) {
		err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
		require.NoError(t, err)
		checkFile(path.Base(existingFile))
	})

	t.Run("MoveWithDestName", func(t *testing.T) {
		err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
		require.NoError(t, err)
		checkFile("potato.txt")
	})

	t.Run("CopyWithDestName", func(t *testing.T) {
		err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
		require.NoError(t, err)
		checkFile("potato.txt")
	})
@@ -566,7 +578,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
	// Check set up for filtering
	assert.True(t, f.Features().FilterAware)

	opt := &filter.Opt{}
	opt := &filter.Options{}
	err := opt.MaxAge.Set("1h")
	assert.NoError(t, err)
	flt, err := filter.NewFilter(opt)
@@ -647,7 +659,7 @@ func (f *Fs) InternalTest(t *testing.T) {
	})
	t.Run("Shortcuts", f.InternalTestShortcuts)
	t.Run("UnTrash", f.InternalTestUnTrash)
	t.Run("CopyID", f.InternalTestCopyID)
	t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
	t.Run("Query", f.InternalTestQuery)
	t.Run("AgeQuery", f.InternalTestAgeQuery)
	t.Run("ShouldRetry", f.InternalTestShouldRetry)

@@ -4,6 +4,7 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"maps"
	"strconv"
	"strings"
	"sync"
@@ -324,9 +325,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
	metadata := make(fs.Metadata, 16)

	// Dump user metadata first as it overrides system metadata
	for k, v := range info.Properties {
		metadata[k] = v
	}
	maps.Copy(metadata, info.Properties)

	// System metadata
	metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
@@ -508,7 +507,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
	callbackFns := []updateMetadataFn{}
	callback = func(ctx context.Context, info *drive.File) error {
		for _, fn := range callbackFns {
@@ -533,7 +532,9 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
		}
		switch k {
		case "copy-requires-writer-permission":
			if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
			if isFolder {
				fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
			} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
				return nil, err
			}
		case "writers-can-share":
@@ -630,7 +631,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
	if err != nil {
		return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
	}
	callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
	callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
	if err != nil {
		return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
	}

@@ -177,10 +177,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
		if start >= rx.ContentLength {
			break
		}
		reqSize = rx.ContentLength - start
		if reqSize >= int64(rx.f.opt.ChunkSize) {
			reqSize = int64(rx.f.opt.ChunkSize)
		}
		reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))
		chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
	} else {
		// If size unknown read into buffer

@@ -11,7 +11,6 @@ import (
	"fmt"

	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
	"github.com/rclone/rclone/fs/fserrors"
)

// finishBatch commits the batch, returning a batch status to poll or maybe complete
@@ -21,14 +20,10 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
	}
	err = f.pacer.Call(func() (bool, error) {
		complete, err = f.srv.UploadSessionFinishBatchV2(arg)
		// If error is insufficient space then don't retry
		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
				err = fserrors.NoRetryError(err)
				return false, err
			}
		if retry, err := shouldRetryExclude(ctx, err); !retry {
			return retry, err
		}
		// after the first chunk is uploaded, we retry everything
		// after the first chunk is uploaded, we retry everything except the excluded errors
		return err != nil, err
	})
	if err != nil {

@@ -55,10 +55,7 @@ func (d *digest) Write(p []byte) (n int, err error) {
	n = len(p)
	for len(p) > 0 {
		d.writtenMore = true
		toWrite := bytesPerBlock - d.n
		if toWrite > len(p) {
			toWrite = len(p)
		}
		toWrite := min(bytesPerBlock-d.n, len(p))
		_, err = d.blockHash.Write(p[:toWrite])
		if err != nil {
			panic(hashReturnedError)

@@ -11,7 +11,7 @@ import (

func testChunk(t *testing.T, chunk int) {
	data := make([]byte, chunk)
	for i := 0; i < chunk; i++ {
	for i := range chunk {
		data[i] = 'A'
	}
	for _, test := range []struct {

@@ -47,6 +47,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
@@ -91,9 +92,12 @@ const (
maxFileNameLength = 255
)

type exportAPIFormat string
type exportExtension string // dotless

var (
// Description of how to auth for this app
dropboxConfig = &oauth2.Config{
dropboxConfig = &oauthutil.Config{
Scopes: []string{
"files.metadata.write",
"files.content.write",
@@ -108,7 +112,8 @@ var (
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// },
Endpoint: dropbox.OAuthEndpoint(""),
AuthURL: dropbox.OAuthEndpoint("").AuthURL,
TokenURL: dropbox.OAuthEndpoint("").TokenURL,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -130,10 +135,20 @@ var (
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}

exportKnownAPIFormats = map[exportAPIFormat]exportExtension{
"markdown": "md",
"html": "html",
}
// Populated based on exportKnownAPIFormats
exportKnownExtensions = map[exportExtension]exportAPIFormat{}

paperExtension = ".paper"
paperTemplateExtension = ".papert"
)

// Gets an oauth config with the right scopes
func getOauthConfig(m configmap.Mapper) *oauth2.Config {
func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
// If not impersonating, use standard scopes
if impersonate, _ := m.Get("impersonate"); impersonate == "" {
return dropboxConfig
@@ -245,23 +260,61 @@ folders.`,
Help: "Specify a different Dropbox namespace ID to use as the root for all paths.",
Default: "",
Advanced: true,
}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
}, {
Name: "export_formats",
Help: `Comma separated list of preferred formats for exporting files

Certain Dropbox files can only be accessed by exporting them to another format.
These include Dropbox Paper documents.

For each such file, rclone will choose the first format on this list that Dropbox
considers valid. If none is valid, it will choose Dropbox's default format.

Known formats include: "html", "md" (markdown)`,
Default: fs.CommaSepList{"html", "md"},
Advanced: true,
}, {
Name: "skip_exports",
Help: "Skip exportable files in all listings.\n\nIf given, exportable files practically become invisible to rclone.",
Default: false,
Advanced: true,
}, {
Name: "show_all_exports",
Default: false,
Help: `Show all exportable files in listings.

Adding this flag will allow all exportable files to be server side copied.
Note that rclone doesn't add extensions to the exportable file names in this mode.

Do **not** use this flag when trying to download exportable files - rclone
will fail to download them.
`,
Advanced: true,
},
}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
})

for apiFormat, ext := range exportKnownAPIFormats {
exportKnownExtensions[ext] = apiFormat
}
}

// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
RootNsid string `config:"root_namespace"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
RootNsid string `config:"root_namespace"`
ExportFormats fs.CommaSepList `config:"export_formats"`
SkipExports bool `config:"skip_exports"`
ShowAllExports bool `config:"show_all_exports"`
}

// Fs represents a remote dropbox server
@@ -281,8 +334,18 @@ type Fs struct {
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
exportExts []exportExtension
}

type exportType int

const (
notExport exportType = iota // a regular file
exportHide // should be hidden
exportListOnly // listable, but can't export
exportExportable // can export
)

// Object describes a dropbox object
//
// Dropbox Objects always have full metadata
@@ -294,6 +357,9 @@ type Object struct {
bytes int64 // size of the object
modTime time.Time // time it was last modified
hash string // content_hash of the object

exportType exportType
exportAPIFormat exportAPIFormat
}

// Name of the remote (as passed into NewFs)
@@ -316,32 +382,46 @@ func (f *Fs) Features() *fs.Features {
return f.features
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// Some specific errors which should be excluded from retries
func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
if err == nil {
return false, err
}
errString := err.Error()
if fserrors.ContextError(ctx, &err) {
return false, err
}
// First check for specific errors
//
// These come back from the SDK in a whole host of different
// error types, but there doesn't seem to be a consistent way
// of reading the error cause, so here we just check using the
// error string which isn't perfect but does the job.
errString := err.Error()
if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err)
} else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err)
}
return true, err
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
errString := err.Error()
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err
}
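The refactor above layers the retry policy: `shouldRetryExclude` rules out errors that must never be retried, and `shouldRetry` (and the callers in finishBatch and uploadChunked) delegate to it before applying their own rules. A minimal sketch of that layering with assumed names, not the rclone code:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// shouldRetryExclude returns retry=false for errors that must not be retried.
func shouldRetryExclude(err error) (retry bool, out error) {
	if err == nil {
		return false, nil
	}
	if strings.Contains(err.Error(), "insufficient_space") {
		return false, err // never retry a full account
	}
	return true, err
}

// shouldRetry layers a generic policy on top of the exclusions.
func shouldRetry(err error) (bool, error) {
	if retry, err := shouldRetryExclude(err); !retry {
		return retry, err
	}
	// Everything not explicitly excluded is retried here.
	return err != nil, err
}

func main() {
	fmt.Println(shouldRetry(errors.New("insufficient_space"))) // false
	fmt.Println(shouldRetry(errors.New("rate_limited")))       // true
}
```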
@@ -386,7 +466,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
oldToken = strings.TrimSpace(oldToken)
if ok && oldToken != "" && oldToken[0] != '{' {
fs.Infof(name, "Converting token to new format")
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
if err != nil {
return nil, fmt.Errorf("NewFS convert token: %w", err)
@@ -420,6 +500,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
HeaderGenerator: f.headerGenerator,
}

for _, e := range opt.ExportFormats {
ext := exportExtension(e)
if exportKnownExtensions[ext] == "" {
return nil, fmt.Errorf("dropbox: unknown export format '%s'", e)
}
f.exportExts = append(f.exportExts, ext)
}

// unauthorized config for endpoints that fail with auth
ucfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
@@ -572,38 +660,126 @@ func (f *Fs) setRoot(root string) {
}
}

type getMetadataResult struct {
entry files.IsMetadata
notFound bool
err error
}

// getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
err = f.pacer.Call(func() (bool, error) {
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
func (f *Fs) getMetadata(ctx context.Context, objPath string) (res getMetadataResult) {
res.err = f.pacer.Call(func() (bool, error) {
res.entry, res.err = f.srv.GetMetadata(&files.GetMetadataArg{
Path: f.opt.Enc.FromStandardPath(objPath),
})
return shouldRetry(ctx, err)
return shouldRetry(ctx, res.err)
})
if err != nil {
switch e := err.(type) {
if res.err != nil {
switch e := res.err.(type) {
case files.GetMetadataAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
notFound = true
err = nil
res.notFound = true
res.err = nil
}
}
}
return
}

// getFileMetadata gets the metadata for a file
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, filePath)
if err != nil {
return nil, err
// Get metadata such that the result would be exported with the given extension
// Return a channel that will eventually receive the metadata
func (f *Fs) getMetadataForExt(ctx context.Context, filePath string, wantExportExtension exportExtension) chan getMetadataResult {
ch := make(chan getMetadataResult, 1)
wantDownloadable := (wantExportExtension == "")
go func() {
defer close(ch)

res := f.getMetadata(ctx, filePath)
info, ok := res.entry.(*files.FileMetadata)
if !ok { // Can't check anything about file, just return what we have
ch <- res
return
}

// Return notFound if downloadability or extension doesn't match
if wantDownloadable != info.IsDownloadable {
ch <- getMetadataResult{notFound: true}
return
}
if !info.IsDownloadable {
_, ext := f.chooseExportFormat(info)
if ext != wantExportExtension {
ch <- getMetadataResult{notFound: true}
return
}
}

// Return our real result or error
ch <- res
}()
return ch
}
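The pattern in getMetadataForExt above runs each candidate query in its own goroutine and delivers a single result on a buffered channel, so the caller can fire several lookups concurrently and read them back in preference order without extra synchronization. A standalone sketch with assumed names:

```go
package main

import (
	"fmt"
	"time"
)

type result struct {
	value    string
	notFound bool
}

// lookup simulates a remote metadata query; a buffer of 1 means the goroutine
// never blocks, even if the caller abandons the channel early.
func lookup(name string, found bool) <-chan result {
	ch := make(chan result, 1)
	go func() {
		defer close(ch)
		time.Sleep(10 * time.Millisecond) // stand-in for a network call
		ch <- result{value: name, notFound: !found}
	}()
	return ch
}

func main() {
	// Fire all candidates up front, then read in preference order.
	candidates := []<-chan result{
		lookup("exact-path", false),
		lookup("export-path", true),
	}
	for _, ch := range candidates {
		if res := <-ch; !res.notFound {
			fmt.Println("using", res.value)
			return
		}
	}
	fmt.Println("not found")
}
```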
// For a given rclone-path, figure out what the Dropbox-path may be, in order of preference.
// Multiple paths might be plausible, due to export path munging.
func (f *Fs) possibleMetadatas(ctx context.Context, filePath string) (ret []<-chan getMetadataResult) {
ret = []<-chan getMetadataResult{}

// Prefer an exact match
ret = append(ret, f.getMetadataForExt(ctx, filePath, ""))

// Check if we're plausibly an export path, otherwise we're done
if f.opt.SkipExports || f.opt.ShowAllExports {
return
}
if notFound {
dotted := path.Ext(filePath)
if dotted == "" {
return
}
ext := exportExtension(dotted[1:])
if exportKnownExtensions[ext] == "" {
return
}

// We might be an export path! Try all possibilities
base := strings.TrimSuffix(filePath, dotted)

// `foo.papert.md` will only come from `foo.papert`. Never check something like `foo.papert.paper`
if strings.HasSuffix(base, paperTemplateExtension) {
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
return
}

// Otherwise, try both `foo.md` coming from `foo`, or from `foo.paper`
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
ret = append(ret, f.getMetadataForExt(ctx, base+paperExtension, ext))
return
}

// getFileMetadata gets the metadata for a file
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileMetadata, error) {
var res getMetadataResult

// Try all possible metadatas
possibleMetadatas := f.possibleMetadatas(ctx, filePath)
for _, ch := range possibleMetadatas {
res = <-ch

if res.err != nil {
return nil, res.err
}
if !res.notFound {
break
}
}

if res.notFound {
return nil, fs.ErrorObjectNotFound
}
fileInfo, ok := entry.(*files.FileMetadata)

fileInfo, ok := res.entry.(*files.FileMetadata)
if !ok {
if _, ok = entry.(*files.FolderMetadata); ok {
if _, ok = res.entry.(*files.FolderMetadata); ok {
return nil, fs.ErrorIsDir
}
return nil, fs.ErrorNotAFile
@@ -612,15 +788,15 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
}

// getDirMetadata gets the metadata for a directory
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, dirPath)
if err != nil {
return nil, err
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (*files.FolderMetadata, error) {
res := f.getMetadata(ctx, dirPath)
if res.err != nil {
return nil, res.err
}
if notFound {
if res.notFound {
return nil, fs.ErrorDirNotFound
}
dirInfo, ok := entry.(*files.FolderMetadata)
dirInfo, ok := res.entry.(*files.FolderMetadata)
if !ok {
return nil, fs.ErrorIsFile
}
@@ -820,16 +996,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var res *files.ListFolderResult
for {
if !started {
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
Recursive: false,
Limit: 1000,
}
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(root))
arg.Recursive = false
arg.Limit = 1000

if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
res, err = f.srv.ListFolder(arg)
return shouldRetry(ctx, err)
})
if err != nil {
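The hunks above (here and in purgeCheck and changeNotifyCursor below) swap struct literals for the SDK's `files.NewListFolderArg` constructor. A plausible reading is that the generated `New*` constructors fill in required defaults, so fields added later by the SDK don't silently stay zero-valued. A minimal sketch of that convention; the types below are illustrative stand-ins, not the dropbox SDK:

```go
package main

import "fmt"

type ListFolderArg struct {
	Path      string
	Recursive bool
	Limit     uint32 // 0 would be rejected; the constructor sets a default
}

// NewListFolderArg mirrors the generated-constructor convention: required
// fields come from arguments, optional fields get safe defaults.
func NewListFolderArg(path string) *ListFolderArg {
	return &ListFolderArg{Path: path, Limit: 1000}
}

func main() {
	arg := NewListFolderArg("/photos")
	arg.Recursive = false // override defaults explicitly where needed
	fmt.Printf("%+v\n", *arg)
}
```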
@@ -882,7 +1057,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
return nil, err
}
entries = append(entries, o)
if o.(*Object).exportType.listable() {
entries = append(entries, o)
}
}
}
if !res.HasMore {
@@ -968,16 +1145,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
}

// check directory empty
arg := files.ListFolderArg{
Path: encRoot,
Recursive: false,
}
arg := files.NewListFolderArg(encRoot)
arg.Recursive = false
if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
res, err = f.srv.ListFolder(arg)
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1020,13 +1195,20 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}

// Find and remove existing object
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)

// Temporary Object under construction
dstObj := &Object{
fs: f,
@@ -1040,7 +1222,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
},
}
var err error
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.CopyV2(&arg)
@@ -1152,6 +1333,16 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shouldRetry(ctx, err)
})

if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
// Some plans can't create links with expiry
fs.Debugf(absPath, "can't create link with expiry, trying without")
createArg.Settings.Expires = nil
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
return shouldRetry(ctx, err)
})
}

if err != nil && strings.Contains(err.Error(),
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
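The PublicLink hunk above is a graceful-degradation retry: attempt the call with an optional setting, and if the server rejects that specific setting, drop it and try once more. A minimal sketch with assumed names:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

type linkSettings struct {
	Expires *string // optional setting some accounts don't support
}

// createLink is a stand-in for the remote API call.
func createLink(s linkSettings) (string, error) {
	if s.Expires != nil {
		return "", errors.New("settings_error/not_authorized")
	}
	return "https://example.com/share/abc", nil
}

func main() {
	expiry := "2025-01-01"
	settings := linkSettings{Expires: &expiry}

	url, err := createLink(settings)
	if err != nil && settings.Expires != nil && strings.Contains(err.Error(), "not_authorized") {
		// The plan doesn't support expiring links: degrade gracefully.
		settings.Expires = nil
		url, err = createLink(settings)
	}
	fmt.Println(url, err)
}
```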
@@ -1316,16 +1507,14 @@ func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error)
var startCursor *files.ListFolderGetLatestCursorResult

err = f.pacer.Call(func() (bool, error) {
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(f.slashRoot),
Recursive: true,
}
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(f.slashRoot))
arg.Recursive = true

if arg.Path == "/" {
arg.Path = ""
}

startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
startCursor, err = f.srv.ListFolderGetLatestCursor(arg)

return shouldRetry(ctx, err)
})
@@ -1429,8 +1618,50 @@ func (f *Fs) Shutdown(ctx context.Context) error {
return nil
}

func (f *Fs) chooseExportFormat(info *files.FileMetadata) (exportAPIFormat, exportExtension) {
// Find API export formats Dropbox supports for this file
// Sometimes Dropbox lists a format in ExportAs but not ExportOptions, so check both
ei := info.ExportInfo
dropboxFormatStrings := append([]string{ei.ExportAs}, ei.ExportOptions...)

// Find which extensions these correspond to
exportExtensions := map[exportExtension]exportAPIFormat{}
var dropboxPreferredAPIFormat exportAPIFormat
var dropboxPreferredExtension exportExtension
for _, format := range dropboxFormatStrings {
apiFormat := exportAPIFormat(format)
// Only consider formats we know about
if ext, ok := exportKnownAPIFormats[apiFormat]; ok {
if dropboxPreferredAPIFormat == "" {
dropboxPreferredAPIFormat = apiFormat
dropboxPreferredExtension = ext
}
exportExtensions[ext] = apiFormat
}
}

// See if the user picked a valid extension
for _, ext := range f.exportExts {
if apiFormat, ok := exportExtensions[ext]; ok {
return apiFormat, ext
}
}

// If no matches, prefer the first valid format Dropbox lists
return dropboxPreferredAPIFormat, dropboxPreferredExtension
}
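chooseExportFormat above intersects the server's offered formats with the user's preference list, falling back to the server's first known format when nothing matches. A standalone sketch of that selection logic, detached from the Dropbox types:

```go
package main

import "fmt"

func choose(offered []string, known map[string]string, prefs []string) (format, ext string) {
	available := map[string]string{} // extension -> API format
	var firstFormat, firstExt string
	for _, f := range offered {
		if e, ok := known[f]; ok {
			if firstFormat == "" {
				firstFormat, firstExt = f, e
			}
			available[e] = f
		}
	}
	// User preference wins when it is actually available.
	for _, e := range prefs {
		if f, ok := available[e]; ok {
			return f, e
		}
	}
	// Otherwise take the server's first known offering.
	return firstFormat, firstExt
}

func main() {
	known := map[string]string{"markdown": "md", "html": "html"}
	fmt.Println(choose([]string{"markdown", "html"}, known, []string{"html"})) // html html
	fmt.Println(choose([]string{"markdown"}, known, []string{"html"}))        // markdown md
}
```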
// ------------------------------------------------------------

func (et exportType) listable() bool {
return et != exportHide
}

// something we should _try_ to export
func (et exportType) exportable() bool {
return et == exportExportable || et == exportListOnly
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
@@ -1474,6 +1705,32 @@ func (o *Object) Size() int64 {
return o.bytes
}

func (o *Object) setMetadataForExport(info *files.FileMetadata) {
o.bytes = -1
o.hash = ""

if o.fs.opt.SkipExports {
o.exportType = exportHide
return
}
if o.fs.opt.ShowAllExports {
o.exportType = exportListOnly
return
}

var exportExt exportExtension
o.exportAPIFormat, exportExt = o.fs.chooseExportFormat(info)
if o.exportAPIFormat == "" {
o.exportType = exportHide
} else {
o.exportType = exportExportable
// get rid of any paper extension, if present
o.remote = strings.TrimSuffix(o.remote, paperExtension)
// add the export extension
o.remote += "." + string(exportExt)
}
}

// setMetadataFromEntry sets the fs data from a files.FileMetadata
//
// This isn't a complete set of metadata and has an inaccurate date
@@ -1482,6 +1739,10 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
o.bytes = int64(info.Size)
o.modTime = info.ClientModified
o.hash = info.ContentHash

if !info.IsDownloadable {
o.setMetadataForExport(info)
}
return nil
}

@@ -1545,6 +1806,27 @@ func (o *Object) Storable() bool {
return true
}

func (o *Object) export(ctx context.Context) (in io.ReadCloser, err error) {
if o.exportType == exportListOnly || o.exportAPIFormat == "" {
fs.Debugf(o.remote, "No export format found")
return nil, fs.ErrorObjectNotFound
}

arg := files.ExportArg{Path: o.id, ExportFormat: string(o.exportAPIFormat)}
var exportResult *files.ExportResult
err = o.fs.pacer.Call(func() (bool, error) {
exportResult, in, err = o.fs.srv.Export(&arg)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}

o.bytes = int64(exportResult.ExportMetadata.Size)
o.hash = exportResult.ExportMetadata.ExportHash
return
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.SharedFiles {
@@ -1564,6 +1846,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return
}

if o.exportType.exportable() {
return o.export(ctx)
}

fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{
@@ -1692,14 +1978,10 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f

err = o.fs.pacer.Call(func() (bool, error) {
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
}
// after the first chunk is uploaded, we retry everything
// after the first chunk is uploaded, we retry everything except the excluded errors
return err != nil, err
})
if err != nil {

@@ -1,9 +1,16 @@
package dropbox

import (
"context"
"io"
"strings"
"testing"

"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestInternalCheckPathLength(t *testing.T) {
@@ -42,3 +49,54 @@ func TestInternalCheckPathLength(t *testing.T) {
assert.Equal(t, test.ok, err == nil, test.in)
}
}

func (f *Fs) importPaperForTest(t *testing.T) {
content := `# test doc

Lorem ipsum __dolor__ sit amet
[link](http://google.com)
`

arg := files.PaperCreateArg{
Path: f.slashRootSlash + "export.paper",
ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}},
}
var err error
err = f.pacer.Call(func() (bool, error) {
reader := strings.NewReader(content)
_, err = f.srv.PaperCreate(&arg, reader)
return shouldRetry(context.Background(), err)
})
require.NoError(t, err)
}

func (f *Fs) InternalTestPaperExport(t *testing.T) {
ctx := context.Background()
f.importPaperForTest(t)

f.exportExts = []exportExtension{"html"}

obj, err := f.NewObject(ctx, "export.html")
require.NoError(t, err)

rc, err := obj.Open(ctx)
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()

buf, err := io.ReadAll(rc)
require.NoError(t, err)
text := string(buf)

for _, excerpt := range []string{
"Lorem ipsum",
"<b>dolor</b>",
`href="http://google.com"`,
} {
require.Contains(t, text, excerpt)
}
}

func (f *Fs) InternalTest(t *testing.T) {
t.Run("PaperExport", f.InternalTestPaperExport)
}

var _ fstests.InternalTester = (*Fs)(nil)

@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
return false, err // No such user
case 186:
return false, err // IP blocked?
case 374:
case 374, 412: // Flood detected seems to be #412 now
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
default:

@@ -441,23 +441,28 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
srcFs := srcObj.fs

// Find current directory ID
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
}

// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}

// If it is in the correct directory, just rename it
var url string
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if srcDirectoryID == dstDirectoryID {
// No rename needed
if srcLeaf == dstLeaf {
return src, nil
}
resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err)
}
@@ -466,11 +471,16 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
url = resp.URLs[0].URL
} else {
folderID, err := strconv.Atoi(directoryID)
dstFolderID, err := strconv.Atoi(dstDirectoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
rename := dstLeaf
// No rename needed
if srcLeaf == dstLeaf {
rename = ""
}
resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
if err != nil {
return nil, fmt.Errorf("couldn't move file: %w", err)
}
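The Move hunk above adds two short-circuits: when source and destination share a directory, a pure rename suffices, and an identical leaf name means there is nothing to do at all. A minimal sketch of that decision tree with assumed helper names:

```go
package main

import "fmt"

type object struct{ dir, leaf string }

func move(src object, dstDir, dstLeaf string) string {
	if src.dir == dstDir {
		if src.leaf == dstLeaf {
			return "no-op: already in place"
		}
		return fmt.Sprintf("rename %s -> %s in %s", src.leaf, dstLeaf, dstDir)
	}
	rename := dstLeaf
	if src.leaf == dstLeaf {
		rename = "" // move only, keep the name
	}
	return fmt.Sprintf("move %s to %s (rename=%q)", src.leaf, dstDir, rename)
}

func main() {
	src := object{dir: "42", leaf: "a.txt"}
	fmt.Println(move(src, "42", "a.txt")) // no-op
	fmt.Println(move(src, "42", "b.txt")) // rename in place
	fmt.Println(move(src, "99", "a.txt")) // move without rename
}
```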
@@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{})

// fields returns the JSON fields in use by opt as a | separated
// string.
func fields(opt interface{}) (pipeTags string, err error) {
func fields(opt any) (pipeTags string, err error) {
var tags []string
def := reflect.ValueOf(opt)
defType := def.Type()
for i := 0; i < def.NumField(); i++ {
for i := range def.NumField() {
field := defType.Field(i)
tag, ok := field.Tag.Lookup("json")
if !ok {
@@ -239,7 +239,7 @@ func fields(opt interface{}) (pipeTags string, err error) {

// mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure.
func mustFields(opt interface{}) string {
func mustFields(opt any) string {
tags, err := fields(opt)
if err != nil {
panic(err)
@@ -351,12 +351,12 @@ type SpaceInfo struct {
// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
Status
Deleted []string `json:"deleted"`
Errors []interface{} `json:"errors"`
ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"`
PaSize string `json:"pa_size"`
Deleted []string `json:"deleted"`
Errors []any `json:"errors"`
ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"`
PaSize string `json:"pa_size"`
//SpaceInfo SpaceInfo `json:"spaceinfo"`
}

@@ -371,7 +371,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
}

// params for rpc
type params map[string]interface{}
type params map[string]any

// rpc calls the rpc.php method of the SME file fabric
//

81 backend/filelu/api/types.go (new file)
@@ -0,0 +1,81 @@
// Package api defines types for interacting with the FileLu API.
package api

import "encoding/json"

// CreateFolderResponse represents the response for creating a folder.
type CreateFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
FldID interface{} `json:"fld_id"`
} `json:"result"`
}

// DeleteFolderResponse represents the response for deleting a folder.
type DeleteFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
}

// FolderListResponse represents the response for listing folders.
type FolderListResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
Files []struct {
Name string `json:"name"`
FldID json.Number `json:"fld_id"`
Path string `json:"path"`
FileCode string `json:"file_code"`
Size int64 `json:"size"`
} `json:"files"`
Folders []struct {
Name string `json:"name"`
FldID json.Number `json:"fld_id"`
Path string `json:"path"`
} `json:"folders"`
} `json:"result"`
}

// FileDirectLinkResponse represents the response for a direct link to a file.
type FileDirectLinkResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
URL string `json:"url"`
Size int64 `json:"size"`
} `json:"result"`
}

// FileInfoResponse represents the response for file information.
type FileInfoResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result []struct {
Size string `json:"size"`
Name string `json:"name"`
FileCode string `json:"filecode"`
Hash string `json:"hash"`
Status int `json:"status"`
} `json:"result"`
}

// DeleteFileResponse represents the response for deleting a file.
type DeleteFileResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
}

// AccountInfoResponse represents the response for account information.
type AccountInfoResponse struct {
Status int `json:"status"` // HTTP status code of the response.
Msg string `json:"msg"` // Message describing the response.
Result struct {
PremiumExpire string `json:"premium_expire"` // Expiration date of premium access.
Email string `json:"email"` // User's email address.
UType string `json:"utype"` // User type (e.g., premium or free).
Storage string `json:"storage"` // Total storage available to the user.
StorageUsed string `json:"storage_used"` // Amount of storage used.
} `json:"result"` // Nested result structure containing account details.
}
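The `fld_id` fields above are declared as `json.Number`, presumably because the API may return the ID either as a bare number or as a numeric string; `json.Number` accepts both and defers integer conversion until it is needed. A self-contained sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type folder struct {
	Name  string      `json:"name"`
	FldID json.Number `json:"fld_id"`
}

func main() {
	for _, payload := range []string{
		`{"name":"a","fld_id":42}`,   // bare number
		`{"name":"b","fld_id":"42"}`, // numeric string
	} {
		var f folder
		if err := json.Unmarshal([]byte(payload), &f); err != nil {
			fmt.Println("error:", err)
			continue
		}
		id, _ := f.FldID.Int64() // convert only when an integer is needed
		fmt.Println(f.Name, id)
	}
}
```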
366 backend/filelu/filelu.go (new file)
@@ -0,0 +1,366 @@
// Package filelu provides an interface to the FileLu storage system.
package filelu

import (
"context"
"fmt"
"io"
"net/http"
"os"
"path"
"strings"
"time"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)

// Register the backend with Rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "filelu",
Description: "FileLu Cloud Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "key",
Help: "Your FileLu Rclone key from My Account",
Required: true,
Sensitive: true,
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeExclamation |
encoder.EncodeDoubleQuote |
encoder.EncodeSingleQuote |
encoder.EncodeBackQuote |
encoder.EncodeQuestion |
encoder.EncodeDollar |
encoder.EncodeColon |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeCrLf |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeLeftSpace |
encoder.EncodeLeftPeriod |
encoder.EncodeLeftTilde |
encoder.EncodeLeftCrLfHtVt |
encoder.EncodeRightPeriod |
encoder.EncodeRightCrLfHtVt |
encoder.EncodeSquareBracket |
encoder.EncodeSemicolon |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
}})
}

// Options defines the configuration for the FileLu backend
type Options struct {
Key string `config:"key"`
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents the FileLu file system
type Fs struct {
name string
root string
opt Options
features *fs.Features
endpoint string
pacer *pacer.Pacer
srv *rest.Client
client *http.Client
targetFile string
}

// NewFs creates a new Fs object for FileLu
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, fmt.Errorf("failed to parse config: %w", err)
}

if opt.Key == "" {
return nil, fmt.Errorf("FileLu Rclone Key is required")
}

client := fshttp.NewClient(ctx)

if strings.TrimSpace(root) == "" {
root = ""
}
root = strings.Trim(root, "/")

filename := ""

f := &Fs{
name: name,
opt: *opt,
endpoint: "https://filelu.com/rclone",
client: client,
srv: rest.NewClient(client).SetRoot("https://filelu.com/rclone"),
pacer: pacer.New(),
targetFile: filename,
root: root,
}

f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
WriteMetadata: false,
SlowHash: true,
}).Fill(ctx, f)

rootContainer, rootDirectory := rootSplit(f.root)
if rootContainer != "" && rootDirectory != "" {
// Check to see if the (container,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.root = strings.Trim(newRoot, "/")
_, err := f.NewObject(ctx, leaf)
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f.root = strings.Trim(oldRoot, "/")
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}

return f, nil
}
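The tail of NewFs above follows the rclone convention for a root that turns out to be a file: return an Fs rooted at the parent directory together with fs.ErrorIsFile, so callers can still address the single file. A minimal sketch of that convention with illustrative stand-in names:

```go
package main

import (
	"errors"
	"fmt"
	"path"
	"strings"
)

var errIsFile = errors.New("is a file not a directory") // stand-in for fs.ErrorIsFile

type remoteFs struct{ root string }

// newFs probes whether root names a file; exists is a stand-in for a
// NewObject-style lookup against the remote.
func newFs(root string, exists func(string) bool) (*remoteFs, error) {
	parent, leaf := path.Split(root)
	if exists(path.Join(parent, leaf)) {
		// Point the Fs at the parent and signal that root was a file.
		return &remoteFs{root: strings.Trim(parent, "/")}, errIsFile
	}
	return &remoteFs{root: root}, nil
}

func main() {
	isFile := func(p string) bool { return p == "docs/report.txt" }
	f, err := newFs("docs/report.txt", isFile)
	fmt.Printf("root=%q err=%v\n", f.root, err) // root="docs" err=is a file not a directory
}
```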
// Mkdir to create directory on remote server.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fullPath := path.Clean(f.root + "/" + dir)
_, err := f.createFolder(ctx, fullPath)
return err
}

// About provides usage statistics for the remote
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
accountInfo, err := f.getAccountInfo(ctx)
if err != nil {
return nil, err
}

totalStorage, err := parseStorageToBytes(accountInfo.Result.Storage)
if err != nil {
return nil, fmt.Errorf("failed to parse total storage: %w", err)
}

usedStorage, err := parseStorageToBytes(accountInfo.Result.StorageUsed)
if err != nil {
return nil, fmt.Errorf("failed to parse used storage: %w", err)
}

return &fs.Usage{
Total: fs.NewUsageValue(totalStorage), // Total bytes available
Used: fs.NewUsageValue(usedStorage), // Total bytes used
Free: fs.NewUsageValue(totalStorage - usedStorage),
}, nil
}

// Purge deletes the directory and all its contents
func (f *Fs) Purge(ctx context.Context, dir string) error {
fullPath := path.Join(f.root, dir)
if fullPath != "" {
fullPath = "/" + strings.Trim(fullPath, "/")
}
return f.deleteFolder(ctx, fullPath)
}

// List returns a list of files and folders for the given directory
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
// Compose full path for API call
fullPath := path.Join(f.root, dir)
fullPath = "/" + strings.Trim(fullPath, "/")
if fullPath == "/" {
fullPath = ""
}

var entries fs.DirEntries
result, err := f.getFolderList(ctx, fullPath)
if err != nil {
return nil, err
}

fldMap := map[string]bool{}
for _, folder := range result.Result.Folders {
fldMap[folder.FldID.String()] = true
if f.root == "" && dir == "" && strings.Contains(folder.Path, "/") {
continue
}

paths := strings.Split(folder.Path, fullPath+"/")
remote := paths[0]
if len(paths) > 1 {
remote = paths[1]
}

if strings.Contains(remote, "/") {
continue
}

pathsWithoutRoot := strings.Split(folder.Path, "/"+f.root+"/")
remotePathWithoutRoot := pathsWithoutRoot[0]
if len(pathsWithoutRoot) > 1 {
remotePathWithoutRoot = pathsWithoutRoot[1]
}
remotePathWithoutRoot = strings.TrimPrefix(remotePathWithoutRoot, "/")
entries = append(entries, fs.NewDir(remotePathWithoutRoot, time.Now()))
}
for _, file := range result.Result.Files {
if _, ok := fldMap[file.FldID.String()]; ok {
continue
}
remote := path.Join(dir, file.Name)
// trim leading slashes
remote = strings.TrimPrefix(remote, "/")
obj := &Object{
fs: f,
remote: remote,
size: file.Size,
modTime: time.Now(),
}
entries = append(entries, obj)
}
return entries, nil
}

// Put uploads a file directly to the destination folder in the FileLu storage system.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}

err := f.uploadFile(ctx, in, src.Remote())
if err != nil {
return nil, err
}

newObject := &Object{
fs: f,
remote: src.Remote(),
size: src.Size(),
modTime: src.ModTime(ctx),
}
fs.Infof(f, "Put: Successfully uploaded new file %q", src.Remote())
return newObject, nil
}

// Move moves the file to the specified location
func (f *Fs) Move(ctx context.Context, src fs.Object, destinationPath string) (fs.Object, error) {

if strings.HasPrefix(destinationPath, "/") || strings.Contains(destinationPath, ":\\") {
dir := path.Dir(destinationPath)
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, fmt.Errorf("failed to create destination directory: %w", err)
}

reader, err := src.Open(ctx)
if err != nil {
return nil, fmt.Errorf("failed to open source file: %w", err)
}
defer func() {
if err := reader.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()

dest, err := os.Create(destinationPath)
if err != nil {
return nil, fmt.Errorf("failed to create destination file: %w", err)
}
defer func() {
if err := dest.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()

if _, err := io.Copy(dest, reader); err != nil {
return nil, fmt.Errorf("failed to copy file content: %w", err)
}

if err := src.Remove(ctx); err != nil {
return nil, fmt.Errorf("failed to remove source file: %w", err)
}

return nil, nil
}

reader, err := src.Open(ctx)
if err != nil {
return nil, fmt.Errorf("failed to open source object: %w", err)
}
defer func() {
if err := reader.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()

err = f.uploadFile(ctx, reader, destinationPath)
if err != nil {
return nil, fmt.Errorf("failed to upload file to destination: %w", err)
}

if err := src.Remove(ctx); err != nil {
return nil, fmt.Errorf("failed to delete source file: %w", err)
}

return &Object{
fs: f,
remote: destinationPath,
size: src.Size(),
modTime: src.ModTime(ctx),
}, nil
}

// Rmdir removes a directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
fullPath := path.Join(f.root, dir)
if fullPath != "" {
fullPath = "/" + strings.Trim(fullPath, "/")
}

// Step 1: Check if folder is empty
listResp, err := f.getFolderList(ctx, fullPath)
if err != nil {
return err
}
if len(listResp.Result.Files) > 0 || len(listResp.Result.Folders) > 0 {
return fmt.Errorf("Rmdir: directory %q is not empty", fullPath)
}

// Step 2: Delete the folder
return f.deleteFolder(ctx, fullPath)
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)
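The var block closing the file above is the standard compile-time interface check: assigning `(*T)(nil)` to a blank interface-typed variable makes the build fail if `T` stops satisfying the interface, at zero runtime cost. A tiny standalone illustration:

```go
package main

import "fmt"

type Purger interface {
	Purge() error
}

type myFs struct{}

func (f *myFs) Purge() error { return nil }

// If Purge is renamed or its signature drifts, this line no longer compiles.
var _ Purger = (*myFs)(nil)

func main() {
	fmt.Println("interface checks are free at runtime")
}
```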
324
backend/filelu/filelu_client.go
Normal file
324
backend/filelu/filelu_client.go
Normal file
@@ -0,0 +1,324 @@
|
||||
package filelu
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/backend/filelu/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// createFolder creates a folder at the specified path.
|
||||
func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolderResponse, error) {
|
||||
encodedDir := f.fromStandardPath(dirPath)
|
||||
apiURL := fmt.Sprintf("%s/folder/create?folder_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(encodedDir),
|
||||
url.QueryEscape(f.opt.Key), // assuming f.opt.Key is the correct field
|
||||
)
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
result := api.CreateFolderResponse{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var innerErr error
|
||||
resp, innerErr = f.client.Do(req)
|
||||
return fserrors.ShouldRetry(innerErr), innerErr
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("request failed: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
err = json.NewDecoder(resp.Body).Decode(&result)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
if result.Status != 200 {
|
||||
return nil, fmt.Errorf("error: %s", result.Msg)
|
||||
}
|
||||
|
||||
fs.Infof(f, "Successfully created folder %q with ID %v", dirPath, result.Result.FldID)
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// getFolderList List both files and folders in a directory.
|
||||
func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListResponse, error) {
|
||||
encodedDir := f.fromStandardPath(path)
|
||||
apiURL := fmt.Sprintf("%s/folder/list?folder_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(encodedDir),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
var body []byte
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to list directory: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
body, err = io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error reading response body: %w", err)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var response api.FolderListResponse
|
||||
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&response); err != nil {
|
||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
if response.Status != 200 {
|
||||
if strings.Contains(response.Msg, "Folder not found") {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return nil, fmt.Errorf("API error: %s", response.Msg)
|
||||
}
|
||||
|
||||
for index := range response.Result.Folders {
|
||||
response.Result.Folders[index].Path = f.toStandardPath(response.Result.Folders[index].Path)
|
||||
}
|
||||
|
||||
for index := range response.Result.Files {
|
||||
response.Result.Files[index].Name = f.toStandardPath(response.Result.Files[index].Name)
|
||||
}
|
||||
|
||||
return &response, nil
|
||||
|
||||
}
|
||||
|
||||
// deleteFolder deletes a folder at the specified path.
|
||||
func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error {
|
||||
fullPath = f.fromStandardPath(fullPath)
|
||||
deleteURL := fmt.Sprintf("%s/folder/delete?folder_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(fullPath),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
delResp := api.DeleteFolderResponse{}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", deleteURL, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &delResp); err != nil {
|
||||
return false, fmt.Errorf("error decoding delete response: %w", err)
|
||||
}
|
||||
if delResp.Status != 200 {
|
||||
return false, fmt.Errorf("delete error: %s", delResp.Msg)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fs.Infof(f, "Rmdir: successfully deleted %q", fullPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getDirectLink of file from FileLu to download.
|
||||
func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64, error) {
|
||||
filePath = f.fromStandardPath(filePath)
|
||||
apiURL := fmt.Sprintf("%s/file/direct_link?file_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(filePath),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
result := api.FileDirectLinkResponse{}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return false, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != 200 {
|
||||
return false, fmt.Errorf("API error: %s", result.Msg)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
|
||||
return result.Result.URL, result.Result.Size, nil
|
||||
}
|
||||
|
||||
// deleteFile deletes a file based on filePath
|
||||
func (f *Fs) deleteFile(ctx context.Context, filePath string) error {
|
||||
filePath = f.fromStandardPath(filePath)
|
||||
apiURL := fmt.Sprintf("%s/file/remove?file_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(filePath),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
result := api.DeleteFileResponse{}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return false, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != 200 {
|
||||
return false, fmt.Errorf("API error: %s", result.Msg)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// getAccountInfo retrieves account information
|
||||
func (f *Fs) getAccountInfo(ctx context.Context) (*api.AccountInfoResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/account/info",
|
||||
Parameters: url.Values{
|
||||
"key": {f.opt.Key},
|
||||
},
|
||||
}
|
||||
|
||||
var result api.AccountInfoResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, callErr := f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return fserrors.ShouldRetry(callErr), callErr
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if result.Status != 200 {
|
||||
return nil, fmt.Errorf("error: %s", result.Msg)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// getFileInfo retrieves file information based on file code
|
||||
func (f *Fs) getFileInfo(ctx context.Context, fileCode string) (*api.FileInfoResponse, error) {
|
||||
u, _ := url.Parse(f.endpoint + "/file/info2")
|
||||
q := u.Query()
|
||||
q.Set("file_code", fileCode) // raw path — Go handles escaping properly here
|
||||
q.Set("key", f.opt.Key)
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
apiURL := f.endpoint + "/file/info2?" + u.RawQuery
|
||||
|
||||
var body []byte
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to fetch file info: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
body, err = io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error reading response body: %w", err)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := api.FileInfoResponse{}
|
||||
|
||||
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&result); err != nil {
|
||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != 200 || len(result.Result) == 0 {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
193
backend/filelu/filelu_file_uploader.go
Normal file
193
backend/filelu/filelu_file_uploader.go
Normal file
@@ -0,0 +1,193 @@
package filelu

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"net/url"
	"path"
	"strings"

	"github.com/rclone/rclone/fs"
)

// uploadFile uploads a file to FileLu
func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath string) error {
	directory := path.Dir(fileFullPath)
	fileName := path.Base(fileFullPath)
	if directory == "." {
		directory = ""
	}
	destinationFolderPath := path.Join(f.root, directory)
	if destinationFolderPath != "" {
		destinationFolderPath = "/" + strings.Trim(destinationFolderPath, "/")
	}

	existingEntries, err := f.List(ctx, path.Dir(fileFullPath))
	if err != nil {
		if errors.Is(err, fs.ErrorDirNotFound) {
			err = f.Mkdir(ctx, path.Dir(fileFullPath))
			if err != nil {
				return fmt.Errorf("failed to create directory: %w", err)
			}
		} else {
			return fmt.Errorf("failed to list existing files: %w", err)
		}
	}

	for _, entry := range existingEntries {
		if entry.Remote() == fileFullPath {
			_, ok := entry.(fs.Object)
			if !ok {
				continue
			}

			// A file already exists at this path, so remove it before uploading
			filePath := "/" + strings.Trim(destinationFolderPath+"/"+fileName, "/")
			err = f.deleteFile(ctx, filePath)
			if err != nil {
				return fmt.Errorf("failed to delete existing file: %w", err)
			}
		}
	}

	uploadURL, sessID, err := f.getUploadServer(ctx)
	if err != nil {
		return fmt.Errorf("failed to retrieve upload server: %w", err)
	}

	// The fileCode isn't used here, so just handle the error
	if _, err := f.uploadFileWithDestination(ctx, uploadURL, sessID, fileName, fileContent, destinationFolderPath); err != nil {
		return fmt.Errorf("failed to upload file: %w", err)
	}

	return nil
}

// getUploadServer gets the upload server URL with proper key authentication
func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
	apiURL := fmt.Sprintf("%s/upload/server?key=%s", f.endpoint, url.QueryEscape(f.opt.Key))

	var result struct {
		Status int    `json:"status"`
		SessID string `json:"sess_id"`
		Result string `json:"result"`
		Msg    string `json:"msg"`
	}

	err := f.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
		if err != nil {
			return false, fmt.Errorf("failed to create request: %w", err)
		}

		resp, err := f.client.Do(req)
		if err != nil {
			return shouldRetry(err), fmt.Errorf("failed to get upload server: %w", err)
		}
		defer func() {
			if err := resp.Body.Close(); err != nil {
				fs.Logf(nil, "Failed to close response body: %v", err)
			}
		}()

		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			return false, fmt.Errorf("error decoding response: %w", err)
		}

		if result.Status != 200 {
			return false, fmt.Errorf("API error: %s", result.Msg)
		}

		return shouldRetryHTTP(resp.StatusCode), nil
	})
	if err != nil {
		return "", "", err
	}

	return result.Result, result.SessID, nil
}

// uploadFileWithDestination uploads a file directly to a specified folder using a file content reader.
func (f *Fs) uploadFileWithDestination(ctx context.Context, uploadURL, sessID, fileName string, fileContent io.Reader, dirPath string) (string, error) {
	destinationPath := f.fromStandardPath(dirPath)
	encodedFileName := f.fromStandardPath(fileName)
	pr, pw := io.Pipe()
	writer := multipart.NewWriter(pw)
	isDeletionRequired := false
	go func() {
		defer func() {
			if err := pw.Close(); err != nil {
				fs.Logf(nil, "Failed to close pipe writer: %v", err)
			}
		}()
		_ = writer.WriteField("sess_id", sessID)
		_ = writer.WriteField("utype", "prem")
		_ = writer.WriteField("fld_path", destinationPath)

		part, err := writer.CreateFormFile("file_0", encodedFileName)
		if err != nil {
			pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
			return
		}

		if _, err := io.Copy(part, fileContent); err != nil {
			isDeletionRequired = true
			pw.CloseWithError(fmt.Errorf("failed to copy file content: %w", err))
			return
		}

		if err := writer.Close(); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
		}
	}()

	var fileCode string
	err := f.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "POST", uploadURL, pr)
		if err != nil {
			return false, fmt.Errorf("failed to create upload request: %w", err)
		}
		req.Header.Set("Content-Type", writer.FormDataContentType())

		resp, err := f.client.Do(req)
		if err != nil {
			return shouldRetry(err), fmt.Errorf("failed to send upload request: %w", err)
		}
		defer respBodyClose(resp.Body)

		var result []struct {
			FileCode   string `json:"file_code"`
			FileStatus string `json:"file_status"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			return false, fmt.Errorf("failed to parse upload response: %w", err)
		}

		if len(result) == 0 {
			return false, errors.New("upload failed: empty response from server")
		}
		if result[0].FileStatus != "OK" {
			return false, fmt.Errorf("upload failed with status: %s", result[0].FileStatus)
		}

		fileCode = result[0].FileCode
		return shouldRetryHTTP(resp.StatusCode), nil
	})

	if err != nil && isDeletionRequired {
		// Attempt to delete the partially uploaded file if the upload fails
		_ = f.deleteFile(ctx, destinationPath+"/"+fileName)
	}

	return fileCode, err
}
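
// The upload above streams the multipart body through an io.Pipe so the file
// is never buffered whole in memory: the goroutine writes form fields and the
// file into one end while the HTTP client reads from the other. A minimal
// sketch of the pattern (names here are illustrative, not part of this backend):
//
//	pr, pw := io.Pipe()
//	w := multipart.NewWriter(pw)
//	go func() {
//		part, _ := w.CreateFormFile("file_0", "name")
//		_, err := io.Copy(part, src) // src is the file content reader
//		_ = w.Close()                // writes the trailing multipart boundary
//		pw.CloseWithError(err)       // propagate copy errors to the reader side
//	}()
//	req, _ := http.NewRequest("POST", uploadURL, pr)
//	req.Header.Set("Content-Type", w.FormDataContentType())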

// respBodyClose closes a response body, logging any error.
func respBodyClose(responseBody io.Closer) {
	if cerr := responseBody.Close(); cerr != nil {
		fs.Logf(nil, "Error closing response body: %v", cerr)
	}
}
112
backend/filelu/filelu_helper.go
Normal file
@@ -0,0 +1,112 @@
package filelu

import (
	"context"
	"errors"
	"fmt"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
)

// errFileNotFound represents a file not found error
var errFileNotFound = errors.New("file not found")

// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
	// Prepare parent directory
	parentDir := path.Dir(filePath)

	// List the parent directory to get all the files in it
	result, err := f.getFolderList(ctx, parentDir)
	if err != nil {
		return "", err
	}

	for _, file := range result.Result.Files {
		filePathFromServer := parentDir + "/" + file.Name
		if parentDir == "/" {
			filePathFromServer = "/" + file.Name
		}
		if filePath == filePathFromServer {
			return file.FileCode, nil
		}
	}

	return "", errFileNotFound
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

func (f *Fs) fromStandardPath(remote string) string {
	return f.opt.Enc.FromStandardPath(remote)
}

func (f *Fs) toStandardPath(remote string) string {
	return f.opt.Enc.ToStandardPath(remote)
}

// Hashes returns an empty hash set, indicating no hash support
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet()
}

// Name returns the remote name
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root path
func (f *Fs) Root() string {
	return f.root
}

// Precision returns the precision of the remote
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// String returns a description of the Fs
func (f *Fs) String() string {
	return fmt.Sprintf("FileLu root '%s'", f.root)
}

// isFileCode checks if a string looks like a file code:
// exactly 12 lowercase alphanumeric characters.
func isFileCode(s string) bool {
	if len(s) != 12 {
		return false
	}
	for _, c := range s {
		if !((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
			return false
		}
	}
	return true
}
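// For example (illustrative values, not real codes):
//
//	isFileCode("8rdselmhxnuz") // true: 12 lowercase alphanumerics
//	isFileCode("short")        // false: wrong length
//	isFileCode("UPPERCASE123") // false: uppercase not allowed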

// shouldRetry reports whether a transport-level error is worth retrying
func shouldRetry(err error) bool {
	return fserrors.ShouldRetry(err)
}

// shouldRetryHTTP reports whether an HTTP status code is worth retrying:
// 429 (rate limited) or any 5xx server error.
func shouldRetryHTTP(code int) bool {
	return code == 429 || code >= 500
}
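// Together these two helpers drive the pacer's retry loop. A minimal sketch
// of the calling pattern used throughout this backend (req and result are
// placeholders for a prepared request and a response struct):
//
//	err := f.pacer.Call(func() (bool, error) {
//		resp, err := f.client.Do(req)
//		if err != nil {
//			return shouldRetry(err), err // retry transient transport errors
//		}
//		defer respBodyClose(resp.Body)
//		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
//			return false, err
//		}
//		return shouldRetryHTTP(resp.StatusCode), nil // retry 429/5xx
//	})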

// rootSplit splits an absolute path into its first path segment
// (the "bucket") and the remainder.
func rootSplit(absPath string) (bucket, bucketPath string) {
	// No bucket
	if absPath == "" {
		return "", ""
	}
	slash := strings.IndexRune(absPath, '/')
	// Bucket but no path
	if slash < 0 {
		return absPath, ""
	}
	return absPath[:slash], absPath[slash+1:]
}
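// For example:
//
//	rootSplit("bucket/dir/file.txt") // -> ("bucket", "dir/file.txt")
//	rootSplit("bucket")              // -> ("bucket", "")
//	rootSplit("")                    // -> ("", "")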
259
backend/filelu/filelu_object.go
Normal file
@@ -0,0 +1,259 @@
package filelu

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
)

// Object describes a FileLu object
type Object struct {
	fs      *Fs
	remote  string
	size    int64
	modTime time.Time
}

// NewObject creates a new Object for the given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	filePath := path.Join(f.root, remote)
	filePath = "/" + strings.Trim(filePath, "/")

	// Get the file code
	fileCode, err := f.getFileCode(ctx, filePath)
	if err != nil {
		return nil, fs.ErrorObjectNotFound
	}

	// Get the file info
	fileInfos, err := f.getFileInfo(ctx, fileCode)
	if err != nil {
		return nil, fmt.Errorf("failed to get file info: %w", err)
	}

	fileInfo := fileInfos.Result[0]
	size, _ := strconv.ParseInt(fileInfo.Size, 10, 64)

	return &Object{
		fs:      f,
		remote:  remote,
		size:    size,
		modTime: time.Now(),
	}, nil
}

// Open opens the object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	filePath := path.Join(o.fs.root, o.remote)
	// Get direct link
	directLink, size, err := o.fs.getDirectLink(ctx, filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to get direct link: %w", err)
	}

	o.size = size

	// Offset and Count for range download
	var offset int64
	var count int64
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
		case *fs.SeekOption:
			offset = x.Offset
			count = o.size
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	var reader io.ReadCloser
	err = o.fs.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", directLink, nil)
		if err != nil {
			return false, fmt.Errorf("failed to create download request: %w", err)
		}

		resp, err := o.fs.client.Do(req)
		if err != nil {
			return shouldRetry(err), fmt.Errorf("failed to download file: %w", err)
		}

		if resp.StatusCode != http.StatusOK {
			defer func() {
				if err := resp.Body.Close(); err != nil {
					fs.Logf(nil, "Failed to close response body: %v", err)
				}
			}()
			return false, fmt.Errorf("failed to download file: HTTP %d", resp.StatusCode)
		}

		// Read the whole body, then apply offset and count. Note that this
		// buffers the entire file in memory rather than streaming it.
		currentContents, err := io.ReadAll(resp.Body)
		if err != nil {
			return false, fmt.Errorf("failed to read response body: %w", err)
		}

		if offset > 0 {
			if offset > int64(len(currentContents)) {
				return false, fmt.Errorf("offset %d exceeds file size %d", offset, len(currentContents))
			}
			currentContents = currentContents[offset:]
		}
		if count > 0 && count < int64(len(currentContents)) {
			currentContents = currentContents[:count]
		}
		reader = io.NopCloser(bytes.NewReader(currentContents))

		return false, nil
	})
	if err != nil {
		return nil, err
	}

	return reader, nil
}

// Update updates the object with new data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if src.Size() <= 0 {
		return fs.ErrorCantUploadEmptyFiles
	}

	err := o.fs.uploadFile(ctx, in, o.remote)
	if err != nil {
		return fmt.Errorf("failed to upload file: %w", err)
	}
	o.size = src.Size()
	return nil
}

// Remove deletes the object from FileLu
func (o *Object) Remove(ctx context.Context) error {
	fullPath := "/" + strings.Trim(path.Join(o.fs.root, o.remote), "/")

	err := o.fs.deleteFile(ctx, fullPath)
	if err != nil {
		return err
	}
	fs.Infof(o.fs, "Successfully deleted file: %s", fullPath)
	return nil
}

// Hash returns the MD5 hash of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}

	var fileCode string
	if isFileCode(o.fs.root) {
		fileCode = o.fs.root
	} else {
		// Look for a 12-character file code in parentheses in the remote name
		matches := regexp.MustCompile(`\((.*?)\)`).FindAllStringSubmatch(o.remote, -1)
		for _, match := range matches {
			if len(match) > 1 && len(match[1]) == 12 {
				fileCode = match[1]
				break
			}
		}
	}
	if fileCode == "" {
		return "", fmt.Errorf("no valid file code found in the remote path")
	}

	apiURL := fmt.Sprintf("%s/file/info?file_code=%s&key=%s",
		o.fs.endpoint, url.QueryEscape(fileCode), url.QueryEscape(o.fs.opt.Key))

	var result struct {
		Status int    `json:"status"`
		Msg    string `json:"msg"`
		Result []struct {
			Hash string `json:"hash"`
		} `json:"result"`
	}
	err := o.fs.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
		if err != nil {
			return false, err
		}
		resp, err := o.fs.client.Do(req)
		if err != nil {
			return shouldRetry(err), err
		}
		defer func() {
			if err := resp.Body.Close(); err != nil {
				fs.Logf(nil, "Failed to close response body: %v", err)
			}
		}()

		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			return false, err
		}
		return shouldRetryHTTP(resp.StatusCode), nil
	})
	if err != nil {
		return "", err
	}
	if result.Status != 200 || len(result.Result) == 0 {
		return "", fmt.Errorf("error: unable to fetch hash: %s", result.Msg)
	}

	return result.Result[0].Hash, nil
}

// String returns a string representation of the object
func (o *Object) String() string {
	return o.remote
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of the object
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

// Storable indicates whether the object is storable
func (o *Object) Storable() bool {
	return true
}
16
backend/filelu/filelu_test.go
Normal file
@@ -0,0 +1,16 @@
package filelu_test

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests for the FileLu backend
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestFileLu:",
		NilObject:       nil,
		SkipInvalidUTF8: true,
	})
}
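// To exercise this against a live remote, a "TestFileLu" remote has to be
// configured first; the -remote flag is the standard fstests mechanism, and
// the key value below is a placeholder:
//
//	rclone config create TestFileLu filelu key=YOUR_API_KEY
//	go test -v ./backend/filelu -remote TestFileLu: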
15
backend/filelu/utils.go
Normal file
@@ -0,0 +1,15 @@
package filelu

import (
	"fmt"
)

// parseStorageToBytes converts a storage size in GB (e.g., "10") to bytes
func parseStorageToBytes(storage string) (int64, error) {
	var gb float64
	_, err := fmt.Sscanf(storage, "%f", &gb)
	if err != nil {
		return 0, fmt.Errorf("failed to parse storage: %w", err)
	}
	return int64(gb * 1024 * 1024 * 1024), nil
}
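// For example, assuming the API reports storage in whole or fractional GB:
//
//	parseStorageToBytes("10")  // -> 10737418240 (10 GiB)
//	parseStorageToBytes("1.5") // -> 1610612736 (1.5 GiB)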
900
backend/filescom/filescom.go
Normal file
@@ -0,0 +1,900 @@
// Package filescom provides an interface to the Files.com
// object storage system.
package filescom

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"slices"
	"strings"
	"time"

	files_sdk "github.com/Files-com/files-sdk-go/v3"
	"github.com/Files-com/files-sdk-go/v3/bundle"
	"github.com/Files-com/files-sdk-go/v3/file"
	file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
	"github.com/Files-com/files-sdk-go/v3/folder"
	"github.com/Files-com/files-sdk-go/v3/session"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
)

/*
Run of rclone info
stringNeedsEscaping = []rune{
	'/', '\x00'
}
maxFileLength = 512 // for 1 byte unicode characters
maxFileLength = 512 // for 2 byte unicode characters
maxFileLength = 512 // for 3 byte unicode characters
maxFileLength = 512 // for 4 byte unicode characters
canWriteUnnormalized = true
canReadUnnormalized = true
canReadRenormalized = true
canStream = true
*/

const (
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential

	folderNotEmpty = "processing-failure/folder-not-empty"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "filescom",
		Description: "Files.com",
		NewFs:       NewFs,
		Options: []fs.Option{
			{
				Name: "site",
				Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
			}, {
				Name: "username",
				Help: "The username used to authenticate with Files.com.",
			}, {
				Name:       "password",
				Help:       "The password used to authenticate with Files.com.",
				IsPassword: true,
			}, {
				Name:      "api_key",
				Help:      "The API key used to authenticate with Files.com.",
				Advanced:  true,
				Sensitive: true,
			}, {
				Name:     config.ConfigEncoding,
				Help:     config.ConfigEncodingHelp,
				Advanced: true,
				Default: (encoder.Display |
					encoder.EncodeBackSlash |
					encoder.EncodeRightSpace |
					encoder.EncodeRightCrLfHtVt |
					encoder.EncodeInvalidUtf8),
			}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Site     string               `config:"site"`
	Username string               `config:"username"`
	Password string               `config:"password"`
	APIKey   string               `config:"api_key"`
	Enc      encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote files.com server
type Fs struct {
	name            string                 // name of this remote
	root            string                 // the path we are working on
	opt             Options                // parsed options
	features        *fs.Features           // optional features
	fileClient      *file.Client           // the connection to the file API
	folderClient    *folder.Client         // the connection to the folder API
	migrationClient *file_migration.Client // the connection to the file migration API
	bundleClient    *bundle.Client         // the connection to the bundle API
	pacer           *fs.Pacer              // pacer for API calls
}

// Object describes a files object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	size     int64     // size of the object
	crc32    string    // CRC32 of the object content
	md5      string    // MD5 of the object content
	mimeType string    // Content-Type of the object
	modTime  time.Time // modification time of the object
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("files root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Encode remote and turn it into an absolute path in the share
func (f *Fs) absPath(remote string) string {
	return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}

	if apiErr, ok := err.(files_sdk.ResponseError); ok {
		if slices.Contains(retryErrorCodes, apiErr.HttpCode) {
			fs.Debugf(nil, "Retrying API error %v", err)
			return true, err
		}
	}

	return fserrors.ShouldRetry(err), err
}
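// For instance, an SDK error carrying HTTP 429 is retried while a 404 is not
// (a sketch; the error values are constructed here purely for illustration):
//
//	retry, _ := shouldRetry(ctx, files_sdk.ResponseError{HttpCode: 429}) // retry == true
//	retry, _ = shouldRetry(ctx, files_sdk.ResponseError{HttpCode: 404}) // retry == false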

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
	params := files_sdk.FileFindParams{
		Path: f.absPath(path),
	}

	var file files_sdk.File
	err = f.pacer.Call(func() (bool, error) {
		file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}

	return &file, nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	root = strings.Trim(root, "/")

	config, err := newClientConfig(ctx, opt)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:            name,
		root:            root,
		opt:             *opt,
		fileClient:      &file.Client{Config: config},
		folderClient:    &folder.Client{Config: config},
		migrationClient: &file_migration.Client{Config: config},
		bundleClient:    &bundle.Client{Config: config},
		pacer:           fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		CaseInsensitive:          true,
		CanHaveEmptyDirectories:  true,
		ReadMimeType:             true,
		DirModTimeUpdatesOnWrite: true,
	}).Fill(ctx, f)

	if f.root != "" {
		info, err := f.readMetaDataForPath(ctx, "")
		if err == nil && !info.IsDir() {
			f.root = path.Dir(f.root)
			if f.root == "." {
				f.root = ""
			}
			return f, fs.ErrorIsFile
		}
	}

	return f, err
}

func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
	if opt.Site != "" {
		if strings.Contains(opt.Site, ".") {
			config.EndpointOverride = opt.Site
		} else {
			config.Subdomain = opt.Site
		}

		_, err = url.ParseRequestURI(config.Endpoint())
		if err != nil {
			err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
			return
		}
	}

	config = config.Init().SetCustomClient(fshttp.NewClient(ctx))

	if opt.APIKey != "" {
		config.APIKey = opt.APIKey
		return
	}

	if opt.Username == "" {
		err = errors.New("username not found")
		return
	}
	if opt.Password == "" {
		err = errors.New("password not found")
		return
	}
	opt.Password, err = obscure.Reveal(opt.Password)
	if err != nil {
		return
	}

	sessionClient := session.Client{Config: config}
	params := files_sdk.SessionCreateParams{
		Username: opt.Username,
		Password: opt.Password,
	}

	thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
	if err != nil {
		err = fmt.Errorf("couldn't create session: %w", err)
		return
	}

	config.SessionId = thisSession.Id
	return
}
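// A typical configuration for this backend might look like the following
// (values are placeholders; as implemented above, api_key takes precedence
// over the username/password session flow):
//
//	[filescom]
//	type = filescom
//	site = mysite
//	api_key = XXXXXXXX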

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if file != nil {
		err = o.setMetaData(file)
	} else {
		err = o.readMetaData(ctx) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	var it *folder.Iter
	params := files_sdk.FolderListForParams{
		Path: f.absPath(dir),
	}

	err = f.pacer.Call(func() (bool, error) {
		it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("couldn't list files: %w", err)
	}

	for it.Next() {
		item := ptr(it.File())
		remote := f.opt.Enc.ToStandardPath(item.DisplayName)
		remote = path.Join(dir, remote)
		if remote == dir {
			continue
		}

		if item.IsDir() {
			d := fs.NewDir(remote, item.ModTime())
			entries = append(entries, d)
		} else {
			o, err := f.newObjectWithInfo(ctx, remote, item)
			if err != nil {
				return nil, err
			}
			entries = append(entries, o)
		}
	}
	err = it.Err()
	if files_sdk.IsNotExist(err) {
		return nil, fs.ErrorDirNotFound
	}
	return
}

// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
	// Create the directory for the object if it doesn't exist
	err = f.mkParentDir(ctx, remote)
	if err != nil {
		return
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, nil
}

// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	fs := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return fs, fs.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

func (f *Fs) mkdir(ctx context.Context, path string) error {
	if path == "" || path == "." {
		return nil
	}

	params := files_sdk.FolderCreateParams{
		Path:         path,
		MkdirParents: ptr(true),
	}

	err := f.pacer.Call(func() (bool, error) {
		_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if files_sdk.IsExist(err) {
		return nil
	}
	return err
}

// Make the parent directory of remote
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
	return f.mkdir(ctx, path.Dir(f.absPath(remote)))
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return f.mkdir(ctx, f.absPath(dir))
}

// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
	o := Object{
		fs:     f,
		remote: dir,
	}
	return o.SetModTime(ctx, modTime)
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	path := f.absPath(dir)
	if path == "" || path == "." {
		return errors.New("can't purge root directory")
	}

	params := files_sdk.FileDeleteParams{
		Path:      path,
		Recursive: ptr(!check),
	}

	err := f.pacer.Call(func() (bool, error) {
		err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
		// Allow for eventual consistency deletion of child objects.
		if isFolderNotEmpty(err) {
			return true, err
		}
		return shouldRetry(ctx, err)
	})
	if err != nil {
		if files_sdk.IsNotExist(err) {
			return fs.ErrorDirNotFound
		} else if isFolderNotEmpty(err) {
			return fs.ErrorDirectoryNotEmpty
		}

		return fmt.Errorf("rmdir failed: %w", err)
	}
	return nil
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	err = srcObj.readMetaData(ctx)
	if err != nil {
		return
	}

	srcPath := srcObj.fs.absPath(srcObj.remote)
	dstPath := f.absPath(remote)
	if strings.EqualFold(srcPath, dstPath) {
		return nil, fmt.Errorf("can't copy %q -> %q as they are the same name when lowercase", srcPath, dstPath)
	}

	// Create temporary object
	dstObj, err = f.createObject(ctx, remote)
	if err != nil {
		return
	}

	// Copy the object
	params := files_sdk.FileCopyParams{
		Path:        srcPath,
		Destination: dstPath,
		Overwrite:   ptr(true),
	}

	var action files_sdk.FileAction
	err = f.pacer.Call(func() (bool, error) {
		action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return
	}

	err = f.waitForAction(ctx, action, "copy")
	if err != nil {
		return
	}

	err = dstObj.SetModTime(ctx, srcObj.modTime)
	return
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
}

// move a file or folder
func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
	// Move the object
	params := files_sdk.FileMoveParams{
		Path:        src.absPath(srcRemote),
		Destination: f.absPath(dstRemote),
	}

	var action files_sdk.FileAction
	err = f.pacer.Call(func() (bool, error) {
		action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}

	err = f.waitForAction(ctx, action, "move")
	if err != nil {
		return nil, err
	}

	info, err = f.readMetaDataForPath(ctx, dstRemote)
	return
}

func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
	var migration files_sdk.FileMigration
	err = f.pacer.Call(func() (bool, error) {
		migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
			// noop
		}, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err == nil && migration.Status != "completed" {
		return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
	}
	return
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Create temporary object
	dstObj, err := f.createObject(ctx, remote)
	if err != nil {
		return nil, err
	}

	// Do the move
	info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
	if err != nil {
		return nil, err
	}

	err = dstObj.setMetaData(info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	// Check if destination exists
	_, err = f.readMetaDataForPath(ctx, dstRemote)
	if err == nil {
		return fs.ErrorDirExists
	}

	// Create temporary object
	dstObj, err := f.createObject(ctx, dstRemote)
	if err != nil {
		return
	}

	// Do the move
	_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
	return
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
	params := files_sdk.BundleCreateParams{
		Paths: []string{f.absPath(remote)},
	}
	if expire < fs.DurationOff {
		params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
	}

	var bundle files_sdk.Bundle
	err = f.pacer.Call(func() (bool, error) {
		bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})

	url = bundle.Url
	return
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet(hash.CRC32, hash.MD5)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the CRC32 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	switch t {
	case hash.CRC32:
		if o.crc32 == "" {
			return "", nil
		}
		return fmt.Sprintf("%08s", o.crc32), nil
	case hash.MD5:
		return o.md5, nil
	}
	return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(file *files_sdk.File) error {
	o.modTime = file.ModTime()

	if !file.IsDir() {
		o.size = file.Size
		o.crc32 = file.Crc32
		o.md5 = file.Md5
		o.mimeType = file.MimeType
	}

	return nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	file, err := o.fs.readMetaDataForPath(ctx, o.remote)
	if err != nil {
		if files_sdk.IsNotExist(err) {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	if file.IsDir() {
		return fs.ErrorIsDir
	}
	return o.setMetaData(file)
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	params := files_sdk.FileUpdateParams{
		Path:          o.fs.absPath(o.remote),
		ProvidedMtime: &modTime,
	}

	var file files_sdk.File
	err = o.fs.pacer.Call(func() (bool, error) {
		file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	return o.setMetaData(&file)
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Offset and Count for range download
	var offset, count int64
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
		case *fs.SeekOption:
			offset = x.Offset
			count = o.size - offset
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	params := files_sdk.FileDownloadParams{
		Path: o.fs.absPath(o.remote),
	}

	headers := &http.Header{}
	headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
	err = o.fs.pacer.Call(func() (bool, error) {
		_, err = o.fs.fileClient.Download(
			params,
			files_sdk.WithContext(ctx),
			files_sdk.RequestHeadersOption(headers),
			files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
				in = closer
				return err
			}),
		)
		return shouldRetry(ctx, err)
	})
	return
}

// ptr returns a pointer to t - useful for returning pointers to constants
func ptr[T any](t T) *T {
	return &t
}

func isFolderNotEmpty(err error) bool {
	var re files_sdk.ResponseError
	ok := errors.As(err, &re)
	return ok && re.Type == folderNotEmpty
}

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	uploadOpts := []file.UploadOption{
		file.UploadWithContext(ctx),
		file.UploadWithReader(in),
		file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
		file.UploadWithProvidedMtime(src.ModTime(ctx)),
	}

	err := o.fs.pacer.Call(func() (bool, error) {
		err := o.fs.fileClient.Upload(uploadOpts...)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}

	return o.readMetaData(ctx)
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	params := files_sdk.FileDeleteParams{
		Path: o.fs.absPath(o.remote),
	}

	return o.fs.pacer.Call(func() (bool, error) {
		err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs           = (*Fs)(nil)
	_ fs.Purger       = (*Fs)(nil)
	_ fs.PutStreamer  = (*Fs)(nil)
	_ fs.Copier       = (*Fs)(nil)
	_ fs.Mover        = (*Fs)(nil)
	_ fs.DirMover     = (*Fs)(nil)
	_ fs.PublicLinker = (*Fs)(nil)
	_ fs.Object       = (*Object)(nil)
	_ fs.MimeTyper    = (*Object)(nil)
)
17
backend/filescom/filescom_test.go
Normal file
@@ -0,0 +1,17 @@
// Test Files filesystem interface
package filescom_test

import (
	"testing"

	"github.com/rclone/rclone/backend/filescom"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFilesCom:",
		NilObject:  (*filescom.Object)(nil),
	})
}
@@ -9,6 +9,7 @@ import (
 	"io"
 	"net"
 	"net/textproto"
+	"net/url"
 	"path"
 	"runtime"
 	"strings"
@@ -85,7 +86,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
 			Default: false,
 		}, {
 			Name: "concurrency",
-			Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
+			Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.
 
 Note that setting this is very likely to cause deadlocks so it should
 be used with care.
@@ -99,7 +100,7 @@ maximum of |--checkers| and |--transfers|.
 So for |concurrency 3| you'd use |--checkers 2 --transfers 2
 --check-first| or |--checkers 1 --transfers 1|.
 
-`, "|", "`", -1),
+`, "|", "`"),
 			Default:  0,
 			Advanced: true,
 		}, {
@@ -180,12 +181,36 @@ If this is set and no password is supplied then rclone will ask for a password
 			Default: "",
 			Help: `Socks 5 proxy host.
 
-	Supports the format user:pass@host:port, user@host:port, host:port.
+Supports the format user:pass@host:port, user@host:port, host:port.
 
-	Example:
+Example:
 
-		myUser:myPass@localhost:9005
-	`,
+	myUser:myPass@localhost:9005
+`,
 			Advanced: true,
 		}, {
+			Name:    "http_proxy",
+			Default: "",
+			Help: `URL for HTTP CONNECT proxy
+
+Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
+`,
+			Advanced: true,
+		}, {
+			Name:    "no_check_upload",
+			Default: false,
+			Help: `Don't check the upload is OK
+
+Normally rclone will try to check the upload exists after it has
+uploaded a file to make sure the size and modification time are as
+expected.
+
+This flag stops rclone doing these checks. This enables uploading to
+folders which are write only.
+
+You will likely need to use the --inplace flag also if uploading to
+a write only folder.
+`,
+			Advanced: true,
+		}, {
 			Name: config.ConfigEncoding,
@@ -232,6 +257,8 @@ type Options struct {
 	AskPassword bool                 `config:"ask_password"`
 	Enc         encoder.MultiEncoder `config:"encoding"`
 	SocksProxy  string               `config:"socks_proxy"`
+	HTTPProxy     string `config:"http_proxy"`
+	NoCheckUpload bool   `config:"no_check_upload"`
 }
 
 // Fs represents a remote FTP server
@@ -249,6 +276,7 @@ type Fs struct {
 	pool     []*ftp.ServerConn
 	drain    *time.Timer // used to drain the pool when we stop using the connections
 	tokens   *pacer.TokenDispenser
+	proxyURL *url.URL // address of HTTP proxy read from environment
 	pacer    *fs.Pacer // pacer for FTP connections
 	fGetTime bool // true if the ftp library accepts GetTime
 	fSetTime bool // true if the ftp library accepts SetTime
@@ -396,11 +424,26 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 	dial := func(network, address string) (conn net.Conn, err error) {
 		fs.Debugf(f, "dial(%q,%q)", network, address)
 		defer func() {
-			fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
+			if err != nil {
+				fs.Debugf(f, "> dial: conn=%v, err=%v", conn, err)
+			} else {
+				fs.Debugf(f, "> dial: conn=%s->%s, err=%v", conn.LocalAddr(), conn.RemoteAddr(), err)
+			}
 		}()
 		baseDialer := fshttp.NewDialer(ctx)
 		if f.opt.SocksProxy != "" {
 			conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
+		} else if f.proxyURL != nil {
+			// We need to make the onward connection to f.opt.Host. However the FTP
+			// library sets the host to the proxy IP after using EPSV or PASV so we need
+			// to correct that here.
+			var dialPort string
+			_, dialPort, err = net.SplitHostPort(address)
+			if err != nil {
+				return nil, err
+			}
+			dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
+			conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
 		} else {
 			conn, err = baseDialer.Dial(network, address)
 		}
@@ -614,6 +657,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 		CanHaveEmptyDirectories: true,
 		PartialUploads:          true,
 	}).Fill(ctx, f)
+	// get proxy URL if set
+	if opt.HTTPProxy != "" {
+		proxyURL, err := url.Parse(opt.HTTPProxy)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
+		}
+		f.proxyURL = proxyURL
+	}
 	// set the pool drainer timer going
 	if f.opt.IdleTimeout > 0 {
 		f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
@@ -1303,6 +1354,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return fmt.Errorf("update stor: %w", err)
 	}
 	o.fs.putFtpConnection(&c, nil)
+	if o.fs.opt.NoCheckUpload {
+		o.info = &FileInfo{
+			Name:    o.remote,
+			Size:    uint64(src.Size()),
+			ModTime: src.ModTime(ctx),
+			precise: true,
+			IsDir:   false,
+		}
+		return nil
+	}
 	if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
 		return fmt.Errorf("SetModTime: %w", err)
 	}

@@ -17,7 +17,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-type settings map[string]interface{}
+type settings map[string]any
 
 func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
 	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
@@ -52,7 +52,7 @@ func (f *Fs) testUploadTimeout(t *testing.T) {
 		ci.Timeout = saveTimeout
 	}()
 	ci.LowLevelRetries = 1
-	ci.Timeout = idleTimeout
+	ci.Timeout = fs.Duration(idleTimeout)
 
 	upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
 		fixFs := deriveFs(ctx, t, f, settings{
287
backend/gofile/api/types.go
Normal file
@@ -0,0 +1,287 @@
// Package api has type definitions for gofile
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
	"fmt"
	"time"
)

const (
	// 2017-05-03T07:26:10-07:00
	timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the
// gofile API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
	timeString := (*time.Time)(t).Format(timeFormat)
	return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}
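// On the wire this is a quoted RFC3339 string, since timeFormat wraps
// time.RFC3339 in literal double quotes. For example (an illustrative value):
//
//	"2017-05-03T07:26:10-07:00" unmarshals into a Time holding that instant,
//	and marshalling it back produces the same quoted string.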

// Error is returned from gofile when things go wrong
type Error struct {
	Status string `json:"status"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
	return fmt.Sprintf("Error %q", e.Status)
}

// IsError returns true if there is an error
func (e Error) IsError() bool {
	return e.Status != "ok"
}

// Err returns err if it is non-nil, e if IsError reports an API-level
// failure, and nil otherwise
func (e Error) Err(err error) error {
	if err != nil {
		return err
	}
	if e.IsError() {
		return e
	}
	return nil
}
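// A typical call site checks the transport error and the embedded API status
// in one step (srv and opts are placeholders for a rest client and its
// options, not part of this package):
//
//	var result api.Contents
//	_, err := srv.CallJSON(ctx, &opts, nil, &result)
//	if err := result.Err(err); err != nil {
//		return err // either the HTTP error or the API-level status
//	}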
|
||||

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// Types of things in Item
const (
	ItemTypeFolder = "folder"
	ItemTypeFile   = "file"
)

// Item describes a folder or a file as returned by /contents
type Item struct {
	ID            string                 `json:"id"`
	ParentFolder  string                 `json:"parentFolder"`
	Type          string                 `json:"type"`
	Name          string                 `json:"name"`
	Size          int64                  `json:"size"`
	Code          string                 `json:"code"`
	CreateTime    int64                  `json:"createTime"`
	ModTime       int64                  `json:"modTime"`
	Link          string                 `json:"link"`
	MD5           string                 `json:"md5"`
	MimeType      string                 `json:"mimetype"`
	ChildrenCount int                    `json:"childrenCount"`
	DirectLinks   map[string]*DirectLink `json:"directLinks"`
	//Public             bool     `json:"public"`
	//ServerSelected     string   `json:"serverSelected"`
	//Thumbnail          string   `json:"thumbnail"`
	//DownloadCount      int      `json:"downloadCount"`
	//TotalDownloadCount int64    `json:"totalDownloadCount"`
	//TotalSize          int64    `json:"totalSize"`
	//ChildrenIDs        []string `json:"childrenIds"`
	Children map[string]*Item `json:"children"`
}

// ToNativeTime converts a go time to a native time
func ToNativeTime(t time.Time) int64 {
	return t.Unix()
}

// FromNativeTime converts native time to a go time
func FromNativeTime(t int64) time.Time {
	return time.Unix(t, 0)
}

// DirectLink describes a direct link to a file so it can be
// downloaded by third parties.
type DirectLink struct {
	ExpireTime       int64  `json:"expireTime"`
	SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
	DomainsAllowed   []any  `json:"domainsAllowed"`
	Auth             []any  `json:"auth"`
	IsReqLink        bool   `json:"isReqLink"`
	DirectLink       string `json:"directLink"`
}

// Contents is returned from the /contents call
type Contents struct {
	Error
	Data struct {
		Item
	} `json:"data"`
	Metadata Metadata `json:"metadata"`
}

// Metadata is returned when paging is in use
type Metadata struct {
	TotalCount  int  `json:"totalCount"`
	TotalPages  int  `json:"totalPages"`
	Page        int  `json:"page"`
	PageSize    int  `json:"pageSize"`
	HasNextPage bool `json:"hasNextPage"`
}
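Metadata with HasNextPage suggests the usual page-by-page listing loop. A sketch under the assumption of a hypothetical fetchPage helper that performs GET /contents for a given page number (not part of the diff):

package example

import "github.com/rclone/rclone/backend/gofile/api"

// listAll is illustrative: it accumulates children across pages,
// using Metadata.HasNextPage to decide when to stop.
func listAll(fetchPage func(page int) (*api.Contents, error)) ([]*api.Item, error) {
	var items []*api.Item
	for page := 1; ; page++ {
		contents, err := fetchPage(page)
		if err != nil {
			return nil, err
		}
		for _, item := range contents.Data.Children {
			items = append(items, item)
		}
		if !contents.Metadata.HasNextPage {
			return items, nil
		}
	}
}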

// AccountsGetID is the result of /accounts/getid
type AccountsGetID struct {
	Error
	Data struct {
		ID string `json:"id"`
	} `json:"data"`
}

// Stats of storage and traffic
type Stats struct {
	FolderCount            int64 `json:"folderCount"`
	FileCount              int64 `json:"fileCount"`
	Storage                int64 `json:"storage"`
	TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
	TrafficReqDownloaded   int64 `json:"trafficReqDownloaded"`
	TrafficWebDownloaded   int64 `json:"trafficWebDownloaded"`
}

// AccountsGet is the result of /accounts/{id}
type AccountsGet struct {
	Error
	Data struct {
		ID                             string `json:"id"`
		Email                          string `json:"email"`
		Tier                           string `json:"tier"`
		PremiumType                    string `json:"premiumType"`
		Token                          string `json:"token"`
		RootFolder                     string `json:"rootFolder"`
		SubscriptionProvider           string `json:"subscriptionProvider"`
		SubscriptionEndDate            int    `json:"subscriptionEndDate"`
		SubscriptionLimitDirectTraffic int64  `json:"subscriptionLimitDirectTraffic"`
		SubscriptionLimitStorage       int64  `json:"subscriptionLimitStorage"`
		StatsCurrent                   Stats  `json:"statsCurrent"`
		// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
	} `json:"data"`
}

// CreateFolderRequest is the input to /contents/createFolder
type CreateFolderRequest struct {
	ParentFolderID string `json:"parentFolderId"`
	FolderName     string `json:"folderName"`
	ModTime        int64  `json:"modTime,omitempty"`
}

// CreateFolderResponse is the output from /contents/createFolder
type CreateFolderResponse struct {
	Error
	Data Item `json:"data"`
}

// DeleteRequest is the input to DELETE /contents
type DeleteRequest struct {
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// DeleteResponse is returned by DELETE /contents
type DeleteResponse struct {
	Error
	Data map[string]Error
}

// DirectUploadURL returns the direct upload URL for Gofile
func DirectUploadURL() string {
	return "https://upload.gofile.io/uploadfile"
}

// UploadResponse is returned by POST /contents/uploadfile
type UploadResponse struct {
	Error
	Data Item `json:"data"`
}

// DirectLinksRequest specifies the parameters for the direct link
type DirectLinksRequest struct {
	ExpireTime       int64 `json:"expireTime,omitempty"`
	SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
	DomainsAllowed   []any `json:"domainsAllowed,omitempty"`
	Auth             []any `json:"auth,omitempty"`
}

// DirectLinksResult is returned from POST /contents/{id}/directlinks
type DirectLinksResult struct {
	Error
	Data struct {
		ExpireTime       int64  `json:"expireTime"`
		SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
		DomainsAllowed   []any  `json:"domainsAllowed"`
		Auth             []any  `json:"auth"`
		IsReqLink        bool   `json:"isReqLink"`
		ID               string `json:"id"`
		DirectLink       string `json:"directLink"`
	} `json:"data"`
}

// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
//
// The Value to supply depends on the Attribute being set:
//
//	For Attribute "name"        : The name of the content (file or folder)
//	For Attribute "description" : The description displayed on the download page (folder only)
//	For Attribute "tags"        : A comma-separated list of tags (folder only)
//	For Attribute "public"      : either true or false (folder only)
//	For Attribute "expiry"      : A unix timestamp of the expiration date (folder only)
//	For Attribute "password"    : The password to set (folder only)
type UpdateItemRequest struct {
	Attribute string `json:"attribute"`
	Value     any    `json:"attributeValue"`
}
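For example, renaming an item marshals to {"attribute":"name","attributeValue":"..."}. A minimal, illustrative construction (the file name is a placeholder):

package example

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/gofile/api"
)

func exampleRename() {
	// Value is typed any because "public" takes a bool and "expiry"
	// takes a unix timestamp, while "name" takes a string.
	req := api.UpdateItemRequest{
		Attribute: "name",
		Value:     "new-name.txt",
	}
	body, _ := json.Marshal(req)
	fmt.Println(string(body)) // {"attribute":"name","attributeValue":"new-name.txt"}
}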

// UpdateItemResponse is returned by PUT /contents/{id}/update
type UpdateItemResponse struct {
	Error
	Data Item `json:"data"`
}

// MoveRequest is the input to /contents/move
type MoveRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// MoveResponse is returned by POST /contents/move
type MoveResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// CopyRequest is the input to /contents/copy
type CopyRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// CopyResponse is returned by POST /contents/copy
type CopyResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// UploadServerStatus is returned when fetching the root of an upload server
type UploadServerStatus struct {
	Error
	Data struct {
		Server string `json:"server"`
		Test   string `json:"test"`
	} `json:"data"`
}
backend/gofile/gofile.go (new file, 1548 lines)
File diff suppressed because it is too large

backend/gofile/gofile_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test Gofile filesystem interface
package gofile_test

import (
	"testing"

	"github.com/rclone/rclone/backend/gofile"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestGoFile:",
		NilObject:  (*gofile.Object)(nil),
	})
}
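This follows rclone's standard fstests harness. By the project's usual convention (assumed here, not stated in this diff), the suite is pointed at a configured remote along the lines of:

    go test -v ./backend/gofile -remote TestGoFile: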
@@ -35,7 +35,7 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/walk"
+	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/env"
@@ -62,9 +62,10 @@ const (

var (
	// Description of how to auth for this app
-	storageConfig = &oauth2.Config{
+	storageConfig = &oauthutil.Config{
		Scopes:       []string{storage.DevstorageReadWriteScope},
-		Endpoint:     google.Endpoint,
+		AuthURL:      google.Endpoint.AuthURL,
+		TokenURL:     google.Endpoint.TokenURL,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
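Several backends in this compare view make the same migration from oauth2.Config to rclone's oauthutil.Config, which flattens the nested oauth2.Endpoint into top-level AuthURL/TokenURL fields. A hedged sketch of the general shape of the change (field set abridged; scope and URLs are placeholders):

package example

import (
	"golang.org/x/oauth2"

	"github.com/rclone/rclone/lib/oauthutil"
)

// configs shows the shape of the migration only.
func configs(scope, authURL, tokenURL string) (*oauth2.Config, *oauthutil.Config) {
	// Before: the endpoint is wrapped in an oauth2.Endpoint value.
	oldCfg := &oauth2.Config{
		Scopes:   []string{scope},
		Endpoint: oauth2.Endpoint{AuthURL: authURL, TokenURL: tokenURL},
	}
	// After: oauthutil.Config carries AuthURL/TokenURL directly.
	newCfg := &oauthutil.Config{
		Scopes:   []string{scope},
		AuthURL:  authURL,
		TokenURL: tokenURL,
	}
	return oldCfg, newCfg
}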
@@ -106,6 +107,12 @@ func init() {
			Help:      "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
			Hide:      fs.OptionHideBoth,
			Sensitive: true,
		}, {
+			Name:      "access_token",
+			Help:      "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want to use a short-lived access token instead of interactive login.",
+			Hide:      fs.OptionHideConfigurator,
+			Sensitive: true,
+			Advanced:  true,
+		}, {
			Name: "anonymous",
			Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -379,6 +386,7 @@ type Options struct {
	Enc              encoder.MultiEncoder `config:"encoding"`
	EnvAuth          bool                 `config:"env_auth"`
	DirectoryMarkers bool                 `config:"directory_markers"`
+	AccessToken      string               `config:"access_token"`
}

// Fs represents a remote storage server
@@ -475,6 +483,9 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
+	if f.opt.DirectoryMarkers && strings.HasSuffix(bucketPath, "//") {
+		bucketPath = bucketPath[:len(bucketPath)-1]
+	}
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

@@ -535,6 +546,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	if err != nil {
		return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
	}
+	} else if opt.AccessToken != "" {
+		ts := oauth2.Token{AccessToken: opt.AccessToken}
+		oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
	} else {
		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
		if err != nil {
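oauth2.StaticTokenSource wraps a fixed token in a TokenSource that is never refreshed, which is exactly what a short-lived, user-supplied access token needs. A minimal standalone sketch of the same pattern (names are placeholders):

package example

import (
	"context"
	"net/http"

	"golang.org/x/oauth2"
)

// clientFromToken builds an HTTP client that sends the given bearer
// token on every request; the token is never refreshed.
func clientFromToken(ctx context.Context, accessToken string) *http.Client {
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: accessToken})
	return oauth2.NewClient(ctx, ts)
}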
@@ -701,7 +715,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
			continue
		}
		// process directory markers as directories
-		remote = strings.TrimRight(remote, "/")
+		remote, _ = strings.CutSuffix(remote, "/")
	}
	remote = remote[len(prefix):]
	if addBucket {
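The swap is not purely cosmetic: strings.TrimRight removes every trailing slash, while strings.CutSuffix (Go 1.20+) removes at most one, so an object key ending in "//" keeps its inner slash. A small illustration:

package example

import (
	"fmt"
	"strings"
)

func demo() {
	s := "dir//"
	fmt.Println(strings.TrimRight(s, "/")) // "dir" - all trailing slashes removed
	trimmed, _ := strings.CutSuffix(s, "/")
	fmt.Println(trimmed) // "dir/" - only one suffix removed
}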
@@ -834,7 +848,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
-	list := walk.NewListRHelper(callback)
+	list := list.NewHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -944,12 +958,11 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
		return e
	}
	return f.createDirectoryMarker(ctx, bucket, dir)
-
}

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
-	remote = strings.TrimRight(remote, "/")
+	remote, _ = strings.CutSuffix(remote, "/")
	dir := path.Dir(remote)
	if dir == "/" || dir == "." {
		dir = ""
@@ -4,6 +4,7 @@ package googlephotos

import (
	"path"
+	"slices"
	"strings"
	"sync"

@@ -119,7 +120,7 @@ func (as *albums) _del(album *api.Album) {
	dirs := as.path[dir]
	for i, dir := range dirs {
		if dir == leaf {
-			dirs = append(dirs[:i], dirs[i+1:]...)
+			dirs = slices.Delete(dirs, i, i+1)
			break
		}
	}
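slices.Delete(dirs, i, i+1) is the standard-library form of the append idiom; since Go 1.22 it also zeroes the vacated tail elements, which helps the garbage collector when the slice holds pointers. An equivalence sketch:

package example

import (
	"fmt"
	"slices"
)

func demo() {
	a := []string{"a", "b", "c"}
	b := []string{"a", "b", "c"}

	i := 1
	a = append(a[:i], a[i+1:]...) // classic idiom
	b = slices.Delete(b, i, i+1)  // stdlib equivalent (Go 1.21+)

	fmt.Println(a, b) // [a c] [a c]
}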
@@ -28,13 +28,11 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/lib/batcher"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
-	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

@@ -45,6 +43,7 @@ var (
	errAlbumDelete = errors.New("google photos API does not implement deleting albums")
	errRemove      = errors.New("google photos API only implements removing files from albums")
	errOwnAlbums   = errors.New("google photos API only allows uploading to albums rclone created")
+	errReadOnly    = errors.New("can't upload files in read only mode")
)

const (
@@ -54,20 +53,33 @@ const (
	listChunks  = 100 // chunk size to read directory listings
	albumChunks = 50  // chunk size to read album listings
	minSleep    = 10 * time.Millisecond
-	scopeReadOnly  = "https://www.googleapis.com/auth/photoslibrary.readonly"
-	scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
-	scopeAccess    = 2 // position of access scope in list
+	scopeAppendOnly = "https://www.googleapis.com/auth/photoslibrary.appendonly"
+	scopeReadOnly   = "https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata"
+	scopeReadWrite  = "https://www.googleapis.com/auth/photoslibrary.edit.appcreateddata"
)

var (
+	// scopes needed for read write access
+	scopesReadWrite = []string{
+		"openid",
+		"profile",
+		scopeAppendOnly,
+		scopeReadOnly,
+		scopeReadWrite,
+	}
+
+	// scopes needed for read only access
+	scopesReadOnly = []string{
+		"openid",
+		"profile",
+		scopeReadOnly,
+	}
+
	// Description of how to auth for this app
-	oauthConfig = &oauth2.Config{
-		Scopes: []string{
-			"openid",
-			"profile",
-			scopeReadWrite, // this must be at position scopeAccess
-		},
-		Endpoint: google.Endpoint,
+	oauthConfig = &oauthutil.Config{
+		Scopes:   scopesReadWrite,
+		AuthURL:  google.Endpoint.AuthURL,
+		TokenURL: google.Endpoint.TokenURL,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
@@ -101,20 +113,26 @@ func init() {
		case "":
			// Fill in the scopes
			if opt.ReadOnly {
-				oauthConfig.Scopes[scopeAccess] = scopeReadOnly
+				oauthConfig.Scopes = scopesReadOnly
			} else {
-				oauthConfig.Scopes[scopeAccess] = scopeReadWrite
+				oauthConfig.Scopes = scopesReadWrite
			}
-			return oauthutil.ConfigOut("warning", &oauthutil.Options{
+			return oauthutil.ConfigOut("warning1", &oauthutil.Options{
				OAuth2Config: oauthConfig,
			})
-		case "warning":
+		case "warning1":
			// Warn the user as required by google photos integration
-			return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
+			return fs.ConfigConfirm("warning2", true, "config_warning", `Warning

IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)

+		case "warning2":
+			// Warn the user that rclone can no longer download photos it didn't upload from google photos
+			return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
+
+IMPORTANT: Due to Google policy changes rclone can now only download photos it uploaded.`)
+
		case "warning_done":
			return nil, nil
		}
@@ -160,6 +178,34 @@ listings and transferred.

Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
			Advanced: true,
		}, {
+			Name:    "proxy",
+			Default: "",
+			Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
+
+The Google API will deliver images and video which aren't full
+resolution, and/or have EXIF data missing.
+
+However if you use the gphotosdl proxy then you can download original,
+unchanged images.
+
+This runs a headless browser in the background.
+
+Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
+
+First run with
+
+    gphotosdl -login
+
+Then once you have logged into google photos close the browser window
+and run
+
+    gphotosdl
+
+Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
+rclone use the proxy.
+`, "|", "`"),
+			Advanced: true,
+		}, {
			Name: config.ConfigEncoding,
			Help: config.ConfigEncodingHelp,
@@ -181,6 +227,7 @@ type Options struct {
	BatchMode    string      `config:"batch_mode"`
	BatchSize    int         `config:"batch_size"`
	BatchTimeout fs.Duration `config:"batch_timeout"`
+	Proxy        string      `config:"proxy"`
}

// Fs represents a remote storage server
@@ -305,7 +352,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	baseClient := fshttp.NewClient(ctx)
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
	if err != nil {
-		return nil, fmt.Errorf("failed to configure Box: %w", err)
+		return nil, fmt.Errorf("failed to configure google photos: %w", err)
	}

	root = strings.Trim(path.Clean(root), "/")
@@ -360,7 +407,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
		Method:  "GET",
		RootURL: "https://accounts.google.com/.well-known/openid-configuration",
	}
-	var openIDconfig map[string]interface{}
+	var openIDconfig map[string]any
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
		return shouldRetry(ctx, resp, err)

@@ -420,7 +467,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
			"token_type_hint": []string{"access_token"},
		},
	}
-	var res interface{}
+	var res any
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
		return shouldRetry(ctx, resp, err)
@@ -454,7 +501,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	defer log.Trace(f, "remote=%q", remote)("")
+	// defer log.Trace(f, "remote=%q", remote)("")
	return f.newObjectWithInfo(ctx, remote, nil)
}

@@ -667,7 +714,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
+	// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, prefix, pattern := patterns.match(f.root, dir, false)
	if pattern == nil || pattern.isFile {
		return nil, fs.ErrorDirNotFound

@@ -684,7 +731,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	defer log.Trace(f, "src=%+v", src)("")
+	// defer log.Trace(f, "src=%+v", src)("")
	// Temporary Object under construction
	o := &Object{
		fs: f,

@@ -737,7 +784,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap

// Mkdir creates the album if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
-	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
+	// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, prefix, pattern := patterns.match(f.root, dir, false)
	if pattern == nil {
		return fs.ErrorDirNotFound

@@ -761,7 +808,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
-	defer log.Trace(f, "dir=%q")("err=%v", &err)
+	// defer log.Trace(f, "dir=%q")("err=%v", &err)
	match, _, pattern := patterns.match(f.root, dir, false)
	if pattern == nil {
		return fs.ErrorDirNotFound

@@ -834,7 +881,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
-	defer log.Trace(o, "")("")
+	// defer log.Trace(o, "")("")
	if !o.fs.opt.ReadSize || o.bytes >= 0 {
		return o.bytes
	}

@@ -935,7 +982,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
-	defer log.Trace(o, "")("")
+	// defer log.Trace(o, "")("")
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)

@@ -965,16 +1012,20 @@ func (o *Object) downloadURL() string {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	defer log.Trace(o, "")("")
+	// defer log.Trace(o, "")("")
	err = o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "Open: Failed to read metadata: %v", err)
		return nil, err
	}
+	url := o.downloadURL()
+	if o.fs.opt.Proxy != "" {
+		url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
+	}
	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
-		RootURL: o.downloadURL(),
+		RootURL: url,
		Options: options,
	}
	err = o.fs.pacer.Call(func() (bool, error) {

@@ -1067,7 +1118,7 @@ func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*a
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-	defer log.Trace(o, "src=%+v", src)("err=%v", &err)
+	// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
	if pattern == nil || !pattern.isFile || !pattern.canUpload {
		return errCantUpload

@@ -1088,6 +1139,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	}

	if !album.IsWriteable {
+		if o.fs.opt.ReadOnly {
+			return errReadOnly
+		}
		return errOwnAlbums
	}

@@ -1136,7 +1190,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	errors := make([]error, 1)
	results := make([]*api.MediaItem, 1)
	err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
-	if err != nil {
+	if err == nil {
		err = errors[0]
		info = results[0]
	}
@@ -2,6 +2,7 @@ package googlephotos

import (
	"context"
+	"errors"
	"fmt"
	"io"
	"net/http"

@@ -35,7 +36,7 @@ func TestIntegration(t *testing.T) {
		*fstest.RemoteName = "TestGooglePhotos:"
	}
	f, err := fs.NewFs(ctx, *fstest.RemoteName)
-	if err == fs.ErrorNotFoundInConfigFile {
+	if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
		t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
	}
	require.NoError(t, err)
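errors.Is matches a sentinel error even when it arrives wrapped (for example via fmt.Errorf with %w), which a plain == comparison misses. A small demonstration with an illustrative sentinel:

package example

import (
	"errors"
	"fmt"
)

var errSentinel = errors.New("not found in config file")

func demo() {
	wrapped := fmt.Errorf("creating backend: %w", errSentinel)
	fmt.Println(wrapped == errSentinel)          // false: == sees only the outer error
	fmt.Println(errors.Is(wrapped, errSentinel)) // true: Is unwraps the chain
}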
@@ -24,7 +24,7 @@ import (
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
	switch name {
	case "drop":
		return nil, f.db.Stop(true)
@@ -18,6 +18,7 @@ import (
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/lib/kv"
)

@@ -182,6 +183,9 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
	}
	f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

+	// Enable ListP always
+	f.features.ListP = f.ListP
+
	cache.PinUntilFinalized(f.Fs, f)
	return f, err
}
@@ -237,10 +241,39 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,

// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	if entries, err = f.Fs.List(ctx, dir); err != nil {
-		return nil, err
-	}
-	return f.wrapEntries(entries)
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	wrappedCallback := func(entries fs.DirEntries) error {
+		entries, err := f.wrapEntries(entries)
+		if err != nil {
+			return err
+		}
+		return callback(entries)
+	}
+	listP := f.Fs.Features().ListP
+	if listP == nil {
+		entries, err := f.Fs.List(ctx, dir)
+		if err != nil {
+			return err
+		}
+		return wrappedCallback(entries)
+	}
+	return listP(ctx, dir, wrappedCallback)
}

// ListR lists the objects and directories recursively into out.
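The wrapper pattern is: delegate to the base Fs's ListP when it exists, otherwise fall back to a one-shot List and push the result through the same callback. A hedged sketch of a caller driving ListP to stream a listing without holding it all in memory (countEntries is illustrative):

package example

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

// countEntries streams a directory through ListP, counting entries
// tranche by tranche as the callback is invoked.
func countEntries(ctx context.Context, f fs.Fs, dir string) (int, error) {
	listP := f.Features().ListP
	if listP == nil {
		return 0, fmt.Errorf("%v does not support ListP", f)
	}
	n := 0
	err := listP(ctx, dir, func(entries fs.DirEntries) error {
		n += len(entries)
		return nil
	})
	return n, err
}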
@@ -6,6 +6,7 @@ import (
	"encoding/gob"
	"errors"
	"fmt"
+	"maps"
	"strings"
	"time"
@@ -195,9 +196,7 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
		r.Fp = op.fp
	}

-	for hashType, hashVal := range op.hashes {
-		r.Hashes[hashType] = hashVal
-	}
+	maps.Copy(r.Hashes, op.hashes)
	if data, err = r.encode(op.key); err != nil {
		return fmt.Errorf("marshal failed: %w", err)
	}
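maps.Copy (Go 1.21) replaces the hand-written copy loop: it inserts every key/value pair of the source map into the destination, overwriting keys that already exist. The equivalence in miniature:

package example

import (
	"fmt"
	"maps"
)

func demo() {
	dst := map[string]string{"md5": "old"}
	src := map[string]string{"md5": "new", "sha1": "abc"}
	maps.Copy(dst, src) // same effect as: for k, v := range src { dst[k] = v }
	fmt.Println(dst)    // map[md5:new sha1:abc]
}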
@@ -31,7 +31,6 @@ import (
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
-	"golang.org/x/oauth2"
)

const (

@@ -48,11 +47,9 @@ const (
// Globals
var (
	// Description of how to auth for this app.
-	oauthConfig = &oauth2.Config{
-		Endpoint: oauth2.Endpoint{
-			AuthURL:  "https://my.hidrive.com/client/authorize",
-			TokenURL: "https://my.hidrive.com/oauth2/token",
-		},
+	oauthConfig = &oauthutil.Config{
+		AuthURL:  "https://my.hidrive.com/client/authorize",
+		TokenURL: "https://my.hidrive.com/oauth2/token",
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
@@ -52,10 +52,7 @@ func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *ui
	total := len(p)
	nullBytes := make([]byte, blockSize)
	for len(p) > 0 {
-		toWrite := int(blockSize - *bytesInBlock)
-		if toWrite > len(p) {
-			toWrite = len(p)
-		}
+		toWrite := min(int(blockSize-*bytesInBlock), len(p))
		c, err := writer.Write(p[:toWrite])
		*bytesInBlock += uint32(c)
		*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
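The min builtin (Go 1.21) collapses the clamp-to-length pattern into a single expression; together with the range-over-int loops in the hunks below (Go 1.22), these are mechanical modernisations. A compact comparison:

package example

func clampOld(a, b int) int {
	// pre-Go 1.21 idiom
	toWrite := a
	if toWrite > b {
		toWrite = b
	}
	return toWrite
}

func clampNew(a, b int) int {
	return min(a, b) // builtin since Go 1.21
}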
@@ -276,7 +273,7 @@ func (h *hidriveHash) Sum(b []byte) []byte {
	}

	checksum := zeroSum
-	for i := 0; i < len(h.levels); i++ {
+	for i := range h.levels {
		level := h.levels[i]
		if i < len(h.levels)-1 {
			// Aggregate non-empty non-final levels.
@@ -216,7 +216,7 @@ func TestLevelWrite(t *testing.T) {
func TestLevelIsFull(t *testing.T) {
	content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
	l := hidrivehash.NewLevel()
-	for i := 0; i < 256; i++ {
+	for range 256 {
		assert.False(t, l.(internal.LevelHash).IsFull())
		written, err := l.Write(content[:])
		assert.Equal(t, len(content), written)
Some files were not shown because too many files have changed in this diff.