Mirror of https://github.com/rclone/rclone.git (synced 2025-12-14 15:23:18 +00:00)

Compare commits: v1.63.1 ... fix-oauth- (1261 commits)
Commit list: 1261 commits, shown by abbreviated SHA1 only (bbb31d6acf ... 00512e1303); the Author and Date columns are empty in this mirror view.
.gitattributes (vendored) | 4

@@ -1,3 +1,7 @@
+# Go writes go.mod and go.sum with lf even on windows
+go.mod text eol=lf
+go.sum text eol=lf
+
 # Ignore generated files in GitHub language statistics and diffs
 /MANUAL.* linguist-generated=true
 /rclone.1 linguist-generated=true
.github/workflows/build.yml (vendored) | 140

@@ -17,22 +17,21 @@ on:
       manual:
         description: Manual run (bypass default conditions)
         type: boolean
-        required: true
         default: true
 
 jobs:
   build:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.20'
+            go: '>=1.23.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -43,14 +42,14 @@ jobs:
 
           - job_name: linux_386
             os: ubuntu-latest
-            go: '1.20'
+            go: '>=1.23.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true
 
           - job_name: mac_amd64
-            os: macos-11
-            go: '1.20'
+            os: macos-latest
+            go: '>=1.23.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -58,15 +57,15 @@ jobs:
             deploy: true
 
           - job_name: mac_arm64
-            os: macos-11
-            go: '1.20'
+            os: macos-latest
+            go: '>=1.23.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
 
           - job_name: windows
             os: windows-latest
-            go: '1.20'
+            go: '>=1.23.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,20 +75,20 @@ jobs:
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.20'
+            go: '>=1.23.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.18
+          - job_name: go1.21
             os: ubuntu-latest
-            go: '1.18'
+            go: '1.21'
             quicktest: true
             racequicktest: true
 
-          - job_name: go1.19
+          - job_name: go1.22
             os: ubuntu-latest
-            go: '1.19'
+            go: '1.22'
             quicktest: true
             racequicktest: true
 
@@ -99,12 +98,12 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
       - name: Install Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
          check-latest: true
@@ -124,15 +123,21 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get install fuse3 libfuse-dev rpm pkg-config
+          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
         if: matrix.os == 'ubuntu-latest'
 
       - name: Install Libraries on macOS
         shell: bash
         run: |
+          # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
+          # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
+          unset HOMEBREW_NO_INSTALL_FROM_API
+          brew untap --force homebrew/core
+          brew untap --force homebrew/cask
           brew update
           brew install --cask macfuse
-        if: matrix.os == 'macos-11'
+          brew install git-annex git-annex-remote-rclone
+        if: matrix.os == 'macos-latest'
 
       - name: Install Libraries on Windows
         shell: powershell
@@ -162,14 +167,6 @@ jobs:
           printf "\n\nSystem environment:\n\n"
           env
 
-      - name: Go module cache
-        uses: actions/cache@v3
-        with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go-
-
       - name: Build rclone
         shell: bash
         run: |
@@ -211,7 +208,6 @@ jobs:
         shell: bash
         run: |
           if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
-          if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
           make ci_beta
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
@@ -220,27 +216,77 @@ jobs:
         if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
 
   lint:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest
 
     steps:
+      - name: Get runner parameters
+        id: get-runner-parameters
+        shell: bash
+        run: |
+          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
+          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
+
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
-      - name: Code quality test
-        uses: golangci/golangci-lint-action@v3
-        with:
-          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
-          version: latest
-
-      # Run govulncheck on the latest go version, the one we build binaries with
       - name: Install Go
-        uses: actions/setup-go@v4
+        id: setup-go
+        uses: actions/setup-go@v5
         with:
-          go-version: '1.20'
+          go-version: '>=1.23.0-rc.1'
           check-latest: true
+          cache: false
 
+      - name: Cache
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/go/pkg/mod
+            ~/.cache/go-build
+            ~/.cache/golangci-lint
+          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
+          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
+
+      - name: Code quality test (Linux)
+        uses: golangci/golangci-lint-action@v6
+        with:
+          version: latest
+          skip-cache: true
+
+      - name: Code quality test (Windows)
+        uses: golangci/golangci-lint-action@v6
+        env:
+          GOOS: "windows"
+        with:
+          version: latest
+          skip-cache: true
+
+      - name: Code quality test (macOS)
+        uses: golangci/golangci-lint-action@v6
+        env:
+          GOOS: "darwin"
+        with:
+          version: latest
+          skip-cache: true
+
+      - name: Code quality test (FreeBSD)
+        uses: golangci/golangci-lint-action@v6
+        env:
+          GOOS: "freebsd"
+        with:
+          version: latest
+          skip-cache: true
+
+      - name: Code quality test (OpenBSD)
+        uses: golangci/golangci-lint-action@v6
+        env:
+          GOOS: "openbsd"
+        with:
+          version: latest
+          skip-cache: true
+
       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
@@ -249,30 +295,22 @@ jobs:
         run: govulncheck ./...
 
   android:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
 
    steps:
      - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0
 
      # Upgrade together with NDK version
      - name: Set up Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
        with:
-          go-version: '1.20'
+          go-version: '>=1.23.0-rc.1'
 
-      - name: Go module cache
-        uses: actions/cache@v3
-        with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go-
-
      - name: Set global environment variables
        shell: bash
(Docker beta image build workflow; file header not captured in the source)

@@ -10,26 +10,35 @@ jobs:
     runs-on: ubuntu-latest
     name: Build image job
     steps:
+      - name: Free some space
+        shell: bash
+        run: |
+          df -h .
+          # Remove android SDK
+          sudo rm -rf /usr/local/lib/android || true
+          # Remove .net runtime
+          sudo rm -rf /usr/share/dotnet || true
+          df -h .
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Login to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Extract metadata (tags, labels) for Docker
         id: meta
-        uses: docker/metadata-action@v4
+        uses: docker/metadata-action@v5
         with:
           images: ghcr.io/${{ github.repository }}
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
           # This is the user that triggered the Workflow. In this case, it will
@@ -42,9 +51,12 @@ jobs:
           # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
           # for more detailed information.
           password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Show disk usage
+        shell: bash
+        run: |
+          df -h .
       - name: Build and publish image
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v6
         with:
           file: Dockerfile
           context: .
@@ -54,8 +66,12 @@ jobs:
             rclone/rclone:beta
           labels: ${{ steps.meta.outputs.labels }}
           platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-from: type=gha, scope=${{ github.workflow }}
+          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
           provenance: false
           # Eventually cache will need to be cleared if builds more frequent than once a week
           # https://github.com/docker/build-push-action/issues/252
+      - name: Show disk usage
+        shell: bash
+        run: |
+          df -h .
(Docker release image and volume plugin build workflow; file header not captured in the source)

@@ -10,8 +10,17 @@ jobs:
     runs-on: ubuntu-latest
     name: Build image job
     steps:
+      - name: Free some space
+        shell: bash
+        run: |
+          df -h .
+          # Remove android SDK
+          sudo rm -rf /usr/local/lib/android || true
+          # Remove .net runtime
+          sudo rm -rf /usr/share/dotnet || true
+          df -h .
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Get actual patch version
@@ -23,15 +32,27 @@ jobs:
       - name: Get actual major version
         id: actual_major_version
         run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
-      - name: Build and publish image
-        uses: ilteoood/docker_buildx@1.1.0
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3
         with:
-          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
-          imageName: rclone/rclone
-          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          publish: true
-          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
-          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
+          username: ${{ secrets.DOCKER_HUB_USER }}
+          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
+      - name: Build and publish image
+        uses: docker/build-push-action@v6
+        with:
+          file: Dockerfile
+          context: .
+          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          push: true
+          tags: |
+            rclone/rclone:latest
+            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
+            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
+            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
 
   build_docker_volume_plugin:
     if: github.repository == 'rclone/rclone'
@@ -39,8 +60,17 @@ jobs:
     runs-on: ubuntu-latest
     name: Build docker plugin job
     steps:
+      - name: Free some space
+        shell: bash
+        run: |
+          df -h .
+          # Remove android SDK
+          sudo rm -rf /usr/local/lib/android || true
+          # Remove .net runtime
+          sudo rm -rf /usr/share/dotnet || true
+          df -h .
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Build and publish docker plugin
.github/workflows/notify.yml (vendored, new file) | 15

@@ -0,0 +1,15 @@
+name: Notify users based on issue labels
+
+on:
+  issues:
+    types: [labeled]
+
+jobs:
+  notify:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: jenschelkopf/issue-label-notification-action@1.3
+        with:
+          token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
+          recipients: |
+            Support Contract=@rclone/support
.github/workflows/winget.yml (vendored) | 28

@@ -1,14 +1,14 @@
 name: Publish to Winget
 on:
   release:
     types: [released]
 
 jobs:
   publish:
-    runs-on: windows-latest # Action can only run on Windows
+    runs-on: ubuntu-latest
     steps:
       - uses: vedantmgoyal2009/winget-releaser@v2
         with:
          identifier: Rclone.Rclone
          installers-regex: '-windows-\w+\.zip$'
          token: ${{ secrets.WINGET_TOKEN }}
.gitignore (vendored) | 9

@@ -3,15 +3,20 @@ _junk/
 rclone
 rclone.exe
 build
-docs/public
+/docs/public/
+/docs/.hugo_build.lock
+/docs/static/img/logos/
 rclone.iml
 .idea
 .history
+.vscode
 *.test
-*.log
 *.iml
 fuzz-build.zip
 *.orig
 *.rej
 Thumbs.db
 __pycache__
+.DS_Store
+resource_windows_*.syso
+.devcontainer
(golangci-lint configuration; file header not captured in the source)

@@ -13,6 +13,7 @@ linters:
     - stylecheck
     - unused
     - misspell
+    - gocritic
     #- prealloc
     #- maligned
   disable-all: true
@@ -33,24 +34,111 @@ issues:
         - staticcheck
       text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
 
+  # don't disable the revive messages about comments on exported functions
+  include:
+    - EXC0012
+    - EXC0013
+    - EXC0014
+    - EXC0015
+
 run:
   # timeout for analysis, e.g. 30s, 5m, default is 1m
   timeout: 10m
 
 linters-settings:
   revive:
+    # setting rules seems to disable all the rules, so re-enable them here
     rules:
-      - name: unreachable-code
-        disabled: true
-      - name: unused-parameter
-        disabled: true
+      - name: blank-imports
+        disabled: false
+      - name: context-as-argument
+        disabled: false
+      - name: context-keys-type
+        disabled: false
+      - name: dot-imports
+        disabled: false
       - name: empty-block
         disabled: true
+      - name: error-naming
+        disabled: false
+      - name: error-return
+        disabled: false
+      - name: error-strings
+        disabled: false
+      - name: errorf
+        disabled: false
+      - name: exported
+        disabled: false
+      - name: increment-decrement
+        disabled: true
+      - name: indent-error-flow
+        disabled: false
+      - name: package-comments
+        disabled: false
+      - name: range
+        disabled: false
+      - name: receiver-naming
+        disabled: false
       - name: redefines-builtin-id
         disabled: true
       - name: superfluous-else
         disabled: true
+      - name: time-naming
+        disabled: false
+      - name: unexported-return
+        disabled: false
+      - name: unreachable-code
+        disabled: true
+      - name: unused-parameter
+        disabled: true
+      - name: var-declaration
+        disabled: false
+      - name: var-naming
+        disabled: false
   stylecheck:
     # Only enable the checks performed by the staticcheck stand-alone tool,
     # as documented here: https://staticcheck.io/docs/configuration/options/#checks
     checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
+  gocritic:
+    # Enable all default checks with some exceptions and some additions (commented).
+    # Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
+    disable-all: true
+    enabled-checks:
+      #- appendAssign # Enabled by default
+      - argOrder
+      - assignOp
+      - badCall
+      - badCond
+      #- captLocal # Enabled by default
+      - caseOrder
+      - codegenComment
+      #- commentFormatting # Enabled by default
+      - defaultCaseOrder
+      - deprecatedComment
+      - dupArg
+      - dupBranchBody
+      - dupCase
+      - dupSubExpr
+      - elseif
+      #- exitAfterDefer # Enabled by default
+      - flagDeref
+      - flagName
+      #- ifElseChain # Enabled by default
+      - mapKey
+      - newDeref
+      - offBy1
+      - regexpMust
+      - ruleguard # Not enabled by default
+      #- singleCaseSwitch # Enabled by default
+      - sloppyLen
+      - sloppyTypeAssert
+      - switchTrue
+      - typeSwitchVar
+      - underef
+      - unlambda
+      - unslice
+      - valSwap
+      - wrapperFunc
+    settings:
+      ruleguard:
+        rules: "${configDir}/bin/rules.go"
316
CONTRIBUTING.md
316
CONTRIBUTING.md
@@ -1,8 +1,8 @@
|
|||||||
# Contributing to rclone #
|
# Contributing to rclone
|
||||||
|
|
||||||
This is a short guide on how to contribute things to rclone.
|
This is a short guide on how to contribute things to rclone.
|
||||||
|
|
||||||
## Reporting a bug ##
|
## Reporting a bug
|
||||||
|
|
||||||
If you've just got a question or aren't sure if you've found a bug
|
If you've just got a question or aren't sure if you've found a bug
|
||||||
then please use the [rclone forum](https://forum.rclone.org/) instead
|
then please use the [rclone forum](https://forum.rclone.org/) instead
|
||||||
@@ -12,13 +12,13 @@ When filing an issue, please include the following information if
|
|||||||
possible as well as a description of the problem. Make sure you test
|
possible as well as a description of the problem. Make sure you test
|
||||||
with the [latest beta of rclone](https://beta.rclone.org/):
|
with the [latest beta of rclone](https://beta.rclone.org/):
|
||||||
|
|
||||||
* Rclone version (e.g. output from `rclone version`)
|
- Rclone version (e.g. output from `rclone version`)
|
||||||
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
|
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
|
||||||
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
|
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
|
||||||
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
||||||
* if the log contains secrets then edit the file with a text editor first to obscure them
|
- if the log contains secrets then edit the file with a text editor first to obscure them
|
||||||
|
|
||||||
## Submitting a new feature or bug fix ##
|
## Submitting a new feature or bug fix
|
||||||
|
|
||||||
If you find a bug that you'd like to fix, or a new feature that you'd
|
If you find a bug that you'd like to fix, or a new feature that you'd
|
||||||
like to implement then please submit a pull request via GitHub.
|
like to implement then please submit a pull request via GitHub.
|
||||||
@@ -73,9 +73,9 @@ This is typically enough if you made a simple bug fix, otherwise please read the
|
|||||||
|
|
||||||
Make sure you
|
Make sure you
|
||||||
|
|
||||||
* Add [unit tests](#testing) for a new feature.
|
- Add [unit tests](#testing) for a new feature.
|
||||||
* Add [documentation](#writing-documentation) for a new feature.
|
- Add [documentation](#writing-documentation) for a new feature.
|
||||||
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
|
- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages).
|
||||||
|
|
||||||
When you are done with that push your changes to GitHub:
|
When you are done with that push your changes to GitHub:
|
||||||
|
|
||||||
@@ -88,9 +88,9 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
|
|||||||
|
|
||||||
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
|
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
|
||||||
|
|
||||||
## Using Git and GitHub ##
|
## Using Git and GitHub
|
||||||
|
|
||||||
### Committing your changes ###
|
### Committing your changes
|
||||||
|
|
||||||
Follow the guideline for [commit messages](#commit-messages) and then:
|
Follow the guideline for [commit messages](#commit-messages) and then:
|
||||||
|
|
||||||
@@ -107,7 +107,7 @@ You can modify the message or changes in the latest commit using:
|
|||||||
|
|
||||||
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||||
|
|
||||||
### Replacing your previously pushed commits ###
|
### Replacing your previously pushed commits
|
||||||
|
|
||||||
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
|
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
|
||||||
|
|
||||||
@@ -115,7 +115,7 @@ Your previously pushed commits are replaced by:
|
|||||||
|
|
||||||
git push --force origin my-new-feature
|
git push --force origin my-new-feature
|
||||||
|
|
||||||
### Basing your changes on the latest master ###
|
### Basing your changes on the latest master
|
||||||
|
|
||||||
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
|
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
|
||||||
|
|
||||||
@@ -149,13 +149,21 @@ If you squash commits that have been pushed to GitHub, then you will have to [re
|
|||||||
|
|
||||||
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
|
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
|
||||||
|
|
||||||
### GitHub Continuous Integration ###
|
### GitHub Continuous Integration
|
||||||
|
|
||||||
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
|
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
|
||||||
|
|
||||||
## Testing ##
|
## Testing
|
||||||
|
|
||||||
### Quick testing ###
|
### Code quality tests
|
||||||
|
|
||||||
|
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same checks that are run in the CI, which can be very helpful.
|
||||||
|
|
||||||
|
You can run them with `make check` or with `golangci-lint run ./...`.
|
||||||
|
|
||||||
|
Using these tests ensures that the whole rclone codebase uses the same coding standards. These tests also catch easy mistakes to make (like forgetting to check an error return).
|
||||||
|
|
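As a tiny illustration of what these checks enforce (this fragment is invented for this document, not taken from the rclone codebase), every error return should be handled rather than silently dropped:

```
package example

import "os"

// saveNote shows the style the checks enforce: every error return is handled.
func saveNote(path, text string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	if _, err := f.WriteString(text); err != nil { // ignoring this err is the kind of mistake golangci-lint flags
		_ = f.Close()
		return err
	}
	return f.Close()
}
```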
||||||
|
### Quick testing
|
||||||
|
|
||||||
rclone's tests are run from the go testing framework, so at the top
|
rclone's tests are run from the go testing framework, so at the top
|
||||||
level you can run this to run all the tests.
|
level you can run this to run all the tests.
|
||||||
@@ -168,7 +176,7 @@ You can also use `make`, if supported by your platform
|
|||||||
|
|
||||||
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
|
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
|
||||||
|
|
||||||
### Backend testing ###
|
### Backend testing
|
||||||
|
|
||||||
rclone contains a mixture of unit tests and integration tests.
|
rclone contains a mixture of unit tests and integration tests.
|
||||||
Because it is difficult (and in some respects pointless) to test cloud
|
Because it is difficult (and in some respects pointless) to test cloud
|
||||||
@@ -201,9 +209,9 @@ altogether with an HTML report and test retries then from the
|
|||||||
project root:
|
project root:
|
||||||
|
|
||||||
go install github.com/rclone/rclone/fstest/test_all
|
go install github.com/rclone/rclone/fstest/test_all
|
||||||
test_all -backend drive
|
test_all -backends drive
|
||||||
|
|
||||||
### Full integration testing ###
|
### Full integration testing
|
||||||
|
|
||||||
If you want to run all the integration tests against all the remotes,
|
If you want to run all the integration tests against all the remotes,
|
||||||
then change into the project root and run
|
then change into the project root and run
|
||||||
@@ -218,55 +226,56 @@ The commands may require some extra go packages which you can install with
|
|||||||
The full integration tests are run daily on the integration test server. You can
|
The full integration tests are run daily on the integration test server. You can
|
||||||
find the results at https://pub.rclone.org/integration-tests/
|
find the results at https://pub.rclone.org/integration-tests/
|
||||||
|
|
||||||
## Code Organisation ##
|
## Code Organisation
|
||||||
|
|
||||||
Rclone code is organised into a small number of top level directories
|
Rclone code is organised into a small number of top level directories
|
||||||
with modules beneath.
|
with modules beneath.
|
||||||
|
|
||||||
* backend - the rclone backends for interfacing to cloud providers -
|
- backend - the rclone backends for interfacing to cloud providers -
|
||||||
* all - import this to load all the cloud providers
|
- all - import this to load all the cloud providers
|
||||||
* ...providers
|
- ...providers
|
||||||
* bin - scripts for use while building or maintaining rclone
|
- bin - scripts for use while building or maintaining rclone
|
||||||
* cmd - the rclone commands
|
- cmd - the rclone commands
|
||||||
* all - import this to load all the commands
|
- all - import this to load all the commands
|
||||||
* ...commands
|
- ...commands
|
||||||
* cmdtest - end-to-end tests of commands, flags, environment variables,...
|
- cmdtest - end-to-end tests of commands, flags, environment variables,...
|
||||||
* docs - the documentation and website
|
- docs - the documentation and website
|
||||||
* content - adjust these docs only - everything else is autogenerated
|
- content - adjust these docs only - everything else is autogenerated
|
||||||
* command - these are auto-generated - edit the corresponding .go file
|
- command - these are auto-generated - edit the corresponding .go file
|
||||||
* fs - main rclone definitions - minimal amount of code
|
- fs - main rclone definitions - minimal amount of code
|
||||||
* accounting - bandwidth limiting and statistics
|
- accounting - bandwidth limiting and statistics
|
||||||
* asyncreader - an io.Reader which reads ahead
|
- asyncreader - an io.Reader which reads ahead
|
||||||
* config - manage the config file and flags
|
- config - manage the config file and flags
|
||||||
* driveletter - detect if a name is a drive letter
|
- driveletter - detect if a name is a drive letter
|
||||||
* filter - implements include/exclude filtering
|
- filter - implements include/exclude filtering
|
||||||
* fserrors - rclone specific error handling
|
- fserrors - rclone specific error handling
|
||||||
* fshttp - http handling for rclone
|
- fshttp - http handling for rclone
|
||||||
* fspath - path handling for rclone
|
- fspath - path handling for rclone
|
||||||
* hash - defines rclone's hash types and functions
|
- hash - defines rclone's hash types and functions
|
||||||
* list - list a remote
|
- list - list a remote
|
||||||
* log - logging facilities
|
- log - logging facilities
|
||||||
* march - iterates directories in lock step
|
- march - iterates directories in lock step
|
||||||
* object - in memory Fs objects
|
- object - in memory Fs objects
|
||||||
* operations - primitives for sync, e.g. Copy, Move
|
- operations - primitives for sync, e.g. Copy, Move
|
||||||
* sync - sync directories
|
- sync - sync directories
|
||||||
* walk - walk a directory
|
- walk - walk a directory
|
||||||
* fstest - provides integration test framework
|
- fstest - provides integration test framework
|
||||||
* fstests - integration tests for the backends
|
- fstests - integration tests for the backends
|
||||||
* mockdir - mocks an fs.Directory
|
- mockdir - mocks an fs.Directory
|
||||||
* mockobject - mocks an fs.Object
|
- mockobject - mocks an fs.Object
|
||||||
* test_all - Runs integration tests for everything
|
- test_all - Runs integration tests for everything
|
||||||
* graphics - the images used in the website, etc.
|
- graphics - the images used in the website, etc.
|
||||||
* lib - libraries used by the backend
|
- lib - libraries used by the backend
|
||||||
* atexit - register functions to run when rclone exits
|
- atexit - register functions to run when rclone exits
|
||||||
* dircache - directory ID to name caching
|
- dircache - directory ID to name caching
|
||||||
* oauthutil - helpers for using oauth
|
- oauthutil - helpers for using oauth
|
||||||
* pacer - retries with backoff and paces operations
|
- pacer - retries with backoff and paces operations
|
||||||
* readers - a selection of useful io.Readers
|
- readers - a selection of useful io.Readers
|
||||||
* rest - a thin abstraction over net/http for REST
|
- rest - a thin abstraction over net/http for REST
|
||||||
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
|
- librclone - in memory interface to rclone's API for embedding rclone (see the sketch after this list)
|
||||||
|
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
|
||||||
|
|
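For example, the `backend/all` and `librclone` entries above fit together when embedding rclone in another Go program. The following is only a rough sketch of that idea; check the `librclone` package docs for the exact API before relying on it:

```
package main

import (
	_ "github.com/rclone/rclone/backend/all" // import this to load all the cloud providers

	"github.com/rclone/rclone/librclone/librclone"
)

func main() {
	librclone.Initialize()
	defer librclone.Finalize()
	// Call an rc method over the in-memory API and print the JSON reply.
	out, status := librclone.RPC("core/version", "{}")
	println(status, out)
}
```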
||||||
## Writing Documentation ##
|
## Writing Documentation
|
||||||
|
|
||||||
If you are adding a new feature then please update the documentation.
|
If you are adding a new feature then please update the documentation.
|
||||||
|
|
||||||
@@ -277,22 +286,22 @@ alphabetical order.
|
|||||||
If you add a new backend option/flag, then it should be documented in
|
If you add a new backend option/flag, then it should be documented in
|
||||||
the source file in the `Help:` field.
|
the source file in the `Help:` field.
|
||||||
|
|
||||||
* Start with the most important information about the option,
|
- Start with the most important information about the option,
|
||||||
as a single sentence on a single line.
|
as a single sentence on a single line.
|
||||||
* This text will be used for the command-line flag help.
|
- This text will be used for the command-line flag help.
|
||||||
* It will be combined with other information, such as any default value,
|
- It will be combined with other information, such as any default value,
|
||||||
and the result will look odd if not written as a single sentence.
|
and the result will look odd if not written as a single sentence.
|
||||||
* It should end with a period/full stop character, which will be shown
|
- It should end with a period/full stop character, which will be shown
|
||||||
in docs but automatically removed when producing the flag help.
|
in docs but automatically removed when producing the flag help.
|
||||||
* Try to keep it below 80 characters, to reduce text wrapping in the terminal.
|
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
|
||||||
* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
|
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
|
||||||
* Like with docs generated from Markdown, a single line break is ignored
|
- Like with docs generated from Markdown, a single line break is ignored
|
||||||
and two line breaks creates a new paragraph.
|
and two line breaks creates a new paragraph.
|
||||||
* This text will be shown to the user in `rclone config`
|
- This text will be shown to the user in `rclone config`
|
||||||
and in the docs (where it will be added by `make backenddocs`,
|
and in the docs (where it will be added by `make backenddocs`,
|
||||||
normally run some time before next release).
|
normally run some time before next release).
|
||||||
* To create options of enumeration type use the `Examples:` field.
|
- To create options of enumeration type use the `Examples:` field (see the sketch at the end of this section).
|
||||||
* Each example value have their own `Help:` field, but they are treated
|
- Each example value has its own `Help:` field, but they are treated
|
||||||
a bit different than the main option help text. They will be shown
|
a bit differently from the main option help text. They will be shown
|
||||||
as an unordered list, therefore a single line break is enough to
|
as an unordered list, therefore a single line break is enough to
|
||||||
create a new list item. Also, for enumeration texts like name of
|
create a new list item. Also, for enumeration texts like name of
|
||||||
@@ -312,12 +321,12 @@ combined unmodified with other information (such as any default value).
|
|||||||
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
|
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
|
||||||
for small changes in the docs which makes it very easy.
|
for small changes in the docs which makes it very easy.
|
||||||
|
|
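To illustrate the `Help:` and `Examples:` guidance above, a backend option might be declared roughly as follows. This is a hedged sketch with invented names and values, not code from a real backend, and the exact `fs.Option` fields should be checked against the current source:

```
package example

import "github.com/rclone/rclone/fs"

// Sketch only: shows the Help/Examples layout described above.
var uploadSpeedOption = fs.Option{
	Name: "upload_speed",
	Help: `Maximum upload speed to request from the server.

More detail can go in a second paragraph after an empty line.`,
	Default: "auto",
	Examples: []fs.OptionExample{{
		Value: "auto",
		Help:  "Let the server choose.",
	}, {
		Value: "fast",
		Help:  "Ask for the fastest tier.",
	}},
	Advanced: true,
}
```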
||||||
## Making a release ##
|
## Making a release
|
||||||
|
|
||||||
There are separate instructions for making a release in the RELEASE.md
|
There are separate instructions for making a release in the RELEASE.md
|
||||||
file.
|
file.
|
||||||
|
|
||||||
## Commit messages ##
|
## Commit messages
|
||||||
|
|
||||||
Please make the first line of your commit message a summary of the
|
Please make the first line of your commit message a summary of the
|
||||||
change that a user (not a developer) of rclone would like to read, and
|
change that a user (not a developer) of rclone would like to read, and
|
||||||
@@ -358,7 +367,7 @@ error fixing the hang.
|
|||||||
Fixes #1498
|
Fixes #1498
|
||||||
```
|
```
|
||||||
|
|
||||||
## Adding a dependency ##
|
## Adding a dependency
|
||||||
|
|
||||||
rclone uses the [go
|
rclone uses the [go
|
||||||
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
|
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
|
||||||
@@ -370,7 +379,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
|
|||||||
instructions below. These will fetch the dependency and add it to
|
instructions below. These will fetch the dependency and add it to
|
||||||
`go.mod` and `go.sum`.
|
`go.mod` and `go.sum`.
|
||||||
|
|
||||||
GO111MODULE=on go get github.com/ncw/new_dependency
|
go get github.com/ncw/new_dependency
|
||||||
|
|
||||||
You can add constraints on that package when doing `go get` (see the
|
You can add constraints on that package when doing `go get` (see the
|
||||||
go docs linked above), but don't unless you really need to.
|
go docs linked above), but don't unless you really need to.
|
||||||
@@ -378,15 +387,15 @@ go docs linked above), but don't unless you really need to.
|
|||||||
Please check in the changes generated by `go mod` including `go.mod`
|
Please check in the changes generated by `go mod` including `go.mod`
|
||||||
and `go.sum` in the same commit as your other changes.
|
and `go.sum` in the same commit as your other changes.
|
||||||
|
|
||||||
## Updating a dependency ##
|
## Updating a dependency
|
||||||
|
|
||||||
If you need to update a dependency then run
|
If you need to update a dependency then run
|
||||||
|
|
||||||
GO111MODULE=on go get -u golang.org/x/crypto
|
go get golang.org/x/crypto
|
||||||
|
|
||||||
Check in a single commit as above.
|
Check in a single commit as above.
|
||||||
|
|
||||||
## Updating all the dependencies ##
|
## Updating all the dependencies
|
||||||
|
|
||||||
In order to update all the dependencies then run `make update`. This
|
In order to update all the dependencies then run `make update`. This
|
||||||
just uses the go modules to update all the modules to their latest
|
just uses the go modules to update all the modules to their latest
|
||||||
@@ -395,7 +404,7 @@ stable release. Check in the changes in a single commit as above.
|
|||||||
This should be done early in the release cycle to pick up new versions
|
This should be done early in the release cycle to pick up new versions
|
||||||
of packages in time for them to get some testing.
|
of packages in time for them to get some testing.
|
||||||
|
|
||||||
## Updating a backend ##
|
## Updating a backend
|
||||||
|
|
||||||
If you update a backend then please run the unit tests and the
|
If you update a backend then please run the unit tests and the
|
||||||
integration tests for that backend.
|
integration tests for that backend.
|
||||||
@@ -410,82 +419,133 @@ integration tests.
|
|||||||
|
|
||||||
The next section goes into more detail about the tests.
|
The next section goes into more detail about the tests.
|
||||||
|
|
||||||
## Writing a new backend ##
|
## Writing a new backend
|
||||||
|
|
||||||
Choose a name. The docs here will use `remote` as an example.
|
Choose a name. The docs here will use `remote` as an example.
|
||||||
|
|
||||||
Note that in rclone terminology a file system backend is called a
|
Note that in rclone terminology a file system backend is called a
|
||||||
remote or an fs.
|
remote or an fs.
|
||||||
|
|
||||||
Research
|
### Research
|
||||||
|
|
||||||
* Look at the interfaces defined in `fs/fs.go`
|
- Look at the interfaces defined in `fs/types.go`
|
||||||
* Study one or more of the existing remotes
|
- Study one or more of the existing remotes
|
||||||
|
|
||||||
Getting going
|
### Getting going
|
||||||
|
|
||||||
* Create `backend/remote/remote.go` (copy this from a similar remote)
|
- Create `backend/remote/remote.go` (copy this from a similar remote; a minimal registration skeleton is sketched after this list)
|
||||||
* box is a good one to start from if you have a directory-based remote
|
- box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
|
||||||
* b2 is a good one to start from if you have a bucket-based remote
|
- b2 is a good one to start from if you have a bucket-based remote
|
||||||
* Add your remote to the imports in `backend/all/all.go`
|
- Add your remote to the imports in `backend/all/all.go`
|
||||||
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
|
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
|
||||||
* Try to implement as many optional methods as possible as it makes the remote more usable.
|
- Try to implement as many optional methods as possible as it makes the remote more usable.
|
||||||
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
|
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
|
||||||
* `rclone purge -v TestRemote:rclone-info`
|
- `rclone purge -v TestRemote:rclone-info`
|
||||||
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
||||||
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
|
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
|
||||||
* open `remote.csv` in a spreadsheet and examine
|
- open `remote.csv` in a spreadsheet and examine
|
||||||
|
|
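For orientation, the file you copy will start with something like the registration skeleton below. This is only a sketch with a hypothetical `remote` backend; copy the real structure (and the current `NewFs` signature) from an existing backend such as box or b2:

```
// backend/remote/remote.go (sketch only)
package remote

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",
		Description: "Example remote (sketch)",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "endpoint",
			Help: "Endpoint for the service.",
		}},
	})
}

// NewFs constructs an fs.Fs from the name and root path.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("not implemented - copy the full method set from an existing backend")
}
```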
||||||
Unit tests
|
### Guidelines for a speedy merge
|
||||||
|
|
||||||
* Create a config entry called `TestRemote` for the unit tests to use
|
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST-like backend and parsing XML/JSON in the backend (see the sketch after this list).
|
||||||
* Create a `backend/remote/remote_test.go` - copy and adjust your example remote
|
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
|
||||||
* Make sure all tests pass with `go test -v`
|
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
|
||||||
|
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
|
||||||
|
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
|
||||||
|
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
|
||||||
|
|
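To make the lib/rest and fshttp points concrete, here is a rough sketch of the usual pattern. The endpoint, type and function names are invented, and the wire types would normally live in `api/types.go` as suggested above:

```
package remote

import (
	"context"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// Item would normally live in api/types.go as the provider's wire format.
type Item struct {
	ID   string `json:"id"`
	Name string `json:"name"`
	Size int64  `json:"size"`
}

// getItem shows the usual shape: a rest.Client built on fshttp's HTTP client,
// so --dump bodies, --tpslimit and --user-agent work without extra code.
func getItem(ctx context.Context, id string) (*Item, error) {
	srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot("https://api.example.com/v1") // invented endpoint
	opts := rest.Opts{
		Method: "GET",
		Path:   "/items/" + id,
	}
	var result Item
	_, err := srv.CallJSON(ctx, &opts, nil, &result)
	if err != nil {
		return nil, err
	}
	return &result, nil
}
```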
||||||
Integration tests
|
### Unit tests
|
||||||
|
|
||||||
* Add your backend to `fstest/test_all/config.yaml`
|
- Create a config entry called `TestRemote` for the unit tests to use
|
||||||
* Once you've done that then you can use the integration test framework from the project root:
|
- Create a `backend/remote/remote_test.go` - copy and adjust your example remote (a minimal sketch follows below)
|
||||||
* go install ./...
|
- Make sure all tests pass with `go test -v`
|
||||||
* test_all -backends remote
|
|
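The test file itself is normally just a small shim onto the shared test suite, along these lines (a sketch which assumes your backend exports an `Object` type; adjust the import path and names to your backend):

```
// backend/remote/remote_test.go (sketch only)
package remote_test

import (
	"testing"

	"github.com/rclone/rclone/backend/remote"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs the shared backend test suite against TestRemote:
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestRemote:",
		NilObject:  (*remote.Object)(nil),
	})
}
```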
||||||
|
### Integration tests
|
||||||
|
|
||||||
|
- Add your backend to `fstest/test_all/config.yaml`
|
||||||
|
- Once you've done that then you can use the integration test framework from the project root:
|
||||||
|
- go install ./...
|
||||||
|
- test_all -backends remote
|
||||||
|
|
||||||
Or if you want to run the integration tests manually:
|
Or if you want to run the integration tests manually:
|
||||||
|
|
||||||
* Make sure integration tests pass with
|
- Make sure integration tests pass with
|
||||||
* `cd fs/operations`
|
- `cd fs/operations`
|
||||||
* `go test -v -remote TestRemote:`
|
- `go test -v -remote TestRemote:`
|
||||||
* `cd fs/sync`
|
- `cd fs/sync`
|
||||||
* `go test -v -remote TestRemote:`
|
- `go test -v -remote TestRemote:`
|
||||||
* If your remote defines `ListR` check with this also
|
- If your remote defines `ListR` check with this also
|
||||||
* `go test -v -remote TestRemote: -fast-list`
|
- `go test -v -remote TestRemote: -fast-list`
|
||||||
|
|
||||||
See the [testing](#testing) section for more information on integration tests.
|
See the [testing](#testing) section for more information on integration tests.
|
||||||
|
|
||||||
Add your fs to the docs - you'll need to pick an icon for it from
|
### Backend documentation
|
||||||
|
|
||||||
|
Add your backend to the docs - you'll need to pick an icon for it from
|
||||||
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
|
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
|
||||||
alphabetical order of full name of remote (e.g. `drive` is ordered as
|
alphabetical order of full name of remote (e.g. `drive` is ordered as
|
||||||
`Google Drive`) but with the local file system last.
|
`Google Drive`) but with the local file system last.
|
||||||
|
|
||||||
* `README.md` - main GitHub page
|
- `README.md` - main GitHub page
|
||||||
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
|
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
|
||||||
* make sure this has the `autogenerated options` comments in (see your reference backend docs)
|
- make sure this has the `autogenerated options` comments in (see your reference backend docs)
|
||||||
* update them with `make backenddocs` - revert any changes in other backends
|
- update them in your backend with `bin/make_backend_docs.py remote`
|
||||||
* `docs/content/overview.md` - overview docs
|
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
|
||||||
* `docs/content/docs.md` - list of remotes in config section
|
- `docs/content/docs.md` - list of remotes in config section
|
||||||
* `docs/content/_index.md` - front page of rclone.org
|
- `docs/content/_index.md` - front page of rclone.org
|
||||||
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
|
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
|
||||||
* `bin/make_manual.py` - add the page to the `docs` constant
|
- `bin/make_manual.py` - add the page to the `docs` constant
|
||||||
|
|
||||||
Once you've written the docs, run `make serve` and check they look OK
|
Once you've written the docs, run `make serve` and check they look OK
|
||||||
in the web browser and the links (internal and external) all work.
|
in the web browser and the links (internal and external) all work.
|
||||||
|
|
||||||
## Writing a plugin ##
|
## Adding a new s3 provider
|
||||||
|
|
||||||
|
It is quite easy to add a new S3 provider to rclone.
|
||||||
|
|
||||||
|
You'll need to modify the following files
|
||||||
|
|
||||||
|
- `backend/s3/s3.go`
|
||||||
|
- Add the provider to `providerOption` at the top of the file
|
||||||
|
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
|
||||||
|
- Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
|
||||||
|
- Add the provider to the `setQuirks` function - see the documentation there.
|
||||||
|
- `docs/content/s3.md`
|
||||||
|
- Add the provider at the top of the page.
|
||||||
|
- Add a section about the provider linked from there.
|
||||||
|
- Add a transcript of a trial `rclone config` session
|
||||||
|
- Edit the transcript to remove things which might change in subsequent versions
|
||||||
|
- **Do not** alter or add to the autogenerated parts of `s3.md`
|
||||||
|
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
|
||||||
|
- `README.md` - this is the home page on GitHub
|
||||||
|
- Add the provider and a link to the section you wrote in `docs/content/s3.md`
|
||||||
|
- `docs/content/_index.md` - this is the home page of rclone.org
|
||||||
|
- Add the provider and a link to the section you wrote in `docs/content/s3.md`
|
||||||
|
|
||||||
|
When adding the provider, endpoints, quirks, docs etc keep them in
|
||||||
|
alphabetical order by `Provider` name, but with `AWS` first and
|
||||||
|
`Other` last.
|
||||||
|
|
||||||
|
Once you've written the docs, run `make serve` and check they look OK
|
||||||
|
in the web browser and the links (internal and external) all work.
|
||||||
|
|
||||||
|
Once you've written the code, test `rclone config` works to your
|
||||||
|
satisfaction, and check the integration tests pass with `go test -v -remote
|
||||||
|
NewS3Provider:`. You may need to adjust the quirks to get them to
|
||||||
|
pass. Some providers just can't pass the tests with control characters
|
||||||
|
in the names so if these fail and the provider doesn't support
|
||||||
|
`urlEncodeListings` in the quirks then ignore them. Note that the
|
||||||
|
`SetTier` test may also fail on non-AWS providers.
|
||||||
|
|
||||||
|
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
|
||||||
|
|
||||||
|
## Writing a plugin
|
||||||
|
|
||||||
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
|
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
|
||||||
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
|
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
|
||||||
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
|
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
|
||||||
|
|
||||||
Usage
|
### Usage
|
||||||
|
|
||||||
- Naming
|
- Naming
|
||||||
- Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
|
- Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
|
||||||
@@ -500,7 +560,7 @@ Usage
|
|||||||
- Plugins must be compiled against the exact version of rclone to work.
|
- Plugins must be compiled against the exact version of rclone to work.
|
||||||
(The rclone used during building the plugin must be the same as the source of rclone)
|
(The rclone used during building the plugin must be the same as the source of rclone)
|
||||||
|
|
||||||
Building
|
### Building
|
||||||
|
|
||||||
To turn your existing additions into a Go plugin, move them to an external repository
|
To turn your existing additions into a Go plugin, move them to an external repository
|
||||||
and change the top-level package name to `main`.
|
and change the top-level package name to `main`.
|
||||||
|
|||||||
@@ -1,8 +1,9 @@
|
|||||||
FROM golang AS builder
|
FROM golang:alpine AS builder
|
||||||
|
|
||||||
COPY . /go/src/github.com/rclone/rclone/
|
COPY . /go/src/github.com/rclone/rclone/
|
||||||
WORKDIR /go/src/github.com/rclone/rclone/
|
WORKDIR /go/src/github.com/rclone/rclone/
|
||||||
|
|
||||||
|
RUN apk add --no-cache make bash gawk git
|
||||||
RUN \
|
RUN \
|
||||||
CGO_ENABLED=0 \
|
CGO_ENABLED=0 \
|
||||||
make
|
make
|
||||||
|
|||||||
@@ -18,6 +18,11 @@ Current active maintainers of rclone are:
|
|||||||
| Caleb Case | @calebcase | storj backend |
|
| Caleb Case | @calebcase | storj backend |
|
||||||
| wiserain | @wiserain | pikpak backend |
|
| wiserain | @wiserain | pikpak backend |
|
||||||
| albertony | @albertony | |
|
| albertony | @albertony | |
|
||||||
|
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
|
||||||
|
| Hideo Aoyama | @boukendesho | snap packaging |
|
||||||
|
| nielash | @nielash | bisync |
|
||||||
|
| Dan McArdle | @dmcardle | gitannex |
|
||||||
|
| Sam Harrison | @childish-sambino | filescom |
|
||||||
|
|
||||||
**This is a work in progress Draft**
|
**This is a work-in-progress draft**
|
||||||
|
|
||||||
|
|||||||
16866 MANUAL.html (generated)
File diff suppressed because it is too large
19457 MANUAL.txt (generated)
File diff suppressed because it is too large
56 Makefile
@@ -30,29 +30,37 @@ ifdef RELEASE_TAG
|
|||||||
TAG := $(RELEASE_TAG)
|
TAG := $(RELEASE_TAG)
|
||||||
endif
|
endif
|
||||||
GO_VERSION := $(shell go version)
|
GO_VERSION := $(shell go version)
|
||||||
|
GO_OS := $(shell go env GOOS)
|
||||||
ifdef BETA_SUBDIR
|
ifdef BETA_SUBDIR
|
||||||
BETA_SUBDIR := /$(BETA_SUBDIR)
|
BETA_SUBDIR := /$(BETA_SUBDIR)
|
||||||
endif
|
endif
|
||||||
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
|
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
|
||||||
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
||||||
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
|
BETA_UPLOAD_ROOT := beta.rclone.org:
|
||||||
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
|
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
|
||||||
# Pass in GOTAGS=xyz on the make command line to set build tags
|
# Pass in GOTAGS=xyz on the make command line to set build tags
|
||||||
ifdef GOTAGS
|
ifdef GOTAGS
|
||||||
BUILDTAGS=-tags "$(GOTAGS)"
|
BUILDTAGS=-tags "$(GOTAGS)"
|
||||||
LINTTAGS=--build-tags "$(GOTAGS)"
|
LINTTAGS=--build-tags "$(GOTAGS)"
|
||||||
endif
|
endif
|
||||||
|
LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"
|
||||||
|
|
||||||
.PHONY: rclone test_all vars version
|
.PHONY: rclone test_all vars version
|
||||||
|
|
||||||
rclone:
|
rclone:
|
||||||
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
|
ifeq ($(GO_OS),windows)
|
||||||
|
go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
|
||||||
|
endif
|
||||||
|
go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
|
||||||
|
ifeq ($(GO_OS),windows)
|
||||||
|
rm resource_windows_`go env GOARCH`.syso
|
||||||
|
endif
|
||||||
mkdir -p `go env GOPATH`/bin/
|
mkdir -p `go env GOPATH`/bin/
|
||||||
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
|
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
|
||||||
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
|
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
|
||||||
|
|
||||||
test_all:
|
test_all:
|
||||||
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
|
go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
|
||||||
|
|
||||||
vars:
|
vars:
|
||||||
@echo SHELL="'$(SHELL)'"
|
@echo SHELL="'$(SHELL)'"
|
||||||
@@ -66,6 +74,10 @@ btest:
|
|||||||
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
|
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
|
||||||
@echo "Copied markdown of beta release to clip board"
|
@echo "Copied markdown of beta release to clip board"
|
||||||
|
|
||||||
|
btesth:
|
||||||
|
@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
|
||||||
|
@echo "Copied beta release in HTML to clip board"
|
||||||
|
|
||||||
version:
|
version:
|
||||||
@echo '$(TAG)'
|
@echo '$(TAG)'
|
||||||
|
|
||||||
@@ -76,13 +88,13 @@ test: rclone test_all
|
|||||||
|
|
||||||
# Quick test
|
# Quick test
|
||||||
quicktest:
|
quicktest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
|
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
|
||||||
|
|
||||||
racequicktest:
|
racequicktest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
|
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
|
||||||
|
|
||||||
compiletest:
|
compiletest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
|
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
|
||||||
|
|
||||||
# Do source code quality checks
|
# Do source code quality checks
|
||||||
check: rclone
|
check: rclone
|
||||||
@@ -92,16 +104,12 @@ check: rclone
|
|||||||
|
|
||||||
# Get the build dependencies
|
# Get the build dependencies
|
||||||
build_dep:
|
build_dep:
|
||||||
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
|
go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
|
||||||
|
|
||||||
# Get the release dependencies we only install on linux
|
# Get the release dependencies we only install on linux
|
||||||
release_dep_linux:
|
release_dep_linux:
|
||||||
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
|
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
|
||||||
|
|
||||||
# Get the release dependencies we only install on Windows
|
|
||||||
release_dep_windows:
|
|
||||||
GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
|
|
||||||
|
|
||||||
# Update dependencies
|
# Update dependencies
|
||||||
showupdates:
|
showupdates:
|
||||||
@echo "*** Direct dependencies that could be updated ***"
|
@echo "*** Direct dependencies that could be updated ***"
|
||||||
@@ -136,17 +144,21 @@ MANUAL.txt: MANUAL.md
|
|||||||
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
|
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
|
||||||
|
|
||||||
commanddocs: rclone
|
commanddocs: rclone
|
||||||
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
|
-@rmdir -p '$$HOME/.config/rclone'
|
||||||
|
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
|
||||||
|
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
|
||||||
|
|
||||||
backenddocs: rclone bin/make_backend_docs.py
|
backenddocs: rclone bin/make_backend_docs.py
|
||||||
|
-@rmdir -p '$$HOME/.config/rclone'
|
||||||
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
|
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
|
||||||
|
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
|
||||||
|
|
||||||
rcdocs: rclone
|
rcdocs: rclone
|
||||||
bin/make_rc_docs.sh
|
bin/make_rc_docs.sh
|
||||||
|
|
||||||
install: rclone
|
install: rclone
|
||||||
install -d ${DESTDIR}/usr/bin
|
install -d ${DESTDIR}/usr/bin
|
||||||
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
|
install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
go clean ./...
|
go clean ./...
|
||||||
@@ -160,7 +172,7 @@ website:
|
|||||||
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
|
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
|
||||||
|
|
||||||
upload_website: website
|
upload_website: website
|
||||||
rclone -v sync docs/public memstore:www-rclone-org
|
rclone -v sync docs/public www.rclone.org:
|
||||||
|
|
||||||
upload_test_website: website
|
upload_test_website: website
|
||||||
rclone -P sync docs/public test-rclone-org:
|
rclone -P sync docs/public test-rclone-org:
|
||||||
@@ -187,8 +199,8 @@ check_sign:
|
|||||||
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
|
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
|
||||||
|
|
||||||
upload:
|
upload:
|
||||||
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
|
rclone -P copy build/ downloads.rclone.org:/$(TAG)
|
||||||
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
|
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"'
|
||||||
|
|
||||||
upload_github:
|
upload_github:
|
||||||
./bin/upload-github $(TAG)
|
./bin/upload-github $(TAG)
|
||||||
@@ -198,7 +210,7 @@ cross: doc
|
|||||||
|
|
||||||
beta:
|
beta:
|
||||||
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
||||||
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
|
rclone -v copy build/ pub.rclone.org:/$(TAG)
|
||||||
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
|
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
|
||||||
|
|
||||||
log_since_last_release:
|
log_since_last_release:
|
||||||
@@ -211,18 +223,18 @@ ci_upload:
|
|||||||
sudo chown -R $$USER build
|
sudo chown -R $$USER build
|
||||||
find build -type l -delete
|
find build -type l -delete
|
||||||
gzip -r9v build
|
gzip -r9v build
|
||||||
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
|
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
|
||||||
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
||||||
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
|
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
|
||||||
endif
|
endif
|
||||||
@echo Beta release ready at $(BETA_URL)/testbuilds
|
@echo Beta release ready at $(BETA_URL)/testbuilds
|
||||||
|
|
||||||
ci_beta:
|
ci_beta:
|
||||||
git log $(LAST_TAG).. > /tmp/git-log.txt
|
git log $(LAST_TAG).. > /tmp/git-log.txt
|
||||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
||||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||||
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
||||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
|
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
|
||||||
endif
|
endif
|
||||||
@echo Beta release ready at $(BETA_URL)
|
@echo Beta release ready at $(BETA_URL)
|
||||||
|
|
||||||
@@ -231,7 +243,7 @@ fetch_binaries:
|
|||||||
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
|
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
|
||||||
|
|
||||||
serve: website
|
serve: website
|
||||||
cd docs && hugo server -v -w --disableFastRender
|
cd docs && hugo server --logLevel info -w --disableFastRender
|
||||||
|
|
||||||
tag: retag doc
|
tag: retag doc
|
||||||
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
|
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
|
||||||
|
|||||||
38 README.md
@@ -1,3 +1,21 @@
|
|||||||
|
<div align="center">
|
||||||
|
<sup>Special thanks to our sponsor:</sup>
|
||||||
|
<br>
|
||||||
|
<br>
|
||||||
|
<a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
|
||||||
|
<div>
|
||||||
|
<img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
|
||||||
|
</div>
|
||||||
|
<b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
|
||||||
|
<div>
|
||||||
|
<sup>Visit warp.dev to learn more.</sup>
|
||||||
|
</div>
|
||||||
|
</a>
|
||||||
|
<br>
|
||||||
|
<hr>
|
||||||
|
</div>
|
||||||
|
<br>
|
||||||
|
|
||||||
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
|
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
|
||||||
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
|
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
|
||||||
|
|
||||||
@@ -23,7 +41,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
|||||||
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
||||||
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
||||||
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||||
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
|
|
||||||
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||||
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
||||||
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
||||||
@@ -38,25 +55,35 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
|||||||
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||||
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||||
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
|
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
|
||||||
|
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
|
||||||
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||||
|
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
|
||||||
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||||
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||||
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||||
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
||||||
|
* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
|
||||||
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||||
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||||
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
||||||
|
* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
|
||||||
|
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
|
||||||
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
||||||
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||||
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||||
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
|
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
|
||||||
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||||
|
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
|
||||||
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
|
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
|
||||||
|
* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
|
||||||
|
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
|
||||||
|
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
|
||||||
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
||||||
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
||||||
* Memory [:page_facing_up:](https://rclone.org/memory/)
|
* Memory [:page_facing_up:](https://rclone.org/memory/)
|
||||||
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
||||||
|
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
|
||||||
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
|
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
|
||||||
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
||||||
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
||||||
@@ -66,25 +93,33 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
|||||||
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
|
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
|
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
|
||||||
|
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
|
||||||
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
||||||
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
||||||
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
||||||
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
|
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
|
||||||
|
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
|
||||||
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
||||||
* put.io [:page_facing_up:](https://rclone.org/putio/)
|
* put.io [:page_facing_up:](https://rclone.org/putio/)
|
||||||
|
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
|
||||||
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||||
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
||||||
|
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
|
||||||
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
||||||
|
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
|
||||||
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||||
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||||
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||||
|
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
|
||||||
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||||
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
|
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
|
||||||
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||||
* Storj [:page_facing_up:](https://rclone.org/storj/)
|
* Storj [:page_facing_up:](https://rclone.org/storj/)
|
||||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||||
|
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
|
||||||
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||||
|
* Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
|
||||||
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||||
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||||
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
||||||
@@ -113,6 +148,7 @@ These backends adapt or modify other storage providers
|
|||||||
* Partial syncs supported on a whole file basis
|
* Partial syncs supported on a whole file basis
|
||||||
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
|
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
|
||||||
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
|
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
|
||||||
|
* [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
|
||||||
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
||||||
* Can sync to and from network, e.g. two different cloud accounts
|
* Can sync to and from network, e.g. two different cloud accounts
|
||||||
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
||||||
|
|||||||
110 RELEASE.md
@@ -37,16 +37,45 @@ This file describes how to make the various kinds of releases
|
|||||||
|
|
||||||
## Update dependencies
|
## Update dependencies
|
||||||
|
|
||||||
Early in the next release cycle update the dependencies
|
Early in the next release cycle update the dependencies.
|
||||||
|
|
||||||
* Review any pinned packages in go.mod and remove if possible
|
* Review any pinned packages in go.mod and remove if possible
|
||||||
* make updatedirect
|
* `make updatedirect`
|
||||||
* make
|
* `make GOTAGS=cmount`
|
||||||
* git commit -a -v
|
* `make compiletest`
|
||||||
* make update
|
* Fix anything which doesn't compile at this point and commit changes here
|
||||||
* make
|
* `git commit -a -v -m "build: update all dependencies"`
|
||||||
|
|
||||||
|
If the `make updatedirect` upgrades the version of go in the `go.mod`
|
||||||
|
then go to manual mode. `go1.20` here is the lowest supported version
|
||||||
|
in the `go.mod`.
|
||||||
|
|
||||||
|
```
|
||||||
|
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
|
||||||
|
go get -d $(cat /tmp/potential-upgrades)
|
||||||
|
go mod tidy -go=1.20 -compat=1.20
|
||||||
|
```
|
||||||
|
|
||||||
|
If the `go mod tidy` fails, use the output from it to remove the
|
||||||
|
package which can't be upgraded from `/tmp/potential-upgrades` when
|
||||||
|
done
|
||||||
|
|
||||||
|
```
|
||||||
|
git co go.mod go.sum
|
||||||
|
```
|
||||||
|
|
||||||
|
And try again.
|
||||||
|
|
||||||
|
Optionally upgrade the direct and indirect dependencies. This is very
|
||||||
|
likely to fail if the manual method was used above - in that case
|
||||||
|
ignore it as it is too time-consuming to fix.
|
||||||
|
|
||||||
|
* `make update`
|
||||||
|
* `make GOTAGS=cmount`
|
||||||
|
* `make compiletest`
|
||||||
* roll back any updates which didn't compile
|
* roll back any updates which didn't compile
|
||||||
* git commit -a -v --amend
|
* `git commit -a -v --amend`
|
||||||
|
* **NB** watch out for this changing the default go version in `go.mod`
|
||||||
|
|
||||||
Note that `make update` updates all direct and indirect dependencies
|
Note that `make update` updates all direct and indirect dependencies
|
||||||
and there can occasionally be forwards compatibility problems with
|
and there can occasionally be forwards compatibility problems with
|
||||||
@@ -54,6 +83,9 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
+
+Once it compiles locally, push it on a test branch and commit fixes
+until the tests pass.

## Tidy beta

At some point after the release run
@@ -90,34 +122,54 @@ Now
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
+
+## Sponsor logos
+
+If updating the website note that the sponsor logos have been moved out of the main repository.
+
+You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
+which is a private repo containing artwork from sponsors.
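The exact fetch procedure for those logos isn't spelled out in the hunk above, so the following is just one possible way to do it, assuming you have access to the private repo and that its contents map straight onto the docs directory mentioned there.

```
# Sketch only: clone the private artwork repo and copy it into the docs tree.
git clone git@github.com:rclone/third-party-logos.git /tmp/third-party-logos
cp -R /tmp/third-party-logos/* docs/static/img/logos/
```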
+
+## Update the website between releases
+
+Create an update website branch based off the last release
+
+    git co -b update-website
+
+If the branch already exists, double check there are no commits that need saving.
+
+Now reset the branch to the last release
+
+    git reset --hard v1.64.0
+
+Create the changes, check them in, test with `make serve` then
+
+    make upload_test_website
+
+Check out https://test.rclone.org and when happy
+
+    make upload_website
+
+Cherry pick any changes back to master and the stable branch if it is active.
+
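The cherry-pick step above is described in prose only; a minimal sketch of what it could look like follows. `<sha>` is a placeholder for each website commit, and the stable branch name `v1.64-stable` is an assumption, not something stated in the diff.

```
# Sketch of cherry-picking website commits back, under the assumptions above.
git checkout master
git cherry-pick <sha>
# if a stable release branch is active:
git checkout v1.64-stable
git cherry-pick <sha>
```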
## Making a manual build of docker

-The rclone docker image should autobuild on via GitHub actions. If it doesn't
-or needs to be updated then rebuild like this.
-
-See: https://github.com/ilteoood/docker_buildx/issues/19
-See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
+To do a basic build of rclone's docker image to debug builds locally:

```
-git co v1.54.1
-docker pull golang
-export DOCKER_CLI_EXPERIMENTAL=enabled
-docker buildx create --name actions_builder --use
-docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
-docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
-SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
-echo "Supported platforms: $SUPPORTED_PLATFORMS"
-docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
-docker buildx stop actions_builder
+docker buildx build --load -t rclone/rclone:testing --progress=plain .
+docker run --rm rclone/rclone:testing version
```

-### Old build for linux/amd64 only
+To test the multiplatform build

```
-docker pull golang
-docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
-docker push rclone/rclone:1.52.0
-docker push rclone/rclone:1.52
-docker push rclone/rclone:1
-docker push rclone/rclone:latest
+docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
+```
+
+To make a full build then set the tags correctly and add `--push`
+
+Note that you can't only build one architecture - you need to build them all.
+
+```
+docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```
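As an aside on the docker hunk above (not part of the release doc itself): after a full `--push` build, one way to sanity check the result is to inspect the pushed manifest and run the locally loaded test image.

```
# Confirm all expected architectures are present in the pushed manifest.
docker buildx imagetools inspect rclone/rclone:latest
# Run the single-arch image built earlier with --load.
docker run --rm rclone/rclone:testing version
```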
@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
	configfile.Install()

	// Configure the remote
-	config.FileSet(remoteName, "type", "alias")
-	config.FileSet(remoteName, "remote", root)
+	config.FileSetValue(remoteName, "type", "alias")
+	config.FileSetValue(remoteName, "remote", root)
}

func TestNewFS(t *testing.T) {
@@ -81,10 +81,12 @@ func TestNewFS(t *testing.T) {
	for i, gotEntry := range gotEntries {
		what := fmt.Sprintf("%s, entry=%d", what, i)
		wantEntry := test.entries[i]
+		_, isDir := gotEntry.(fs.Directory)

		require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
-		require.Equal(t, wantEntry.size, gotEntry.Size(), what)
-		_, isDir := gotEntry.(fs.Directory)
+		if !isDir {
+			require.Equal(t, wantEntry.size, gotEntry.Size(), what)
+		}
		require.Equal(t, wantEntry.isDir, isDir, what)
	}
}
@@ -4,8 +4,8 @@ package all
import (
	// Active file systems
	_ "github.com/rclone/rclone/backend/alias"
-	_ "github.com/rclone/rclone/backend/amazonclouddrive"
	_ "github.com/rclone/rclone/backend/azureblob"
+	_ "github.com/rclone/rclone/backend/azurefiles"
	_ "github.com/rclone/rclone/backend/b2"
	_ "github.com/rclone/rclone/backend/box"
	_ "github.com/rclone/rclone/backend/cache"
@@ -17,16 +17,21 @@ import (
	_ "github.com/rclone/rclone/backend/dropbox"
	_ "github.com/rclone/rclone/backend/fichier"
	_ "github.com/rclone/rclone/backend/filefabric"
+	_ "github.com/rclone/rclone/backend/filescom"
	_ "github.com/rclone/rclone/backend/ftp"
+	_ "github.com/rclone/rclone/backend/gofile"
	_ "github.com/rclone/rclone/backend/googlecloudstorage"
	_ "github.com/rclone/rclone/backend/googlephotos"
	_ "github.com/rclone/rclone/backend/hasher"
	_ "github.com/rclone/rclone/backend/hdfs"
	_ "github.com/rclone/rclone/backend/hidrive"
	_ "github.com/rclone/rclone/backend/http"
+	_ "github.com/rclone/rclone/backend/iclouddrive"
+	_ "github.com/rclone/rclone/backend/imagekit"
	_ "github.com/rclone/rclone/backend/internetarchive"
	_ "github.com/rclone/rclone/backend/jottacloud"
	_ "github.com/rclone/rclone/backend/koofr"
+	_ "github.com/rclone/rclone/backend/linkbox"
	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/mailru"
	_ "github.com/rclone/rclone/backend/mega"
@@ -37,9 +42,12 @@ import (
	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
	_ "github.com/rclone/rclone/backend/pcloud"
	_ "github.com/rclone/rclone/backend/pikpak"
+	_ "github.com/rclone/rclone/backend/pixeldrain"
	_ "github.com/rclone/rclone/backend/premiumizeme"
+	_ "github.com/rclone/rclone/backend/protondrive"
	_ "github.com/rclone/rclone/backend/putio"
	_ "github.com/rclone/rclone/backend/qingstor"
+	_ "github.com/rclone/rclone/backend/quatrix"
	_ "github.com/rclone/rclone/backend/s3"
	_ "github.com/rclone/rclone/backend/seafile"
	_ "github.com/rclone/rclone/backend/sftp"
@@ -49,6 +57,7 @@ import (
	_ "github.com/rclone/rclone/backend/storj"
	_ "github.com/rclone/rclone/backend/sugarsync"
	_ "github.com/rclone/rclone/backend/swift"
+	_ "github.com/rclone/rclone/backend/ulozto"
	_ "github.com/rclone/rclone/backend/union"
	_ "github.com/rclone/rclone/backend/uptobox"
	_ "github.com/rclone/rclone/backend/webdav"
File diff suppressed because it is too large
@@ -1,21 +0,0 @@
-// Test AmazonCloudDrive filesystem interface
-
-//go:build acd
-// +build acd
-
-package amazonclouddrive_test
-
-import (
-	"testing"
-
-	"github.com/rclone/rclone/backend/amazonclouddrive"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest/fstests"
-)
-
-// TestIntegration runs integration tests against the remote
-func TestIntegration(t *testing.T) {
-	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
-	fstests.RemoteName = "TestAmazonCloudDrive:"
-	fstests.Run(t)
-}
File diff suppressed because it is too large
@@ -1,5 +1,4 @@
-//go:build !plan9 && !solaris && !js && go1.18
-// +build !plan9,!solaris,!js,go1.18
+//go:build !plan9 && !solaris && !js

package azureblob

@@ -17,20 +16,3 @@ func (f *Fs) InternalTest(t *testing.T) {
	enabled = f.Features().GetTier
	assert.True(t, enabled)
}
-
-func TestIncrement(t *testing.T) {
-	for _, test := range []struct {
-		in   []byte
-		want []byte
-	}{
-		{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
-		{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
-		{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
-		{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
-		{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
-		{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
-	} {
-		increment(test.in)
-		assert.Equal(t, test.want, test.in)
-	}
-}
@@ -1,7 +1,6 @@
// Test AzureBlob filesystem interface

-//go:build !plan9 && !solaris && !js && go1.18
-// +build !plan9,!solaris,!js,go1.18
+//go:build !plan9 && !solaris && !js

package azureblob

@@ -19,7 +18,7 @@ func TestIntegration(t *testing.T) {
|
|||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: "TestAzureBlob:",
|
RemoteName: "TestAzureBlob:",
|
||||||
NilObject: (*Object)(nil),
|
NilObject: (*Object)(nil),
|
||||||
TiersToTest: []string{"Hot", "Cool"},
|
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
||||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||||
MinChunkSize: defaultChunkSize,
|
MinChunkSize: defaultChunkSize,
|
||||||
},
|
},
|
||||||
@@ -31,11 +30,11 @@ func TestIntegration2(t *testing.T) {
|
|||||||
if *fstest.RemoteName != "" {
|
if *fstest.RemoteName != "" {
|
||||||
t.Skip("Skipping as -remote set")
|
t.Skip("Skipping as -remote set")
|
||||||
}
|
}
|
||||||
name := "TestAzureBlob:"
|
name := "TestAzureBlob"
|
||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: name,
|
RemoteName: name + ":",
|
||||||
NilObject: (*Object)(nil),
|
NilObject: (*Object)(nil),
|
||||||
TiersToTest: []string{"Hot", "Cool"},
|
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
||||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||||
MinChunkSize: defaultChunkSize,
|
MinChunkSize: defaultChunkSize,
|
||||||
},
|
},
|
||||||
@@ -62,6 +61,7 @@ func TestValidateAccessTier(t *testing.T) {
|
|||||||
"HOT": {"HOT", true},
|
"HOT": {"HOT", true},
|
||||||
"Hot": {"Hot", true},
|
"Hot": {"Hot", true},
|
||||||
"cool": {"cool", true},
|
"cool": {"cool", true},
|
||||||
|
"cold": {"cold", true},
|
||||||
"archive": {"archive", true},
|
"archive": {"archive", true},
|
||||||
"empty": {"", false},
|
"empty": {"", false},
|
||||||
"unknown": {"unknown", false},
|
"unknown": {"unknown", false},
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

-//go:build plan9 || solaris || js || !go1.18
-// +build plan9 solaris js !go1.18
+//go:build plan9 || solaris || js

+// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob
1364
backend/azurefiles/azurefiles.go
Normal file
File diff suppressed because it is too large
69
backend/azurefiles/azurefiles_internal_test.go
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
//go:build !plan9 && !js
|
||||||
|
|
||||||
|
package azurefiles
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"math/rand"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (f *Fs) InternalTest(t *testing.T) {
|
||||||
|
t.Run("Authentication", f.InternalTestAuth)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ fstests.InternalTester = (*Fs)(nil)
|
||||||
|
|
||||||
|
func (f *Fs) InternalTestAuth(t *testing.T) {
|
||||||
|
t.Skip("skipping since this requires authentication credentials which are not part of repo")
|
||||||
|
shareName := "test-rclone-oct-2023"
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
options *Options
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "ConnectionString",
|
||||||
|
options: &Options{
|
||||||
|
ShareName: shareName,
|
||||||
|
ConnectionString: "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "AccountAndKey",
|
||||||
|
options: &Options{
|
||||||
|
ShareName: shareName,
|
||||||
|
Account: "",
|
||||||
|
Key: "",
|
||||||
|
}},
|
||||||
|
{
|
||||||
|
name: "SASUrl",
|
||||||
|
options: &Options{
|
||||||
|
ShareName: shareName,
|
||||||
|
SASURL: "",
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
dirName := randomString(10)
|
||||||
|
assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
|
||||||
|
|
||||||
|
func randomString(charCount int) string {
|
||||||
|
strBldr := strings.Builder{}
|
||||||
|
for i := 0; i < charCount; i++ {
|
||||||
|
randPos := rand.Int63n(52)
|
||||||
|
strBldr.WriteByte(chars[randPos])
|
||||||
|
}
|
||||||
|
return strBldr.String()
|
||||||
|
}
|
||||||
17
backend/azurefiles/azurefiles_test.go
Normal file
@@ -0,0 +1,17 @@
+//go:build !plan9 && !js
+
+package azurefiles
+
+import (
+	"testing"
+
+	"github.com/rclone/rclone/fstest/fstests"
+)
+
+func TestIntegration(t *testing.T) {
+	var objPtr *Object
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: "TestAzureFiles:",
+		NilObject:  objPtr,
+	})
+}
7
backend/azurefiles/azurefiles_unsupported.go
Normal file
@@ -0,0 +1,7 @@
+// Build for azurefiles for unsupported platforms to stop go complaining
+// about "no buildable Go source files "
+
+//go:build plan9 || js
+
+// Package azurefiles provides an interface to Microsoft Azure Files
+package azurefiles
@@ -33,10 +33,18 @@ var _ fserrors.Fataler = (*Error)(nil)
|
|||||||
|
|
||||||
// Bucket describes a B2 bucket
|
// Bucket describes a B2 bucket
|
||||||
type Bucket struct {
|
type Bucket struct {
|
||||||
ID string `json:"bucketId"`
|
ID string `json:"bucketId"`
|
||||||
AccountID string `json:"accountId"`
|
AccountID string `json:"accountId"`
|
||||||
Name string `json:"bucketName"`
|
Name string `json:"bucketName"`
|
||||||
Type string `json:"bucketType"`
|
Type string `json:"bucketType"`
|
||||||
|
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// LifecycleRule is a single lifecycle rule
|
||||||
|
type LifecycleRule struct {
|
||||||
|
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
|
||||||
|
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
|
||||||
|
FileNamePrefix string `json:"fileNamePrefix"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Timestamp is a UTC time when this file was uploaded. It is a base
|
// Timestamp is a UTC time when this file was uploaded. It is a base
|
||||||
@@ -206,9 +214,10 @@ type FileInfo struct {
|
|||||||
|
|
||||||
// CreateBucketRequest is used to create a bucket
|
// CreateBucketRequest is used to create a bucket
|
||||||
type CreateBucketRequest struct {
|
type CreateBucketRequest struct {
|
||||||
AccountID string `json:"accountId"`
|
AccountID string `json:"accountId"`
|
||||||
Name string `json:"bucketName"`
|
Name string `json:"bucketName"`
|
||||||
Type string `json:"bucketType"`
|
Type string `json:"bucketType"`
|
||||||
|
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteBucketRequest is used to create a bucket
|
// DeleteBucketRequest is used to create a bucket
|
||||||
@@ -331,3 +340,11 @@ type CopyPartRequest struct {
|
|||||||
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
|
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
|
||||||
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
|
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpdateBucketRequest describes a request to modify a B2 bucket
|
||||||
|
type UpdateBucketRequest struct {
|
||||||
|
ID string `json:"bucketId"`
|
||||||
|
AccountID string `json:"accountId"`
|
||||||
|
Type string `json:"bucketType,omitempty"`
|
||||||
|
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTimestampEqual(t *testing.T) {
|
func TestTimestampEqual(t *testing.T) {
|
||||||
assert.False(t, emptyT.Equal(emptyT))
|
assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
|
||||||
assert.False(t, t0.Equal(emptyT))
|
assert.False(t, t0.Equal(emptyT))
|
||||||
assert.False(t, emptyT.Equal(t0))
|
assert.False(t, emptyT.Equal(t0))
|
||||||
assert.False(t, t0.Equal(t1))
|
assert.False(t, t0.Equal(t1))
|
||||||
assert.False(t, t1.Equal(t0))
|
assert.False(t, t1.Equal(t0))
|
||||||
assert.True(t, t0.Equal(t0))
|
assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
|
||||||
assert.True(t, t1.Equal(t1))
|
assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
|
||||||
}
|
}
|
||||||
|
|||||||
569
backend/b2/b2.go
@@ -9,6 +9,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"crypto/sha1"
|
"crypto/sha1"
|
||||||
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
gohash "hash"
|
gohash "hash"
|
||||||
@@ -32,6 +33,7 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/walk"
|
"github.com/rclone/rclone/fs/walk"
|
||||||
"github.com/rclone/rclone/lib/bucket"
|
"github.com/rclone/rclone/lib/bucket"
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
|
"github.com/rclone/rclone/lib/multipart"
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
"github.com/rclone/rclone/lib/pool"
|
"github.com/rclone/rclone/lib/pool"
|
||||||
"github.com/rclone/rclone/lib/rest"
|
"github.com/rclone/rclone/lib/rest"
|
||||||
@@ -57,9 +59,8 @@ const (
|
|||||||
minChunkSize = 5 * fs.Mebi
|
minChunkSize = 5 * fs.Mebi
|
||||||
defaultChunkSize = 96 * fs.Mebi
|
defaultChunkSize = 96 * fs.Mebi
|
||||||
defaultUploadCutoff = 200 * fs.Mebi
|
defaultUploadCutoff = 200 * fs.Mebi
|
||||||
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
|
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
|
||||||
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
|
defaultMaxAge = 24 * time.Hour
|
||||||
memoryPoolUseMmap = false
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Globals
|
// Globals
|
||||||
@@ -74,14 +75,17 @@ func init() {
|
|||||||
Name: "b2",
|
Name: "b2",
|
||||||
Description: "Backblaze B2",
|
Description: "Backblaze B2",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
|
CommandHelp: commandHelp,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "account",
|
Name: "account",
|
||||||
Help: "Account ID or Application Key ID.",
|
Help: "Account ID or Application Key ID.",
|
||||||
Required: true,
|
Required: true,
|
||||||
|
Sensitive: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "key",
|
Name: "key",
|
||||||
Help: "Application Key.",
|
Help: "Application Key.",
|
||||||
Required: true,
|
Required: true,
|
||||||
|
Sensitive: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "endpoint",
|
Name: "endpoint",
|
||||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
||||||
@@ -98,7 +102,7 @@ below will cause b2 to return specific errors:
|
|||||||
* "force_cap_exceeded"
|
* "force_cap_exceeded"
|
||||||
|
|
||||||
These will be set in the "X-Bz-Test-Mode" header which is documented
|
These will be set in the "X-Bz-Test-Mode" header which is documented
|
||||||
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
|
in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist).`,
|
||||||
Default: "",
|
Default: "",
|
||||||
Hide: fs.OptionHideConfigurator,
|
Hide: fs.OptionHideConfigurator,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
@@ -147,6 +151,18 @@ might a maximum of "--transfers" chunks in progress at once.
|
|||||||
5,000,000 Bytes is the minimum size.`,
|
5,000,000 Bytes is the minimum size.`,
|
||||||
Default: defaultChunkSize,
|
Default: defaultChunkSize,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "upload_concurrency",
|
||||||
|
Help: `Concurrency for multipart uploads.
|
||||||
|
|
||||||
|
This is the number of chunks of the same file that are uploaded
|
||||||
|
concurrently.
|
||||||
|
|
||||||
|
Note that chunks are stored in memory and there may be up to
|
||||||
|
"--transfers" * "--b2-upload-concurrency" chunks stored at once
|
||||||
|
in memory.`,
|
||||||
|
Default: 4,
|
||||||
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "disable_checksum",
|
Name: "disable_checksum",
|
||||||
Help: `Disable checksums for large (> upload cutoff) files.
|
Help: `Disable checksums for large (> upload cutoff) files.
|
||||||
@@ -178,29 +194,57 @@ Example:
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "download_auth_duration",
|
Name: "download_auth_duration",
|
||||||
Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
|
Help: `Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.
|
||||||
|
|
||||||
|
This is used in combination with "rclone link" for making files
|
||||||
|
accessible to the public and sets the duration before the download
|
||||||
|
authorization token will expire.
|
||||||
|
|
||||||
The duration before the download authorization token will expire.
|
|
||||||
The minimum value is 1 second. The maximum value is one week.`,
|
The minimum value is 1 second. The maximum value is one week.`,
|
||||||
Default: fs.Duration(7 * 24 * time.Hour),
|
Default: fs.Duration(7 * 24 * time.Hour),
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "memory_pool_flush_time",
|
Name: "memory_pool_flush_time",
|
||||||
Default: memoryPoolFlushTime,
|
Default: fs.Duration(time.Minute),
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Help: `How often internal memory buffer pools will be flushed.
|
Hide: fs.OptionHideBoth,
|
||||||
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
|
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
|
||||||
This option controls how often unused buffers will be removed from the pool.`,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "memory_pool_use_mmap",
|
Name: "memory_pool_use_mmap",
|
||||||
Default: memoryPoolUseMmap,
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
Hide: fs.OptionHideBoth,
|
||||||
|
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
|
||||||
|
}, {
|
||||||
|
Name: "lifecycle",
|
||||||
|
Help: `Set the number of days deleted files should be kept when creating a bucket.
|
||||||
|
|
||||||
|
On bucket creation, this parameter is used to create a lifecycle rule
|
||||||
|
for the entire bucket.
|
||||||
|
|
||||||
|
If lifecycle is 0 (the default) it does not create a lifecycle rule so
|
||||||
|
the default B2 behaviour applies. This is to create versions of files
|
||||||
|
on delete and overwrite and to keep them indefinitely.
|
||||||
|
|
||||||
|
If lifecycle is >0 then it creates a single rule setting the number of
|
||||||
|
days before a file that is deleted or overwritten is deleted
|
||||||
|
permanently. This is known as daysFromHidingToDeleting in the b2 docs.
|
||||||
|
|
||||||
|
The minimum value for this parameter is 1 day.
|
||||||
|
|
||||||
|
You can also enable hard_delete in the config also which will mean
|
||||||
|
deletions won't cause versions but overwrites will still cause
|
||||||
|
versions to be made.
|
||||||
|
|
||||||
|
See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation.
|
||||||
|
`,
|
||||||
|
Default: 0,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Help: `Whether to use mmap buffers in internal memory pool.`,
|
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
// See: https://www.backblaze.com/b2/docs/files.html
|
// See: https://www.backblaze.com/docs/cloud-storage-files
|
||||||
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
|
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
|
||||||
// FIXME: allow /, but not leading, trailing or double
|
// FIXME: allow /, but not leading, trailing or double
|
||||||
Default: (encoder.Display |
|
Default: (encoder.Display |
|
||||||
@@ -222,11 +266,11 @@ type Options struct {
|
|||||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||||
|
UploadConcurrency int `config:"upload_concurrency"`
|
||||||
DisableCheckSum bool `config:"disable_checksum"`
|
DisableCheckSum bool `config:"disable_checksum"`
|
||||||
DownloadURL string `config:"download_url"`
|
DownloadURL string `config:"download_url"`
|
||||||
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
|
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
|
||||||
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
|
Lifecycle int `config:"lifecycle"`
|
||||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -251,18 +295,18 @@ type Fs struct {
|
|||||||
authMu sync.Mutex // lock for authorizing the account
|
authMu sync.Mutex // lock for authorizing the account
|
||||||
pacer *fs.Pacer // To pace and retry the API calls
|
pacer *fs.Pacer // To pace and retry the API calls
|
||||||
uploadToken *pacer.TokenDispenser // control concurrency
|
uploadToken *pacer.TokenDispenser // control concurrency
|
||||||
pool *pool.Pool // memory pool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes a b2 object
|
// Object describes a b2 object
|
||||||
type Object struct {
|
type Object struct {
|
||||||
fs *Fs // what this object is part of
|
fs *Fs // what this object is part of
|
||||||
remote string // The remote path
|
remote string // The remote path
|
||||||
id string // b2 id of the file
|
id string // b2 id of the file
|
||||||
modTime time.Time // The modified time of the object if known
|
modTime time.Time // The modified time of the object if known
|
||||||
sha1 string // SHA-1 hash if known
|
sha1 string // SHA-1 hash if known
|
||||||
size int64 // Size of the object
|
size int64 // Size of the object
|
||||||
mimeType string // Content-Type of the object
|
mimeType string // Content-Type of the object
|
||||||
|
meta map[string]string // The object metadata if known - may be nil - with lower case keys
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
@@ -320,7 +364,7 @@ var retryErrorCodes = []int{
|
|||||||
504, // Gateway Time-out
|
504, // Gateway Time-out
|
||||||
}
|
}
|
||||||
|
|
||||||
// shouldRetryNoAuth returns a boolean as to whether this resp and err
|
// shouldRetryNoReauth returns a boolean as to whether this resp and err
|
||||||
// deserve to be retried. It returns the err as a convenience
|
// deserve to be retried. It returns the err as a convenience
|
||||||
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||||
if fserrors.ContextError(ctx, &err) {
|
if fserrors.ContextError(ctx, &err) {
|
||||||
@@ -361,11 +405,18 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
|
|||||||
|
|
||||||
// errorHandler parses a non 2xx error response into an error
|
// errorHandler parses a non 2xx error response into an error
|
||||||
func errorHandler(resp *http.Response) error {
|
func errorHandler(resp *http.Response) error {
|
||||||
// Decode error response
|
body, err := rest.ReadBody(resp)
|
||||||
errResponse := new(api.Error)
|
|
||||||
err := rest.DecodeJSON(resp, &errResponse)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(nil, "Couldn't decode error response: %v", err)
|
fs.Errorf(nil, "Couldn't read error out of body: %v", err)
|
||||||
|
body = nil
|
||||||
|
}
|
||||||
|
// Decode error response if there was one - they can be blank
|
||||||
|
errResponse := new(api.Error)
|
||||||
|
if len(body) > 0 {
|
||||||
|
err = json.Unmarshal(body, errResponse)
|
||||||
|
if err != nil {
|
||||||
|
fs.Errorf(nil, "Couldn't decode error response: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if errResponse.Code == "" {
|
if errResponse.Code == "" {
|
||||||
errResponse.Code = "unknown"
|
errResponse.Code = "unknown"
|
||||||
@@ -409,6 +460,14 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||||
|
err = checkUploadChunkSize(cs)
|
||||||
|
if err == nil {
|
||||||
|
old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// setRoot changes the root of the Fs
|
// setRoot changes the root of the Fs
|
||||||
func (f *Fs) setRoot(root string) {
|
func (f *Fs) setRoot(root string) {
|
||||||
f.root = parsePath(root)
|
f.root = parsePath(root)
|
||||||
@@ -456,19 +515,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
uploads: make(map[string][]*api.GetUploadURLResponse),
|
uploads: make(map[string][]*api.GetUploadURLResponse),
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
||||||
pool: pool.New(
|
|
||||||
time.Duration(opt.MemoryPoolFlushTime),
|
|
||||||
int(opt.ChunkSize),
|
|
||||||
ci.Transfers,
|
|
||||||
opt.MemoryPoolUseMmap,
|
|
||||||
),
|
|
||||||
}
|
}
|
||||||
f.setRoot(root)
|
f.setRoot(root)
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
WriteMimeType: true,
|
WriteMimeType: true,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
BucketBasedRootOK: true,
|
BucketBasedRootOK: true,
|
||||||
|
ChunkWriterDoesntSeek: true,
|
||||||
}).Fill(ctx, f)
|
}).Fill(ctx, f)
|
||||||
// Set the test flag if required
|
// Set the test flag if required
|
||||||
if opt.TestMode != "" {
|
if opt.TestMode != "" {
|
||||||
@@ -595,23 +649,24 @@ func (f *Fs) clearUploadURL(bucketID string) {
|
|||||||
f.uploadMu.Unlock()
|
f.uploadMu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// getBuf gets a buffer of f.opt.ChunkSize and an upload token
|
// getRW gets a RW buffer and an upload token
|
||||||
//
|
//
|
||||||
// If noBuf is set then it just gets an upload token
|
// If noBuf is set then it just gets an upload token
|
||||||
func (f *Fs) getBuf(noBuf bool) (buf []byte) {
|
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
|
||||||
f.uploadToken.Get()
|
f.uploadToken.Get()
|
||||||
if !noBuf {
|
if !noBuf {
|
||||||
buf = f.pool.Get()
|
rw = multipart.NewRW()
|
||||||
}
|
}
|
||||||
return buf
|
return rw
|
||||||
}
|
}
|
||||||
|
|
||||||
// putBuf returns a buffer to the memory pool and an upload token
|
// putRW returns a RW buffer to the memory pool and returns an upload
|
||||||
|
// token
|
||||||
//
|
//
|
||||||
// If noBuf is set then it just returns the upload token
|
// If buf is nil then it just returns the upload token
|
||||||
func (f *Fs) putBuf(buf []byte, noBuf bool) {
|
func (f *Fs) putRW(rw *pool.RW) {
|
||||||
if !noBuf {
|
if rw != nil {
|
||||||
f.pool.Put(buf)
|
_ = rw.Close()
|
||||||
}
|
}
|
||||||
f.uploadToken.Put()
|
f.uploadToken.Put()
|
||||||
}
|
}
|
||||||
@@ -818,7 +873,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
|
|||||||
|
|
||||||
// listBuckets returns all the buckets to out
|
// listBuckets returns all the buckets to out
|
||||||
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
|
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||||
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
|
err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error {
|
||||||
d := fs.NewDir(bucket.Name, time.Time{})
|
d := fs.NewDir(bucket.Name, time.Time{})
|
||||||
entries = append(entries, d)
|
entries = append(entries, d)
|
||||||
return nil
|
return nil
|
||||||
@@ -911,11 +966,14 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
|||||||
type listBucketFn func(*api.Bucket) error
|
type listBucketFn func(*api.Bucket) error
|
||||||
|
|
||||||
// listBucketsToFn lists the buckets to the function supplied
|
// listBucketsToFn lists the buckets to the function supplied
|
||||||
func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
|
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
|
||||||
var account = api.ListBucketsRequest{
|
var account = api.ListBucketsRequest{
|
||||||
AccountID: f.info.AccountID,
|
AccountID: f.info.AccountID,
|
||||||
BucketID: f.info.Allowed.BucketID,
|
BucketID: f.info.Allowed.BucketID,
|
||||||
}
|
}
|
||||||
|
if bucketName != "" && account.BucketID == "" {
|
||||||
|
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
|
||||||
|
}
|
||||||
|
|
||||||
var response api.ListBucketsResponse
|
var response api.ListBucketsResponse
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
@@ -961,7 +1019,7 @@ func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType strin
|
|||||||
if bucketType != "" {
|
if bucketType != "" {
|
||||||
return bucketType, nil
|
return bucketType, nil
|
||||||
}
|
}
|
||||||
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
|
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
|
||||||
// listBucketsToFn reads bucket Types
|
// listBucketsToFn reads bucket Types
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
@@ -996,7 +1054,7 @@ func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, e
|
|||||||
if bucketID != "" {
|
if bucketID != "" {
|
||||||
return bucketID, nil
|
return bucketID, nil
|
||||||
}
|
}
|
||||||
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
|
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
|
||||||
// listBucketsToFn sets IDs
|
// listBucketsToFn sets IDs
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
@@ -1060,6 +1118,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
|
|||||||
Name: f.opt.Enc.FromStandardName(bucket),
|
Name: f.opt.Enc.FromStandardName(bucket),
|
||||||
Type: "allPrivate",
|
Type: "allPrivate",
|
||||||
}
|
}
|
||||||
|
if f.opt.Lifecycle > 0 {
|
||||||
|
request.LifecycleRules = []api.LifecycleRule{{
|
||||||
|
DaysFromHidingToDeleting: &f.opt.Lifecycle,
|
||||||
|
}}
|
||||||
|
}
|
||||||
var response api.Bucket
|
var response api.Bucket
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
err := f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||||
@@ -1187,7 +1250,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
|
|||||||
// if oldOnly is true then it deletes only non current files.
|
// if oldOnly is true then it deletes only non current files.
|
||||||
//
|
//
|
||||||
// Implemented here so we can make sure we delete old versions.
|
// Implemented here so we can make sure we delete old versions.
|
||||||
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) error {
|
||||||
bucket, directory := f.split(dir)
|
bucket, directory := f.split(dir)
|
||||||
if bucket == "" {
|
if bucket == "" {
|
||||||
return errors.New("can't purge from root")
|
return errors.New("can't purge from root")
|
||||||
@@ -1205,7 +1268,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
|
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
|
||||||
return time.Since(time.Time(timestamp)).Hours() > 24
|
return time.Since(time.Time(timestamp)) > maxAge
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete Config.Transfers in parallel
|
// Delete Config.Transfers in parallel
|
||||||
@@ -1228,6 +1291,21 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
if oldOnly {
|
||||||
|
if deleteHidden && deleteUnfinished {
|
||||||
|
fs.Infof(f, "cleaning bucket %q of all hidden files, and pending multipart uploads older than %v", bucket, maxAge)
|
||||||
|
} else if deleteHidden {
|
||||||
|
fs.Infof(f, "cleaning bucket %q of all hidden files", bucket)
|
||||||
|
} else if deleteUnfinished {
|
||||||
|
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
|
||||||
|
} else {
|
||||||
|
fs.Errorf(f, "cleaning bucket %q of nothing. This should never happen!", bucket)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fs.Infof(f, "cleaning bucket %q of all files", bucket)
|
||||||
|
}
|
||||||
|
|
||||||
last := ""
|
last := ""
|
||||||
checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
|
checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
|
||||||
if !isDirectory {
|
if !isDirectory {
|
||||||
@@ -1238,14 +1316,14 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
|||||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
|
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
|
||||||
if oldOnly && last != remote {
|
if oldOnly && last != remote {
|
||||||
// Check current version of the file
|
// Check current version of the file
|
||||||
if object.Action == "hide" {
|
if deleteHidden && object.Action == "hide" {
|
||||||
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
|
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
|
||||||
toBeDeleted <- object
|
toBeDeleted <- object
|
||||||
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
|
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
|
||||||
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
|
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
|
||||||
toBeDeleted <- object
|
toBeDeleted <- object
|
||||||
} else {
|
} else {
|
||||||
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
|
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
fs.Debugf(remote, "Deleting (id %q)", object.ID)
|
fs.Debugf(remote, "Deleting (id %q)", object.ID)
|
||||||
@@ -1267,12 +1345,17 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
|||||||
|
|
||||||
// Purge deletes all the files and directories including the old versions.
|
// Purge deletes all the files and directories including the old versions.
|
||||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
return f.purge(ctx, dir, false)
|
return f.purge(ctx, dir, false, false, false, defaultMaxAge)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CleanUp deletes all the hidden files.
|
// CleanUp deletes all hidden files and pending multipart uploads older than 24 hours.
|
||||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||||
return f.purge(ctx, "", true)
|
return f.purge(ctx, "", true, true, true, defaultMaxAge)
|
||||||
|
}
|
||||||
|
|
||||||
|
// cleanUp deletes all hidden files and/or pending multipart uploads older than the specified age.
|
||||||
|
func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) (err error) {
|
||||||
|
return f.purge(ctx, "", true, deleteHidden, deleteUnfinished, maxAge)
|
||||||
}
|
}
|
||||||
|
|
||||||
// copy does a server-side copy from dstObj <- srcObj
|
// copy does a server-side copy from dstObj <- srcObj
|
||||||
@@ -1280,7 +1363,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
|||||||
// If newInfo is nil then the metadata will be copied otherwise it
|
// If newInfo is nil then the metadata will be copied otherwise it
|
||||||
// will be replaced with newInfo
|
// will be replaced with newInfo
|
||||||
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
|
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
|
||||||
if srcObj.size >= int64(f.opt.CopyCutoff) {
|
if srcObj.size > int64(f.opt.CopyCutoff) {
|
||||||
if newInfo == nil {
|
if newInfo == nil {
|
||||||
newInfo, err = srcObj.getMetaData(ctx)
|
newInfo, err = srcObj.getMetaData(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1291,7 +1374,11 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return up.Upload(ctx)
|
err = up.Copy(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return dstObj.decodeMetaDataFileInfo(up.info)
|
||||||
}
|
}
|
||||||
|
|
||||||
dstBucket, dstPath := dstObj.split()
|
dstBucket, dstPath := dstObj.split()
|
||||||
@@ -1420,7 +1507,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
absPath := "/" + bucketPath
|
absPath := "/" + urlEncode(bucketPath)
|
||||||
link = RootURL + "/file/" + urlEncode(bucket) + absPath
|
link = RootURL + "/file/" + urlEncode(bucket) + absPath
|
||||||
bucketType, err := f.getbucketType(ctx, bucket)
|
bucketType, err := f.getbucketType(ctx, bucket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1480,7 +1567,7 @@ func (o *Object) Size() int64 {
|
|||||||
//
|
//
|
||||||
// Make sure it is lower case.
|
// Make sure it is lower case.
|
||||||
//
|
//
|
||||||
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
|
// Remove unverified prefix - see https://www.backblaze.com/docs/cloud-storage-upload-files-with-the-native-api
|
||||||
// Some tools (e.g. Cyberduck) use this
|
// Some tools (e.g. Cyberduck) use this
|
||||||
func cleanSHA1(sha1 string) string {
|
func cleanSHA1(sha1 string) string {
|
||||||
const unverified = "unverified:"
|
const unverified = "unverified:"
|
||||||
@@ -1507,7 +1594,14 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
|
|||||||
o.size = Size
|
o.size = Size
|
||||||
// Use the UploadTimestamp if can't get file info
|
// Use the UploadTimestamp if can't get file info
|
||||||
o.modTime = time.Time(UploadTimestamp)
|
o.modTime = time.Time(UploadTimestamp)
|
||||||
return o.parseTimeString(Info[timeKey])
|
err = o.parseTimeString(Info[timeKey])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// For now, just set "mtime" in metadata
|
||||||
|
o.meta = make(map[string]string, 1)
|
||||||
|
o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// decodeMetaData sets the metadata in the object from an api.File
|
// decodeMetaData sets the metadata in the object from an api.File
|
||||||
@@ -1609,6 +1703,16 @@ func timeString(modTime time.Time) string {
|
|||||||
return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
|
return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// parseTimeStringHelper converts a decimal string number of milliseconds
|
||||||
|
// elapsed since January 1, 1970 UTC into a time.Time
|
||||||
|
func parseTimeStringHelper(timeString string) (time.Time, error) {
|
||||||
|
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return time.Time{}, err
|
||||||
|
}
|
||||||
|
return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
|
||||||
|
}
|
||||||
|
|
||||||
// parseTimeString converts a decimal string number of milliseconds
|
// parseTimeString converts a decimal string number of milliseconds
|
||||||
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
|
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
|
||||||
// the modTime variable.
|
// the modTime variable.
|
||||||
@@ -1616,12 +1720,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
|
|||||||
if timeString == "" {
|
if timeString == "" {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
|
modTime, err := parseTimeStringHelper(timeString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
|
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
|
o.modTime = modTime
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1698,14 +1802,14 @@ func (file *openFile) Close() (err error) {
|
|||||||
|
|
||||||
// Check to see we read the correct number of bytes
|
// Check to see we read the correct number of bytes
|
||||||
if file.o.Size() != file.bytes {
|
if file.o.Size() != file.bytes {
|
||||||
return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
|
return fmt.Errorf("corrupted on transfer: lengths differ want %d vs got %d", file.o.Size(), file.bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check the SHA1
|
// Check the SHA1
|
||||||
receivedSHA1 := file.o.sha1
|
receivedSHA1 := file.o.sha1
|
||||||
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
|
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
|
||||||
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
|
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
|
||||||
return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
|
return fmt.Errorf("corrupted on transfer: SHA1 hashes differ want %q vs got %q", receivedSHA1, calculatedSHA1)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -1775,6 +1879,14 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
|
|||||||
ContentType: resp.Header.Get("Content-Type"),
|
ContentType: resp.Header.Get("Content-Type"),
|
||||||
Info: Info,
|
Info: Info,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Embryonic metadata support - just mtime
|
||||||
|
o.meta = make(map[string]string, 1)
|
||||||
|
modTime, err := parseTimeStringHelper(info.Info[timeKey])
|
||||||
|
if err == nil {
|
||||||
|
o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
|
||||||
|
}
|
||||||
|
|
||||||
// When reading files from B2 via cloudflare using
|
// When reading files from B2 via cloudflare using
|
||||||
// --b2-download-url cloudflare strips the Content-Length
|
// --b2-download-url cloudflare strips the Content-Length
|
||||||
// headers (presumably so it can inject stuff) so use the old
|
// headers (presumably so it can inject stuff) so use the old
|
||||||
@@ -1859,11 +1971,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if size == -1 {
|
if size < 0 {
|
||||||
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
|
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
|
||||||
buf := o.fs.getBuf(false)
|
rw := o.fs.getRW(false)
|
||||||
|
|
||||||
n, err := io.ReadFull(in, buf)
|
n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
|
||||||
if err == nil {
|
if err == nil {
|
||||||
bufReader := bufio.NewReader(in)
|
bufReader := bufio.NewReader(in)
|
||||||
in = bufReader
|
in = bufReader
|
||||||
@@ -1872,31 +1984,42 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
fs.Debugf(o, "File is big enough for chunked streaming")
|
fs.Debugf(o, "File is big enough for chunked streaming")
|
||||||
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
|
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
o.fs.putBuf(buf, false)
|
o.fs.putRW(rw)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// NB Stream returns the buffer and token
|
// NB Stream returns the buffer and token
|
||||||
return up.Stream(ctx, buf)
|
err = up.Stream(ctx, rw)
|
||||||
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return o.decodeMetaDataFileInfo(up.info)
|
||||||
|
} else if err == io.EOF {
|
||||||
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
|
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
|
||||||
defer o.fs.putBuf(buf, false)
|
defer o.fs.putRW(rw)
|
||||||
size = int64(n)
|
size = n
|
||||||
in = bytes.NewReader(buf[:n])
|
in = rw
|
||||||
} else {
|
} else {
|
||||||
o.fs.putBuf(buf, false)
|
o.fs.putRW(rw)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else if size > int64(o.fs.opt.UploadCutoff) {
|
} else if size > int64(o.fs.opt.UploadCutoff) {
|
||||||
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
|
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
|
||||||
|
Open: o.fs,
|
||||||
|
OpenOptions: options,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return up.Upload(ctx)
|
up := chunkWriter.(*largeUpload)
|
||||||
|
return o.decodeMetaDataFileInfo(up.info)
|
||||||
}
|
}
|
||||||
|
|
||||||
modTime := src.ModTime(ctx)
|
modTime, err := o.getModTime(ctx, src, options)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
|
calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
|
||||||
if calculatedSha1 == "" {
|
if calculatedSha1 == "" {
|
||||||
@@ -2001,6 +2124,71 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
return o.decodeMetaDataFileInfo(&response)
|
return o.decodeMetaDataFileInfo(&response)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
|
||||||
|
// When metadata support is added to b2, this method will need a more generic name
|
||||||
|
func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
|
||||||
|
modTime := src.ModTime(ctx)
|
||||||
|
|
||||||
|
// Fetch metadata if --metadata is in use
|
||||||
|
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
|
||||||
|
if err != nil {
|
||||||
|
return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||||
|
}
|
||||||
|
// merge metadata into request and user metadata
|
||||||
|
for k, v := range meta {
|
||||||
|
k = strings.ToLower(k)
|
||||||
|
// For now, the only metadata we're concerned with is "mtime"
|
||||||
|
switch k {
|
||||||
|
case "mtime":
|
||||||
|
// mtime in meta overrides source ModTime
|
||||||
|
metaModTime, err := time.Parse(time.RFC3339Nano, v)
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
|
||||||
|
} else {
|
||||||
|
modTime = metaModTime
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// Do nothing for now
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return modTime, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||||
|
//
|
||||||
|
// Pass in the remote and the src object
|
||||||
|
// You can also use options to hint at the desired chunk size
|
||||||
|
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
||||||
|
// FIXME what if file is smaller than 1 chunk?
|
||||||
|
if f.opt.Versions {
|
||||||
|
return info, nil, errNotWithVersions
|
||||||
|
}
|
||||||
|
if f.opt.VersionAt.IsSet() {
|
||||||
|
return info, nil, errNotWithVersionAt
|
||||||
|
}
|
||||||
|
//size := src.Size()
|
||||||
|
|
||||||
|
// Temporary Object under construction
|
||||||
|
o := &Object{
|
||||||
|
fs: f,
|
||||||
|
remote: remote,
|
||||||
|
}
|
||||||
|
|
||||||
|
bucket, _ := o.split()
|
||||||
|
err = f.makeBucket(ctx, bucket)
|
||||||
|
if err != nil {
|
||||||
|
return info, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
info = fs.ChunkWriterInfo{
|
||||||
|
ChunkSize: int64(f.opt.ChunkSize),
|
||||||
|
Concurrency: o.fs.opt.UploadConcurrency,
|
||||||
|
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
||||||
|
}
|
||||||
|
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
|
||||||
|
return info, up, err
|
||||||
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
@@ -2026,16 +2214,201 @@ func (o *Object) ID() string {
|
|||||||
return o.id
|
return o.id
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var lifecycleHelp = fs.CommandHelp{
|
||||||
|
Name: "lifecycle",
|
||||||
|
Short: "Read or set the lifecycle for a bucket",
|
||||||
|
Long: `This command can be used to read or set the lifecycle for a bucket.
|
||||||
|
|
||||||
|
Usage Examples:
|
||||||
|
|
||||||
|
To show the current lifecycle rules:
|
||||||
|
|
||||||
|
rclone backend lifecycle b2:bucket
|
||||||
|
|
||||||
|
This will dump something like this showing the lifecycle rules.
|
||||||
|
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"daysFromHidingToDeleting": 1,
|
||||||
|
"daysFromUploadingToHiding": null,
|
||||||
|
"fileNamePrefix": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
If there are no lifecycle rules (the default) then it will just return [].
|
||||||
|
|
||||||
|
To reset the current lifecycle rules:
|
||||||
|
|
||||||
|
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
|
||||||
|
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
|
||||||
|
|
||||||
|
This will run and then print the new lifecycle rules as above.
|
||||||
|
|
||||||
|
Rclone only lets you set lifecycles for the whole bucket with the
|
||||||
|
fileNamePrefix = "".
|
||||||
|
|
||||||
|
You can't disable versioning with B2. The best you can do is to set
|
||||||
|
the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
|
||||||
|
the config also which will mean deletions won't cause versions but
|
||||||
|
overwrites will still cause versions to be made.
|
||||||
|
|
||||||
|
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
|
||||||
|
|
||||||
|
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
|
||||||
|
`,
|
||||||
|
Opts: map[string]string{
|
||||||
|
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
|
||||||
|
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||||
|
var newRule api.LifecycleRule
|
||||||
|
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
|
||||||
|
days, err := strconv.Atoi(daysStr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("bad daysFromHidingToDeleting: %w", err)
|
||||||
|
}
|
||||||
|
newRule.DaysFromHidingToDeleting = &days
|
||||||
|
}
|
||||||
|
if daysStr := opt["daysFromUploadingToHiding"]; daysStr != "" {
|
||||||
|
days, err := strconv.Atoi(daysStr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("bad daysFromUploadingToHiding: %w", err)
|
||||||
|
}
|
||||||
|
newRule.DaysFromUploadingToHiding = &days
|
||||||
|
}
|
||||||
|
bucketName, _ := f.split("")
|
||||||
|
if bucketName == "" {
|
||||||
|
return nil, errors.New("bucket required")
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
var bucket *api.Bucket
|
||||||
|
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
|
||||||
|
bucketID, err := f.getBucketID(ctx, bucketName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/b2_update_bucket",
|
||||||
|
}
|
||||||
|
var request = api.UpdateBucketRequest{
|
||||||
|
ID: bucketID,
|
||||||
|
AccountID: f.info.AccountID,
|
||||||
|
LifecycleRules: []api.LifecycleRule{newRule},
|
||||||
|
}
|
||||||
|
var response api.Bucket
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||||
|
return f.shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
bucket = &response
|
||||||
|
} else {
|
||||||
|
err = f.listBucketsToFn(ctx, bucketName, func(b *api.Bucket) error {
|
||||||
|
bucket = b
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if bucket == nil {
|
||||||
|
return nil, fs.ErrorDirNotFound
|
||||||
|
}
|
||||||
|
return bucket.LifecycleRules, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var cleanupHelp = fs.CommandHelp{
|
||||||
|
Name: "cleanup",
|
||||||
|
Short: "Remove unfinished large file uploads.",
|
||||||
|
Long: `This command removes unfinished large file uploads of age greater than
|
||||||
|
max-age, which defaults to 24 hours.
|
||||||
|
|
||||||
|
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||||
|
it would do.
|
||||||
|
|
||||||
|
rclone backend cleanup b2:bucket/path/to/object
|
||||||
|
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
|
||||||
|
|
||||||
|
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
||||||
|
`,
|
||||||
|
Opts: map[string]string{
|
||||||
|
"max-age": "Max age of upload to delete",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||||
|
maxAge := defaultMaxAge
|
||||||
|
if opt["max-age"] != "" {
|
||||||
|
maxAge, err = fs.ParseDuration(opt["max-age"])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("bad max-age: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, f.cleanUp(ctx, false, true, maxAge)
|
||||||
|
}
|
||||||
|
|
||||||
|
var cleanupHiddenHelp = fs.CommandHelp{
|
||||||
|
Name: "cleanup-hidden",
|
||||||
|
Short: "Remove old versions of files.",
|
||||||
|
Long: `This command removes any old hidden versions of files.
|
||||||
|
|
||||||
|
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||||
|
it would do.
|
||||||
|
|
||||||
|
rclone backend cleanup-hidden b2:bucket/path/to/dir
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||||
|
return nil, f.cleanUp(ctx, true, false, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
var commandHelp = []fs.CommandHelp{
|
||||||
|
lifecycleHelp,
|
||||||
|
cleanupHelp,
|
||||||
|
cleanupHiddenHelp,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command the backend to run a named command
|
||||||
|
//
|
||||||
|
// The command run is name
|
||||||
|
// args may be used to read arguments from
|
||||||
|
// opts may be used to read optional arguments from
|
||||||
|
//
|
||||||
|
// The result should be capable of being JSON encoded
|
||||||
|
// If it is a string or a []string it will be shown to the user
|
||||||
|
// otherwise it will be JSON encoded and shown to the user like that
|
||||||
|
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||||
|
switch name {
|
||||||
|
case "lifecycle":
|
||||||
|
return f.lifecycleCommand(ctx, name, arg, opt)
|
||||||
|
case "cleanup":
|
||||||
|
return f.cleanupCommand(ctx, name, arg, opt)
|
||||||
|
case "cleanup-hidden":
|
||||||
|
return f.cleanupHiddenCommand(ctx, name, arg, opt)
|
||||||
|
default:
|
||||||
|
return nil, fs.ErrorCommandNotFound
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
// Check the interfaces are satisfied
|
||||||
var (
|
var (
|
||||||
_ fs.Fs = &Fs{}
|
_ fs.Fs = &Fs{}
|
||||||
_ fs.Purger = &Fs{}
|
_ fs.Purger = &Fs{}
|
||||||
_ fs.Copier = &Fs{}
|
_ fs.Copier = &Fs{}
|
||||||
_ fs.PutStreamer = &Fs{}
|
_ fs.PutStreamer = &Fs{}
|
||||||
_ fs.CleanUpper = &Fs{}
|
_ fs.CleanUpper = &Fs{}
|
||||||
_ fs.ListRer = &Fs{}
|
_ fs.ListRer = &Fs{}
|
||||||
_ fs.PublicLinker = &Fs{}
|
_ fs.PublicLinker = &Fs{}
|
||||||
_ fs.Object = &Object{}
|
_ fs.OpenChunkWriter = &Fs{}
|
||||||
_ fs.MimeTyper = &Object{}
|
_ fs.Commander = &Fs{}
|
||||||
_ fs.IDer = &Object{}
|
_ fs.Object = &Object{}
|
||||||
|
_ fs.MimeTyper = &Object{}
|
||||||
|
_ fs.IDer = &Object{}
|
||||||
)
|
)
|
||||||
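Note: the hunks above move large b2 uploads onto rclone's generic chunked-upload machinery - Update now hands the data to multipart.UploadMultipart, and the new OpenChunkWriter/WriteChunk/Close/Abort methods are what it drives. The following is only a minimal, sequential sketch of that call order using the interface shapes shown in this diff (uploadSequentially is an invented helper name; the real driver in lib/multipart adds concurrency, pooling and retries):

package main

import (
	"bytes"
	"context"
	"io"

	"github.com/rclone/rclone/fs"
)

// uploadSequentially drives a ChunkWriter one chunk at a time.
func uploadSequentially(ctx context.Context, f fs.OpenChunkWriter, remote string, src fs.ObjectInfo, in io.Reader) error {
	// Open the chunk writer - info carries the chunk size the backend wants.
	info, writer, err := f.OpenChunkWriter(ctx, remote, src)
	if err != nil {
		return err
	}
	buf := make([]byte, info.ChunkSize)
	for chunk := 0; ; chunk++ {
		// Fill one chunk from the input.
		n, rerr := io.ReadFull(in, buf)
		if n > 0 {
			// WriteChunk takes an io.ReadSeeker so it can be retried.
			if _, werr := writer.WriteChunk(ctx, chunk, bytes.NewReader(buf[:n])); werr != nil {
				_ = writer.Abort(ctx) // cancel the large file on failure
				return werr
			}
		}
		if rerr == io.EOF || rerr == io.ErrUnexpectedEOF {
			break // short or empty read means the input is finished
		}
		if rerr != nil {
			_ = writer.Abort(ctx)
			return rerr
		}
	}
	// Close finishes the large file upload.
	return writer.Close(ctx)
}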
@@ -1,14 +1,29 @@
 package b2

 import (
+"context"
+"crypto/sha1"
+"fmt"
+"path"
+"strings"
 "testing"
 "time"

+"github.com/rclone/rclone/backend/b2/api"
+"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/cache"
+"github.com/rclone/rclone/fs/hash"
 "github.com/rclone/rclone/fstest"
+"github.com/rclone/rclone/fstest/fstests"
+"github.com/rclone/rclone/lib/bucket"
+"github.com/rclone/rclone/lib/random"
+"github.com/rclone/rclone/lib/version"
+"github.com/stretchr/testify/assert"
+"github.com/stretchr/testify/require"
 )

 // Test b2 string encoding
-// https://www.backblaze.com/b2/docs/string_encoding.html
+// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding

 var encodeTest = []struct {
 fullyEncoded string
@@ -168,3 +183,304 @@ func TestParseTimeString(t *testing.T) {
 }

 }
+
+// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
+func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
+var headers = make(map[string]string)
+for _, option := range options {
+k, v := option.Header()
+k = strings.ToLower(k)
+if strings.HasPrefix(k, headerPrefix) {
+headers[k[len(headerPrefix):]] = v
+}
+}
+
+return headers
+}
+
+func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
+what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
+t.Run(what, func(t *testing.T) {
+ctx := context.Background()
+
+ss := fs.SizeSuffix(0)
+err := ss.Set(size)
+require.NoError(t, err)
+original := random.String(int(ss))
+
+contents := fstest.Gz(t, original)
+mimeType := "text/html"
+
+if chunkSize != "" {
+ss := fs.SizeSuffix(0)
+err := ss.Set(chunkSize)
+require.NoError(t, err)
+_, err = f.SetUploadChunkSize(ss)
+require.NoError(t, err)
+}
+
+if uploadCutoff != "" {
+ss := fs.SizeSuffix(0)
+err := ss.Set(uploadCutoff)
+require.NoError(t, err)
+_, err = f.SetUploadCutoff(ss)
+require.NoError(t, err)
+}
+
+item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
+btime := time.Now()
+metadata := fs.Metadata{
+// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any
+
+"mtime": "2009-05-06T04:05:06.499Z",
+}
+
+// Need to specify HTTP options with the header prefix since they are passed as-is
+options := []fs.OpenOption{
+&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
+&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
+}
+
+obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
+defer func() {
+assert.NoError(t, obj.Remove(ctx))
+}()
+o := obj.(*Object)
+gotMetadata, err := o.getMetaData(ctx)
+require.NoError(t, err)
+
+// X-Bz-Info-a & X-Bz-Info-b
+optMetadata := OpenOptionToMetaData(options)
+for k, v := range optMetadata {
+got := gotMetadata.Info[k]
+assert.Equal(t, v, got, k)
+}
+
+// mtime
+for k, v := range metadata {
+got := o.meta[k]
+assert.Equal(t, v, got, k)
+}
+
+assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
+
+// Modification time from the x-bz-info-src_last_modified_millis header
+var mtime api.Timestamp
+err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
+if err != nil {
+fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
+}
+assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
+
+// Upload time
+gotBtime := time.Time(gotMetadata.UploadTimestamp)
+dt := gotBtime.Sub(btime)
+assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
+
+t.Run("GzipEncoding", func(t *testing.T) {
+// Test that the gzipped file we uploaded can be
+// downloaded
+checkDownload := func(wantContents string, wantSize int64, wantHash string) {
+gotContents := fstests.ReadObject(ctx, t, o, -1)
+assert.Equal(t, wantContents, gotContents)
+assert.Equal(t, wantSize, o.Size())
+gotHash, err := o.Hash(ctx, hash.SHA1)
+require.NoError(t, err)
+assert.Equal(t, wantHash, gotHash)
+}
+
+t.Run("NoDecompress", func(t *testing.T) {
+checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
+})
+})
+})
+}
+
+func (f *Fs) InternalTestMetadata(t *testing.T) {
+// 1 kB regular file
+f.internalTestMetadata(t, "1kiB", "", "")
+
+// 10 MiB large file
+f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
+}
+
+func sha1Sum(t *testing.T, s string) string {
+hash := sha1.Sum([]byte(s))
+return fmt.Sprintf("%x", hash)
+}
+
+// This is adapted from the s3 equivalent.
+func (f *Fs) InternalTestVersions(t *testing.T) {
+ctx := context.Background()
+
+// Small pause to make the LastModified different since AWS
+// only seems to track them to 1 second granularity
+time.Sleep(2 * time.Second)
+
+// Create an object
+const dirName = "versions"
+const fileName = dirName + "/" + "test-versions.txt"
+contents := random.String(100)
+item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+defer func() {
+assert.NoError(t, obj.Remove(ctx))
+}()
+objMetadata, err := obj.(*Object).getMetaData(ctx)
+require.NoError(t, err)
+
+// Small pause
+time.Sleep(2 * time.Second)
+
+// Remove it
+assert.NoError(t, obj.Remove(ctx))
+
+// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
+time.Sleep(2 * time.Second)
+
+// And create it with different size and contents
+newContents := random.String(101)
+newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
+newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
+newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
+require.NoError(t, err)
+
+t.Run("Versions", func(t *testing.T) {
+// Set --b2-versions for this test
+f.opt.Versions = true
+defer func() {
+f.opt.Versions = false
+}()
+
+// Read the contents
+entries, err := f.List(ctx, dirName)
+require.NoError(t, err)
+tests := 0
+var fileNameVersion string
+for _, entry := range entries {
+t.Log(entry)
+remote := entry.Remote()
+if remote == fileName {
+t.Run("ReadCurrent", func(t *testing.T) {
+assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
+})
+tests++
+} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
+t.Run("ReadVersion", func(t *testing.T) {
+assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
+})
+assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be with 1 second of version time")
+fileNameVersion = remote
+tests++
+}
+}
+assert.Equal(t, 2, tests, "object missing from listing")
+
+// Check we can read the object with a version suffix
+t.Run("NewObject", func(t *testing.T) {
+o, err := f.NewObject(ctx, fileNameVersion)
+require.NoError(t, err)
+require.NotNil(t, o)
+assert.Equal(t, int64(100), o.Size(), o.Remote())
+})
+
+// Check we can make a NewFs from that object with a version suffix
+t.Run("NewFs", func(t *testing.T) {
+newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
+// Make sure --b2-versions is set in the config of the new remote
+fs.Debugf(nil, "oldPath = %q", newPath)
+lastColon := strings.LastIndex(newPath, ":")
+require.True(t, lastColon >= 0)
+newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
+fs.Debugf(nil, "newPath = %q", newPath)
+fNew, err := cache.Get(ctx, newPath)
+// This should return pointing to a file
+require.Equal(t, fs.ErrorIsFile, err)
+require.NotNil(t, fNew)
+// With the directory above
+assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
+})
+})
+
+t.Run("VersionAt", func(t *testing.T) {
+// We set --b2-version-at for this test so make sure we reset it at the end
+defer func() {
+f.opt.VersionAt = fs.Time{}
+}()
+
+var (
+firstObjectTime = time.Time(objMetadata.UploadTimestamp)
+secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
+)
+
+for _, test := range []struct {
+what string
+at time.Time
+want []fstest.Item
+wantErr error
+wantSize int64
+}{
+{
+what: "Before",
+at: firstObjectTime.Add(-time.Second),
+want: fstests.InternalTestFiles,
+wantErr: fs.ErrorObjectNotFound,
+},
+{
+what: "AfterOne",
+at: firstObjectTime.Add(time.Second),
+want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
+wantSize: 100,
+},
+{
+what: "AfterDelete",
+at: secondObjectTime.Add(-time.Second),
+want: fstests.InternalTestFiles,
+wantErr: fs.ErrorObjectNotFound,
+},
+{
+what: "AfterTwo",
+at: secondObjectTime.Add(time.Second),
+want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
+wantSize: 101,
+},
+} {
+t.Run(test.what, func(t *testing.T) {
+f.opt.VersionAt = fs.Time(test.at)
+t.Run("List", func(t *testing.T) {
+fstest.CheckListing(t, f, test.want)
+})
+// b2 NewObject doesn't work with VersionAt
+//t.Run("NewObject", func(t *testing.T) {
+// gotObj, gotErr := f.NewObject(ctx, fileName)
+// assert.Equal(t, test.wantErr, gotErr)
+// if gotErr == nil {
+// assert.Equal(t, test.wantSize, gotObj.Size())
+// }
+//})
+})
+}
+})
+
+t.Run("Cleanup", func(t *testing.T) {
+require.NoError(t, f.cleanUp(ctx, true, false, 0))
+items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
+fstest.CheckListing(t, f, items)
+// Set --b2-versions for this test
+f.opt.Versions = true
+defer func() {
+f.opt.Versions = false
+}()
+fstest.CheckListing(t, f, items)
+})
+
+// Purge gets tested later
+}
+
+// -run TestIntegration/FsMkdir/FsPutFiles/Internal
+func (f *Fs) InternalTest(t *testing.T) {
+t.Run("Metadata", f.InternalTestMetadata)
+t.Run("Versions", f.InternalTestVersions)
+}
+
+var _ fstests.InternalTester = (*Fs)(nil)
@@ -28,7 +28,12 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 return f.setUploadCutoff(cs)
 }

+func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+return f.setCopyCutoff(cs)
+}
+
 var (
 _ fstests.SetUploadChunkSizer = (*Fs)(nil)
 _ fstests.SetUploadCutoffer = (*Fs)(nil)
+_ fstests.SetCopyCutoffer = (*Fs)(nil)
 )
@@ -1,11 +1,10 @@
 // Upload large files for b2
 //
-// Docs - https://www.backblaze.com/b2/docs/large_files.html
+// Docs - https://www.backblaze.com/docs/cloud-storage-large-files

 package b2

 import (
-"bytes"
 "context"
 "crypto/sha1"
 "encoding/hex"
@@ -14,7 +13,6 @@ import (
 "io"
 "strings"
 "sync"
-"time"

 "github.com/rclone/rclone/backend/b2/api"
 "github.com/rclone/rclone/fs"
@@ -80,36 +78,31 @@ type largeUpload struct {
 wrap accounting.WrapFn // account parts being transferred
 id string // ID of the file being uploaded
 size int64 // total size
-parts int64 // calculated number of parts, if known
+parts int // calculated number of parts, if known
+sha1smu sync.Mutex // mutex to protect sha1s
 sha1s []string // slice of SHA1s for each part
 uploadMu sync.Mutex // lock for upload variable
 uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
 chunkSize int64 // chunk size to use
 src *Object // if copying, object we are reading from
+info *api.FileInfo // final response with info about the object
 }

 // newLargeUpload starts an upload of object o from in with metadata in src
 //
 // If newInfo is set then metadata from that will be used instead of reading it from src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
 size := src.Size()
-parts := int64(0)
+parts := 0
-sha1SliceSize := int64(maxParts)
 chunkSize := defaultChunkSize
 if size == -1 {
 fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
 } else {
 chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
-parts = size / int64(chunkSize)
+parts = int(size / int64(chunkSize))
 if size%int64(chunkSize) != 0 {
 parts++
 }
-sha1SliceSize = parts
-}

-opts := rest.Opts{
-Method: "POST",
-Path: "/b2_start_large_file",
 }
 bucket, bucketPath := o.split()
 bucketID, err := f.getBucketID(ctx, bucket)
@@ -120,12 +113,27 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 BucketID: bucketID,
 Name: f.opt.Enc.FromStandardPath(bucketPath),
 }
+optionsToSend := make([]fs.OpenOption, 0, len(options))
 if newInfo == nil {
-modTime := src.ModTime(ctx)
+modTime, err := o.getModTime(ctx, src, options)
+if err != nil {
+return nil, err
+}
+
 request.ContentType = fs.MimeType(ctx, src)
 request.Info = map[string]string{
 timeKey: timeString(modTime),
 }
+// Custom upload headers - remove header prefix since they are sent in the body
+for _, option := range options {
+k, v := option.Header()
+k = strings.ToLower(k)
+if strings.HasPrefix(k, headerPrefix) {
+request.Info[k[len(headerPrefix):]] = v
+} else {
+optionsToSend = append(optionsToSend, option)
+}
+}
 // Set the SHA1 if known
 if !o.fs.opt.DisableCheckSum || doCopy {
 if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
@@ -136,6 +144,11 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 request.ContentType = newInfo.ContentType
 request.Info = newInfo.Info
 }
+opts := rest.Opts{
+Method: "POST",
+Path: "/b2_start_large_file",
+Options: optionsToSend,
+}
 var response api.StartLargeFileResponse
 err = f.pacer.Call(func() (bool, error) {
 resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -152,7 +165,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 id: response.ID,
 size: size,
 parts: parts,
-sha1s: make([]string, sha1SliceSize),
+sha1s: make([]string, 0, 16),
 chunkSize: int64(chunkSize),
 }
 // unwrap the accounting from the input, we use wrap to put it
@@ -171,24 +184,26 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 // This should be returned with returnUploadURL when finished
 func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
 up.uploadMu.Lock()
-defer up.uploadMu.Unlock()
-if len(up.uploads) == 0 {
-opts := rest.Opts{
-Method: "POST",
-Path: "/b2_get_upload_part_url",
-}
-var request = api.GetUploadPartURLRequest{
-ID: up.id,
-}
-err := up.f.pacer.Call(func() (bool, error) {
-resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
-return up.f.shouldRetry(ctx, resp, err)
-})
-if err != nil {
-return nil, fmt.Errorf("failed to get upload URL: %w", err)
-}
-} else {
+if len(up.uploads) > 0 {
 upload, up.uploads = up.uploads[0], up.uploads[1:]
+up.uploadMu.Unlock()
+return upload, nil
+}
+up.uploadMu.Unlock()
+
+opts := rest.Opts{
+Method: "POST",
+Path: "/b2_get_upload_part_url",
+}
+var request = api.GetUploadPartURLRequest{
+ID: up.id,
+}
+err = up.f.pacer.Call(func() (bool, error) {
+resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
+return up.f.shouldRetry(ctx, resp, err)
+})
+if err != nil {
+return nil, fmt.Errorf("failed to get upload URL: %w", err)
 }
 return upload, nil
 }
@@ -203,10 +218,39 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
 up.uploadMu.Unlock()
 }

-// Transfer a chunk
-func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
-err := up.f.pacer.Call(func() (bool, error) {
-fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
+// Add an sha1 to the being built up sha1s
+func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
+up.sha1smu.Lock()
+defer up.sha1smu.Unlock()
+if len(up.sha1s) < chunkNumber+1 {
+up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
+}
+up.sha1s[chunkNumber] = sha1
+}
+
+// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
+func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
+// Only account after the checksum reads have been done
+if do, ok := reader.(pool.DelayAccountinger); ok {
+// To figure out this number, do a transfer and if the accounted size is 0 or a
+// multiple of what it should be, increase or decrease this number.
+do.DelayAccounting(1)
+}
+
+err = up.f.pacer.Call(func() (bool, error) {
+// Discover the size by seeking to the end
+size, err = reader.Seek(0, io.SeekEnd)
+if err != nil {
+return false, err
+}
+
+// rewind the reader on retry and after reading size
+_, err = reader.Seek(0, io.SeekStart)
+if err != nil {
+return false, err
+}
+
+fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
+
 // Get upload URL
 upload, err := up.getUploadURL(ctx)
@@ -214,8 +258,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 return false, err
 }

-in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
+in := newHashAppendingReader(reader, sha1.New())
-size := int64(len(body)) + int64(in.AdditionalLength())
+sizeWithHash := size + int64(in.AdditionalLength())

 // Authorization
 //
@@ -245,10 +289,10 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 Body: up.wrap(in),
 ExtraHeaders: map[string]string{
 "Authorization": upload.AuthorizationToken,
-"X-Bz-Part-Number": fmt.Sprintf("%d", part),
+"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
 sha1Header: "hex_digits_at_end",
 },
-ContentLength: &size,
+ContentLength: &sizeWithHash,
 }

 var response api.UploadPartResponse
@@ -256,7 +300,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
 retry, err := up.f.shouldRetry(ctx, resp, err)
 if err != nil {
-fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
+fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
 }
 // On retryable error clear PartUploadURL
 if retry {
@@ -264,30 +308,30 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 upload = nil
 }
 up.returnUploadURL(upload)
-up.sha1s[part-1] = in.HexSum()
+up.addSha1(chunkNumber, in.HexSum())
 return retry, err
 })
 if err != nil {
-fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
+fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
 } else {
-fs.Debugf(up.o, "Done sending chunk %d", part)
+fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
 }
-return err
+return size, err
 }

 // Copy a chunk
-func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
+func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
 err := up.f.pacer.Call(func() (bool, error) {
 fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
 opts := rest.Opts{
 Method: "POST",
 Path: "/b2_copy_part",
 }
-offset := (part - 1) * up.chunkSize // where we are in the source file
+offset := int64(part) * up.chunkSize // where we are in the source file
 var request = api.CopyPartRequest{
 SourceID: up.src.id,
 LargeFileID: up.id,
-PartNumber: part,
+PartNumber: int64(part + 1),
 Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
 }
 var response api.UploadPartResponse
@@ -296,7 +340,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64
 if err != nil {
 fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
 }
-up.sha1s[part-1] = response.SHA1
+up.addSha1(part, response.SHA1)
 return retry, err
 })
 if err != nil {
@@ -307,8 +351,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64
 return err
 }

-// finish closes off the large upload
-func (up *largeUpload) finish(ctx context.Context) error {
+// Close closes off the large upload
+func (up *largeUpload) Close(ctx context.Context) error {
 fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
 opts := rest.Opts{
 Method: "POST",
@@ -326,11 +370,12 @@ func (up *largeUpload) finish(ctx context.Context) error {
 if err != nil {
 return err
 }
-return up.o.decodeMetaDataFileInfo(&response)
+up.info = &response
+return nil
 }

-// cancel aborts the large upload
-func (up *largeUpload) cancel(ctx context.Context) error {
+// Abort aborts the large upload
+func (up *largeUpload) Abort(ctx context.Context) error {
 fs.Debugf(up.o, "Cancelling large file %s", up.what)
 opts := rest.Opts{
 Method: "POST",
@@ -355,157 +400,105 @@ func (up *largeUpload) cancel(ctx context.Context) error {
 // reaches EOF.
 //
 // Note that initialUploadBlock must be returned to f.putBuf()
-func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
+func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
-defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
+defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
 fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
 var (
 g, gCtx = errgroup.WithContext(ctx)
 hasMoreParts = true
 )
-up.size = int64(len(initialUploadBlock))
-g.Go(func() error {
-for part := int64(1); hasMoreParts; part++ {
+up.size = initialUploadBlock.Size()
+up.parts = 0
+for part := 0; hasMoreParts; part++ {
 // Get a block of memory from the pool and token which limits concurrency.
-var buf []byte
-if part == 1 {
-buf = initialUploadBlock
+var rw *pool.RW
+if part == 0 {
+rw = initialUploadBlock
 } else {
-buf = up.f.getBuf(false)
+rw = up.f.getRW(false)
-}
-
-// Fail fast, in case an errgroup managed function returns an error
-// gCtx is cancelled. There is no point in uploading all the other parts.
-if gCtx.Err() != nil {
-up.f.putBuf(buf, false)
-return nil
-}
-
-// Read the chunk
-var n int
-if part == 1 {
-n = len(buf)
-} else {
-n, err = io.ReadFull(up.in, buf)
-if err == io.ErrUnexpectedEOF {
-fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
-buf = buf[:n]
-hasMoreParts = false
-} else if err == io.EOF {
-fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
-up.f.putBuf(buf, false)
-return nil
-} else if err != nil {
-// other kinds of errors indicate failure
-up.f.putBuf(buf, false)
-return err
-}
-}
-
-// Keep stats up to date
-up.parts = part
-up.size += int64(n)
-if part > maxParts {
-up.f.putBuf(buf, false)
-return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
-}
-
-part := part // for the closure
-g.Go(func() (err error) {
-defer up.f.putBuf(buf, false)
-return up.transferChunk(gCtx, part, buf)
-})
 }
-return nil
-})
+// Fail fast, in case an errgroup managed function returns an error
+// gCtx is cancelled. There is no point in uploading all the other parts.
+if gCtx.Err() != nil {
+up.f.putRW(rw)
+break
+}
+
+// Read the chunk
+var n int64
+if part == 0 {
+n = rw.Size()
+} else {
+n, err = io.CopyN(rw, up.in, up.chunkSize)
+if err == io.EOF {
+if n == 0 {
+fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
+up.f.putRW(rw)
+break
+} else {
+fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
+}
+hasMoreParts = false
+} else if err != nil {
+// other kinds of errors indicate failure
+up.f.putRW(rw)
+return err
+}
+}
+
+// Keep stats up to date
+up.parts += 1
+up.size += n
+if part > maxParts {
+up.f.putRW(rw)
+return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+}
+
+part := part // for the closure
+g.Go(func() (err error) {
+defer up.f.putRW(rw)
+_, err = up.WriteChunk(gCtx, part, rw)
+return err
+})
+}
 err = g.Wait()
 if err != nil {
 return err
 }
-up.sha1s = up.sha1s[:up.parts]
-return up.finish(ctx)
+return up.Close(ctx)
 }

-// Upload uploads the chunks from the input
-func (up *largeUpload) Upload(ctx context.Context) (err error) {
-defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
+// Copy the chunks from the source to the destination
+func (up *largeUpload) Copy(ctx context.Context) (err error) {
+defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
 fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
 var (
 g, gCtx = errgroup.WithContext(ctx)
 remaining = up.size
-uploadPool *pool.Pool
-ci = fs.GetConfig(ctx)
 )
-// If using large chunk size then make a temporary pool
-if up.chunkSize <= int64(up.f.opt.ChunkSize) {
-uploadPool = up.f.pool
-} else {
-uploadPool = pool.New(
-time.Duration(up.f.opt.MemoryPoolFlushTime),
+g.SetLimit(up.f.opt.UploadConcurrency)
+for part := 0; part < up.parts; part++ {
+// Fail fast, in case an errgroup managed function returns an error
+// gCtx is cancelled. There is no point in copying all the other parts.
+if gCtx.Err() != nil {
+break
-int(up.chunkSize),
-ci.Transfers,
-up.f.opt.MemoryPoolUseMmap,
-)
-defer uploadPool.Flush()
-}
-// Get an upload token and a buffer
-getBuf := func() (buf []byte) {
-up.f.getBuf(true)
-if !up.doCopy {
-buf = uploadPool.Get()
 }
-return buf
-}
-// Put an upload token and a buffer
-putBuf := func(buf []byte) {
+reqSize := remaining
+if reqSize >= up.chunkSize {
+reqSize = up.chunkSize
-if !up.doCopy {
-uploadPool.Put(buf)
 }
-up.f.putBuf(nil, true)
+part := part // for the closure
+g.Go(func() (err error) {
+return up.copyChunk(gCtx, part, reqSize)
+})
+remaining -= reqSize
 }
-g.Go(func() error {
-for part := int64(1); part <= up.parts; part++ {
-// Get a block of memory from the pool and token which limits concurrency.
-buf := getBuf()
-
-// Fail fast, in case an errgroup managed function returns an error
-// gCtx is cancelled. There is no point in uploading all the other parts.
-if gCtx.Err() != nil {
-putBuf(buf)
-return nil
-}
-
-reqSize := remaining
-if reqSize >= up.chunkSize {
-reqSize = up.chunkSize
-}
-
-if !up.doCopy {
-// Read the chunk
-buf = buf[:reqSize]
-_, err = io.ReadFull(up.in, buf)
-if err != nil {
-putBuf(buf)
-return err
-}
-}
-
-part := part // for the closure
-g.Go(func() (err error) {
-defer putBuf(buf)
-if !up.doCopy {
-err = up.transferChunk(gCtx, part, buf)
-} else {
-err = up.copyChunk(gCtx, part, reqSize)
-}
-return err
-})
-remaining -= reqSize
-}
-return nil
-})
 err = g.Wait()
 if err != nil {
 return err
 }
-return up.finish(ctx)
+return up.Close(ctx)
 }
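Note: WriteChunk above uploads each part with sha1Header set to "hex_digits_at_end" and wraps the data in newHashAppendingReader, so the part's SHA1 is computed while streaming and appended after the body, which is why ContentLength is sizeWithHash rather than size. The following is only an illustrative stand-in for that idea (the type and helper names here are invented for the example and are not rclone's implementation):

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"hash"
	"io"
	"strings"
)

// appendingReader streams the underlying data, hashing it as it goes, then
// emits the hex SHA1 digest after the data so the server can verify it.
type appendingReader struct {
	in   io.Reader
	h    hash.Hash
	tail io.Reader // set to the hex digest once in is exhausted
}

func newAppendingReader(in io.Reader) *appendingReader {
	h := sha1.New()
	return &appendingReader{in: io.TeeReader(in, h), h: h}
}

func (r *appendingReader) Read(p []byte) (int, error) {
	if r.tail == nil {
		n, err := r.in.Read(p)
		if err == io.EOF {
			// Data finished - switch to emitting the 40 hex digest bytes.
			r.tail = strings.NewReader(hex.EncodeToString(r.h.Sum(nil)))
			if n == 0 {
				return r.tail.Read(p)
			}
			return n, nil
		}
		return n, err
	}
	return r.tail.Read(p)
}

A caller would therefore declare the request body length as the data size plus the 40 extra digest bytes, matching the sizeWithHash calculation in the diff.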
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
 out += ": " + e.Message
 }
 if e.ContextInfo != nil {
-out += fmt.Sprintf(" (%+v)", e.ContextInfo)
+out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
 }
 return out
 }
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
 // ItemFields are the fields needed for FileInfo
 var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"

-// Types of things in Item
+// Types of things in Item/ItemMini
 const (
 ItemTypeFolder = "folder"
 ItemTypeFile = "file"
@@ -72,20 +72,31 @@ const (
 ItemStatusDeleted = "deleted"
 )

+// ItemMini is a subset of the elements in a full Item returned by some API calls
+type ItemMini struct {
+Type string `json:"type"`
+ID string `json:"id"`
+SequenceID int64 `json:"sequence_id,string"`
+Etag string `json:"etag"`
+SHA1 string `json:"sha1"`
+Name string `json:"name"`
+}
+
 // Item describes a folder or a file as returned by Get Folder Items and others
 type Item struct {
 Type string `json:"type"`
 ID string `json:"id"`
-SequenceID string `json:"sequence_id"`
+SequenceID int64 `json:"sequence_id,string"`
 Etag string `json:"etag"`
 SHA1 string `json:"sha1"`
 Name string `json:"name"`
 Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
 CreatedAt Time `json:"created_at"`
 ModifiedAt Time `json:"modified_at"`
 ContentCreatedAt Time `json:"content_created_at"`
 ContentModifiedAt Time `json:"content_modified_at"`
 ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
+Parent ItemMini `json:"parent"`
 SharedLink struct {
 URL string `json:"url,omitempty"`
 Access string `json:"access,omitempty"`
@@ -156,19 +167,7 @@ type PreUploadCheckResponse struct {
 // PreUploadCheckConflict is returned in the ContextInfo error field
 // from PreUploadCheck when the error code is "item_name_in_use"
 type PreUploadCheckConflict struct {
-Conflicts struct {
+Conflicts ItemMini `json:"conflicts"`
-Type string `json:"type"`
-ID string `json:"id"`
-FileVersion struct {
-Type string `json:"type"`
-ID string `json:"id"`
-Sha1 string `json:"sha1"`
-} `json:"file_version"`
-SequenceID string `json:"sequence_id"`
-Etag string `json:"etag"`
-Sha1 string `json:"sha1"`
-Name string `json:"name"`
-} `json:"conflicts"`
 }

 // UpdateFileModTime is used in Update File Info
@@ -281,3 +280,30 @@ type User struct {
 Address string `json:"address"`
 AvatarURL string `json:"avatar_url"`
 }
+
+// FileTreeChangeEventTypes are the events that can require cache invalidation
+var FileTreeChangeEventTypes = map[string]struct{}{
+"ITEM_COPY": {},
+"ITEM_CREATE": {},
+"ITEM_MAKE_CURRENT_VERSION": {},
+"ITEM_MODIFY": {},
+"ITEM_MOVE": {},
+"ITEM_RENAME": {},
+"ITEM_TRASH": {},
+"ITEM_UNDELETE_VIA_TRASH": {},
+"ITEM_UPLOAD": {},
+}
+
+// Event is an array element in the response returned from /events
+type Event struct {
+EventType string `json:"event_type"`
+EventID string `json:"event_id"`
+Source Item `json:"source"`
+}
+
+// Events is returned from /events
+type Events struct {
+ChunkSize int64 `json:"chunk_size"`
+Entries []Event `json:"entries"`
+NextStreamPosition int64 `json:"next_stream_position"`
+}
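Note: the SequenceID fields above change from string to int64 via the `sequence_id,string` tag because Box returns sequence IDs as quoted numbers while the event handling needs to compare them numerically. A small self-contained check of how encoding/json's ",string" option handles that (the struct here is a trimmed stand-in for api.ItemMini, not the real type):

package main

import (
	"encoding/json"
	"fmt"
)

type itemMini struct {
	ID         string `json:"id"`
	SequenceID int64  `json:"sequence_id,string"` // ",string" decodes a quoted number
}

func main() {
	var it itemMini
	// Box sends sequence_id as a JSON string, e.g. "3".
	if err := json.Unmarshal([]byte(`{"id":"12345","sequence_id":"3"}`), &it); err != nil {
		panic(err)
	}
	fmt.Println(it.SequenceID + 1) // prints 4 - it really is an int64 now
}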
|
|||||||
@@ -43,6 +43,7 @@ import (
|
|||||||
"github.com/rclone/rclone/lib/jwtutil"
|
"github.com/rclone/rclone/lib/jwtutil"
|
||||||
"github.com/rclone/rclone/lib/oauthutil"
|
"github.com/rclone/rclone/lib/oauthutil"
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
|
"github.com/rclone/rclone/lib/random"
|
||||||
"github.com/rclone/rclone/lib/rest"
|
"github.com/rclone/rclone/lib/rest"
|
||||||
"github.com/youmark/pkcs8"
|
"github.com/youmark/pkcs8"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
@@ -107,16 +108,18 @@ func init() {
 			return nil, nil
 		},
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
 			Name:     "root_folder_id",
 			Help:     "Fill in for rclone to use a non root folder as its starting point.",
 			Default:  "0",
 			Advanced: true,
+			Sensitive: true,
 		}, {
 			Name: "box_config_file",
 			Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
 		}, {
 			Name: "access_token",
 			Help: "Box App Primary Access Token\n\nLeave blank normally.",
+			Sensitive: true,
 		}, {
 			Name:    "box_sub_type",
 			Default: "user",
@@ -147,6 +150,23 @@ func init() {
 			Default:  "",
 			Help:     "Only show items owned by the login (email address) passed in.",
 			Advanced: true,
+		}, {
+			Name:    "impersonate",
+			Default: "",
+			Help: `Impersonate this user ID when using a service account.
+
+Setting this flag allows rclone, when using a JWT service account, to
+act on behalf of another user by setting the as-user header.
+
+The user ID is the Box identifier for a user. User IDs can be found for
+any user via the GET /users endpoint, which is only available to
+admins, or by calling the GET /users/me endpoint with an authenticated
+user session.
+
+See: https://developer.box.com/guides/authentication/jwt/as-user/
+`,
+			Advanced:  true,
+			Sensitive: true,
 		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,
@@ -237,7 +257,6 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
 }

 func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
-
 	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
 	if len(rest) > 0 {
 		return nil, fmt.Errorf("box: extra data included in private key: %w", err)
@@ -260,19 +279,29 @@ type Options struct {
 	AccessToken string `config:"access_token"`
 	ListChunk   int    `config:"list_chunk"`
 	OwnedBy     string `config:"owned_by"`
+	Impersonate string `config:"impersonate"`
+}
+
+// ItemMeta defines metadata we cache for each Item ID
+type ItemMeta struct {
+	SequenceID int64  // the most recent event processed for this item
+	ParentID   string // ID of the parent directory of this item
+	Name       string // leaf name of this item
 }

 // Fs represents a remote box
 type Fs struct {
 	name         string                // name of this remote
 	root         string                // the path we are working on
 	opt          Options               // parsed options
 	features     *fs.Features          // optional features
 	srv          *rest.Client          // the connection to the server
 	dirCache     *dircache.DirCache    // Map of directory path to directory id
 	pacer        *fs.Pacer             // pacer for API calls
 	tokenRenewer *oauthutil.Renew      // renew the token on expiry
 	uploadToken  *pacer.TokenDispenser // control concurrency
+	itemMetaCacheMu *sync.Mutex         // protects itemMetaCache
+	itemMetaCache   map[string]ItemMeta // map of Item ID to selected metadata
 }

 // Object describes a box object
@@ -351,7 +380,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err

 // readMetaDataForPath reads the metadata from the path
 func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
-	// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
+	// defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
 	leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
@@ -360,20 +389,30 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 		return nil, err
 	}

-	found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
-		if strings.EqualFold(item.Name, leaf) {
-			info = item
-			return true
-		}
-		return false
+	// Use preupload to find the ID
+	itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1)
+	if err != nil {
+		return nil, err
+	}
+	if itemMini == nil {
+		return nil, fs.ErrorObjectNotFound
+	}
+
+	// Now we have the ID we can look up the object proper
+	opts := rest.Opts{
+		Method:     "GET",
+		Path:       "/files/" + itemMini.ID,
+		Parameters: fieldsValue(),
+	}
+	var item api.Item
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
 	}
-	if !found {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return info, nil
+	return &item, nil
 }

 // errorHandler parses a non 2xx error response into an error
@@ -420,12 +459,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
 		srv:         rest.NewClient(client).SetRoot(rootURL),
 		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
+		itemMetaCacheMu: new(sync.Mutex),
+		itemMetaCache:   make(map[string]ItemMeta),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -438,6 +479,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
 	}

+	// If using impersonate set an as-user header
+	if f.opt.Impersonate != "" {
+		f.srv.SetHeader("as-user", f.opt.Impersonate)
+	}
+
 	jsonFile, ok := m.Get("box_config_file")
 	boxSubType, boxSubTypeOk := m.Get("box_sub_type")

@@ -573,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		//fmt.Printf("...Error %v\n", err)
+		// fmt.Printf("...Error %v\n", err)
 		return "", err
 	}
 	// fmt.Printf("...Id %q\n", *info.Id)
@@ -680,6 +726,17 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			}
 			entries = append(entries, o)
 		}
+
+		// Cache some metadata for this Item to help us process events later
+		// on. In particular, the box event API does not provide the old path
+		// of the Item when it is renamed/deleted/moved/etc.
+		f.itemMetaCacheMu.Lock()
+		cachedItemMeta, found := f.itemMetaCache[info.ID]
+		if !found || cachedItemMeta.SequenceID < info.SequenceID {
+			f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
+		}
+		f.itemMetaCacheMu.Unlock()
+
 		return false
 	})
 	if err != nil {
@@ -715,7 +772,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 //
 // It returns "", nil if the file is good to go
 // It returns "ID", nil if the file must be updated
-func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
+func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) {
 	check := api.PreUploadCheck{
 		Name: f.opt.Enc.FromStandardName(leaf),
 		Parent: api.Parent{
@@ -740,16 +797,16 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
 			var conflict api.PreUploadCheckConflict
 			err = json.Unmarshal(apiErr.ContextInfo, &conflict)
 			if err != nil {
-				return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
+				return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
 			}
 			if conflict.Conflicts.Type != api.ItemTypeFile {
-				return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
+				return nil, fs.ErrorIsDir
 			}
-			return conflict.Conflicts.ID, nil
+			return &conflict.Conflicts, nil
 		}
-		return "", fmt.Errorf("pre-upload check: %w", err)
+		return nil, fmt.Errorf("pre-upload check: %w", err)
 	}
-	return "", nil
+	return nil, nil
 }

 // Put the object
@@ -770,11 +827,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

 	// Preflight check the upload, which returns the ID if the
 	// object already exists
-	ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
+	item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
 	if err != nil {
 		return nil, err
 	}
-	if ID == "" {
+	if item == nil {
 		return f.PutUnchecked(ctx, in, src, options...)
 	}

@@ -782,7 +839,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	o := &Object{
 		fs:     f,
 		remote: remote,
-		id:     ID,
+		id:     item.ID,
 	}
 	return o, o.Update(ctx, in, src, options...)
 }
@@ -909,6 +966,26 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, err
 	}

+	// check if dest already exists
+	item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
+	if err != nil {
+		return nil, err
+	}
+	if item != nil { // dest already exists, need to copy to temp name and then move
+		tempSuffix := "-rclone-copy-" + random.String(8)
+		fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
+		tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
+		if err != nil {
+			return nil, err
+		}
+		fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
+		err = f.deleteObject(ctx, item.ID)
+		if err != nil {
+			return nil, err
+		}
+		return f.Move(ctx, tempObj, remote)
+	}
+
 	// Copy the object
 	opts := rest.Opts{
 		Method: "POST",
@@ -1119,7 +1196,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
 // CleanUp empties the trash
 func (f *Fs) CleanUp(ctx context.Context) (err error) {
 	var (
-		deleteErrors       = int64(0)
+		deleteErrors       atomic.Uint64
 		concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
 		wg                 sync.WaitGroup
 	)
@@ -1135,7 +1212,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 				err := f.deletePermanently(ctx, item.Type, item.ID)
 				if err != nil {
 					fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
-					atomic.AddInt64(&deleteErrors, 1)
+					deleteErrors.Add(1)
 				}
 			}()
 		} else {
@@ -1144,12 +1221,283 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 		return false
 	})
 	wg.Wait()
-	if deleteErrors != 0 {
-		return fmt.Errorf("failed to delete %d trash items", deleteErrors)
+	if deleteErrors.Load() != 0 {
+		return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
 	}
 	return err
 }
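The CleanUp change above moves the error counter from a hand-rolled int64 plus atomic.AddInt64 to the atomic.Uint64 type. A small self-contained sketch of that counting pattern (the failure condition here is made up for illustration):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var deleteErrors atomic.Uint64
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			if n%3 == 0 { // stand-in for a failed delete
				deleteErrors.Add(1)
			}
		}(i)
	}
	wg.Wait()
	if deleteErrors.Load() != 0 {
		fmt.Printf("failed to delete %d trash items\n", deleteErrors.Load())
	}
}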
|
|
||||||
|
// Shutdown shutdown the fs
|
||||||
|
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||||
|
f.tokenRenewer.Shutdown()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChangeNotify calls the passed function with a path that has had changes.
|
||||||
|
// If the implementation uses polling, it should adhere to the given interval.
|
||||||
|
//
|
||||||
|
// Automatically restarts itself in case of unexpected behavior of the remote.
|
||||||
|
//
|
||||||
|
// Close the returned channel to stop being notified.
|
||||||
|
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||||
|
go func() {
|
||||||
|
// get the `stream_position` early so all changes from now on get processed
|
||||||
|
streamPosition, err := f.changeNotifyStreamPosition(ctx)
|
||||||
|
if err != nil {
|
||||||
|
fs.Infof(f, "Failed to get StreamPosition: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// box can send duplicate Event IDs. Use this map to track and filter
|
||||||
|
// the ones we've already processed.
|
||||||
|
processedEventIDs := make(map[string]time.Time)
|
||||||
|
|
||||||
|
var ticker *time.Ticker
|
||||||
|
var tickerC <-chan time.Time
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case pollInterval, ok := <-pollIntervalChan:
|
||||||
|
if !ok {
|
||||||
|
if ticker != nil {
|
||||||
|
ticker.Stop()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if ticker != nil {
|
||||||
|
ticker.Stop()
|
||||||
|
ticker, tickerC = nil, nil
|
||||||
|
}
|
||||||
|
if pollInterval != 0 {
|
||||||
|
ticker = time.NewTicker(pollInterval)
|
||||||
|
tickerC = ticker.C
|
||||||
|
}
|
||||||
|
case <-tickerC:
|
||||||
|
if streamPosition == "" {
|
||||||
|
streamPosition, err = f.changeNotifyStreamPosition(ctx)
|
||||||
|
if err != nil {
|
||||||
|
fs.Infof(f, "Failed to get StreamPosition: %s", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Garbage collect EventIDs older than 1 minute
|
||||||
|
for eventID, timestamp := range processedEventIDs {
|
||||||
|
if time.Since(timestamp) > time.Minute {
|
||||||
|
delete(processedEventIDs, eventID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
|
||||||
|
if err != nil {
|
||||||
|
fs.Infof(f, "Change notify listener failure: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
Path: "/events",
|
||||||
|
Parameters: fieldsValue(),
|
||||||
|
}
|
||||||
|
opts.Parameters.Set("stream_position", "now")
|
||||||
|
opts.Parameters.Set("stream_type", "changes")
|
||||||
|
|
||||||
|
var result api.Events
|
||||||
|
var resp *http.Response
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||||
|
return shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return strconv.FormatInt(result.NextStreamPosition, 10), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attempts to construct the full path for an object, given the ID of its
|
||||||
|
// parent directory and the name of the object.
|
||||||
|
//
|
||||||
|
// Can return "" if the parentID is not currently in the directory cache.
|
||||||
|
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
|
||||||
|
fullPath = ""
|
||||||
|
name := f.opt.Enc.ToStandardName(childName)
|
||||||
|
if parentID != "" {
|
||||||
|
if parentDir, ok := f.dirCache.GetInv(parentID); ok {
|
||||||
|
if len(parentDir) > 0 {
|
||||||
|
fullPath = parentDir + "/" + name
|
||||||
|
} else {
|
||||||
|
fullPath = name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// No parent, this object is at the root
|
||||||
|
fullPath = name
|
||||||
|
}
|
||||||
|
return fullPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) {
|
||||||
|
nextStreamPosition = streamPosition
|
||||||
|
|
||||||
|
for {
|
||||||
|
limit := f.opt.ListChunk
|
||||||
|
|
||||||
|
// box only allows a max of 500 events
|
||||||
|
if limit > 500 {
|
||||||
|
limit = 500
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
Path: "/events",
|
||||||
|
Parameters: fieldsValue(),
|
||||||
|
}
|
||||||
|
opts.Parameters.Set("stream_position", nextStreamPosition)
|
||||||
|
opts.Parameters.Set("stream_type", "changes")
|
||||||
|
opts.Parameters.Set("limit", strconv.Itoa(limit))
|
||||||
|
|
||||||
|
var result api.Events
|
||||||
|
var resp *http.Response
|
||||||
|
fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||||
|
return shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if result.ChunkSize != int64(len(result.Entries)) {
|
||||||
|
return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
|
||||||
|
}
|
||||||
|
|
||||||
|
nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
|
||||||
|
if result.ChunkSize == 0 {
|
||||||
|
return nextStreamPosition, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type pathToClear struct {
|
||||||
|
path string
|
||||||
|
entryType fs.EntryType
|
||||||
|
}
|
||||||
|
var pathsToClear []pathToClear
|
||||||
|
newEventIDs := 0
|
||||||
|
for _, entry := range result.Entries {
|
||||||
|
eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID,
|
||||||
|
entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID)
|
||||||
|
|
||||||
|
if entry.EventID == "" {
|
||||||
|
fs.Debugf(f, "%s ignored due to missing EventID", eventDetails)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := processedEventIDs[entry.EventID]; ok {
|
||||||
|
fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
processedEventIDs[entry.EventID] = time.Now()
|
||||||
|
newEventIDs++
|
||||||
|
|
||||||
|
if entry.Source.ID == "" { // missing File or Folder ID
|
||||||
|
fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
|
||||||
|
fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only interested in event types that result in a file tree change
|
||||||
|
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
|
||||||
|
fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
f.itemMetaCacheMu.Lock()
|
||||||
|
itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
|
||||||
|
if cachedItemMetaFound {
|
||||||
|
if itemMeta.SequenceID >= entry.Source.SequenceID {
|
||||||
|
// Item in the cache has the same or newer SequenceID than
|
||||||
|
// this event. Ignore this event, it must be old.
|
||||||
|
f.itemMetaCacheMu.Unlock()
|
||||||
|
fs.Debugf(f, "%s ignored due to old SequenceID (%q)", eventDetails, itemMeta.SequenceID)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// This event is newer. Delete its entry from the cache,
|
||||||
|
// we'll notify about its change below, then it's up to a
|
||||||
|
// future list operation to repopulate the cache.
|
||||||
|
delete(f.itemMetaCache, entry.Source.ID)
|
||||||
|
}
|
||||||
|
f.itemMetaCacheMu.Unlock()
|
||||||
|
|
||||||
|
entryType := fs.EntryDirectory
|
||||||
|
if entry.Source.Type == api.ItemTypeFile {
|
||||||
|
entryType = fs.EntryObject
|
||||||
|
}
|
||||||
|
|
||||||
|
// The box event only includes the new path for the object (e.g.
|
||||||
|
// the path after the object was moved). If there was an old path
|
||||||
|
// saved in our cache, it must be cleared.
|
||||||
|
if cachedItemMetaFound {
|
||||||
|
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
|
||||||
|
if path != "" {
|
||||||
|
fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path)
|
||||||
|
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
||||||
|
} else {
|
||||||
|
fs.Debugf(f, "%s old parent not cached", eventDetails)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If this is a directory, also delete it from the dir cache.
|
||||||
|
// This will effectively invalidate the item metadata cache
|
||||||
|
// entries for all descendents of this directory, since we
|
||||||
|
// will no longer be able to construct a full path for them.
|
||||||
|
// This is exactly what we want, since we don't want to notify
|
||||||
|
// on the paths of these descendents if one of their ancestors
|
||||||
|
// has been renamed/deleted.
|
||||||
|
if entry.Source.Type == api.ItemTypeFolder {
|
||||||
|
f.dirCache.FlushDir(path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the item is "active", then it is not trashed or deleted, so
|
||||||
|
// it potentially has a valid parent.
|
||||||
|
//
|
||||||
|
// Construct the new path of the object, based on the Parent ID
|
||||||
|
// and its name. If we get an empty result, it means we don't
|
||||||
|
// currently know about this object so notification is unnecessary.
|
||||||
|
if entry.Source.ItemStatus == api.ItemStatusActive {
|
||||||
|
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
|
||||||
|
if path != "" {
|
||||||
|
fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path)
|
||||||
|
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
||||||
|
} else {
|
||||||
|
fs.Debugf(f, "%s new parent not found", eventDetails)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// box can sometimes repeatedly return the same Event IDs within a
|
||||||
|
// short period of time. If it stops giving us new ones, treat it
|
||||||
|
// the same as if it returned us none at all.
|
||||||
|
if newEventIDs == 0 {
|
||||||
|
return nextStreamPosition, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
notifiedPaths := make(map[string]bool)
|
||||||
|
for _, p := range pathsToClear {
|
||||||
|
if _, ok := notifiedPaths[p.path]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
notifiedPaths[p.path] = true
|
||||||
|
notifyFunc(p.path, p.entryType)
|
||||||
|
}
|
||||||
|
fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// DirCacheFlush resets the directory cache - used in testing as an
|
// DirCacheFlush resets the directory cache - used in testing as an
|
||||||
// optional interface
|
// optional interface
|
||||||
func (f *Fs) DirCacheFlush() {
|
func (f *Fs) DirCacheFlush() {
|
||||||
@@ -1397,6 +1745,7 @@ var (
|
|||||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||||
_ fs.PublicLinker = (*Fs)(nil)
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
_ fs.CleanUpper = (*Fs)(nil)
|
_ fs.CleanUpper = (*Fs)(nil)
|
||||||
|
_ fs.Shutdowner = (*Fs)(nil)
|
||||||
_ fs.Object = (*Object)(nil)
|
_ fs.Object = (*Object)(nil)
|
||||||
_ fs.IDer = (*Object)(nil)
|
_ fs.IDer = (*Object)(nil)
|
||||||
)
|
)
|
||||||
|
|||||||
37
backend/cache/cache.go
vendored
37
backend/cache/cache.go
vendored
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9 && !js
|
//go:build !plan9 && !js
|
||||||
// +build !plan9,!js
|
|
||||||
|
|
||||||
// Package cache implements a virtual provider to cache existing remotes.
|
// Package cache implements a virtual provider to cache existing remotes.
|
||||||
package cache
|
package cache
|
||||||
@@ -76,17 +75,19 @@ func init() {
|
|||||||
Name: "plex_url",
|
Name: "plex_url",
|
||||||
Help: "The URL of the Plex server.",
|
Help: "The URL of the Plex server.",
|
||||||
}, {
|
}, {
|
||||||
Name: "plex_username",
|
Name: "plex_username",
|
||||||
Help: "The username of the Plex user.",
|
Help: "The username of the Plex user.",
|
||||||
|
Sensitive: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "plex_password",
|
Name: "plex_password",
|
||||||
Help: "The password of the Plex user.",
|
Help: "The password of the Plex user.",
|
||||||
IsPassword: true,
|
IsPassword: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "plex_token",
|
Name: "plex_token",
|
||||||
Help: "The plex token for authentication - auto set normally.",
|
Help: "The plex token for authentication - auto set normally.",
|
||||||
Hide: fs.OptionHideBoth,
|
Hide: fs.OptionHideBoth,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
Sensitive: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "plex_insecure",
|
Name: "plex_insecure",
|
||||||
Help: "Skip all certificate verification when connecting to the Plex server.",
|
Help: "Skip all certificate verification when connecting to the Plex server.",
|
||||||
@@ -408,18 +409,16 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
||||||
}
|
}
|
||||||
} else {
|
} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
|
||||||
if opt.PlexPassword != "" && opt.PlexUsername != "" {
|
decPass, err := obscure.Reveal(opt.PlexPassword)
|
||||||
decPass, err := obscure.Reveal(opt.PlexPassword)
|
if err != nil {
|
||||||
if err != nil {
|
decPass = opt.PlexPassword
|
||||||
decPass = opt.PlexPassword
|
}
|
||||||
}
|
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
|
||||||
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
|
m.Set("plex_token", token)
|
||||||
m.Set("plex_token", token)
|
})
|
||||||
})
|
if err != nil {
|
||||||
if err != nil {
|
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
||||||
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
94
backend/cache/cache_internal_test.go
vendored
94
backend/cache/cache_internal_test.go
vendored
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9 && !js && !race
|
//go:build !plan9 && !js && !race
|
||||||
// +build !plan9,!js,!race
|
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|
||||||
@@ -11,7 +10,6 @@ import (
|
|||||||
goflag "flag"
|
goflag "flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
@@ -30,10 +28,11 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/object"
|
"github.com/rclone/rclone/fs/object"
|
||||||
|
"github.com/rclone/rclone/fs/operations"
|
||||||
"github.com/rclone/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/rclone/rclone/fstest/testy"
|
"github.com/rclone/rclone/fstest/testy"
|
||||||
"github.com/rclone/rclone/lib/random"
|
"github.com/rclone/rclone/lib/random"
|
||||||
"github.com/rclone/rclone/vfs/vfsflags"
|
"github.com/rclone/rclone/vfs/vfscommon"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -93,7 +92,7 @@ func TestMain(m *testing.M) {
|
|||||||
goflag.Parse()
|
goflag.Parse()
|
||||||
var rc int
|
var rc int
|
||||||
|
|
||||||
log.Printf("Running with the following params: \n remote: %v", remoteName)
|
fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
|
||||||
runInstance = newRun()
|
runInstance = newRun()
|
||||||
rc = m.Run()
|
rc = m.Run()
|
||||||
os.Exit(rc)
|
os.Exit(rc)
|
||||||
@@ -123,10 +122,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
|
|||||||
|
|
||||||
/* TODO: is this testing something?
|
/* TODO: is this testing something?
|
||||||
func TestInternalVfsCache(t *testing.T) {
|
func TestInternalVfsCache(t *testing.T) {
|
||||||
vfsflags.Opt.DirCacheTime = time.Second * 30
|
vfscommon.Opt.DirCacheTime = time.Second * 30
|
||||||
testSize := int64(524288000)
|
testSize := int64(524288000)
|
||||||
|
|
||||||
vfsflags.Opt.CacheMode = vfs.CacheModeWrites
|
vfscommon.Opt.CacheMode = vfs.CacheModeWrites
|
||||||
id := "tiuufo"
|
id := "tiuufo"
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
|
||||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
@@ -338,7 +337,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
|
|||||||
|
|
||||||
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||||
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
|
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
|
||||||
vfsflags.Opt.DirCacheTime = time.Second
|
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||||
if runInstance.rootIsCrypt {
|
if runInstance.rootIsCrypt {
|
||||||
t.Skip("test skipped with crypt remote")
|
t.Skip("test skipped with crypt remote")
|
||||||
@@ -368,7 +367,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
|||||||
|
|
||||||
func TestInternalLargeWrittenContentMatches(t *testing.T) {
|
func TestInternalLargeWrittenContentMatches(t *testing.T) {
|
||||||
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
|
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
|
||||||
vfsflags.Opt.DirCacheTime = time.Second
|
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||||
if runInstance.rootIsCrypt {
|
if runInstance.rootIsCrypt {
|
||||||
t.Skip("test skipped with crypt remote")
|
t.Skip("test skipped with crypt remote")
|
||||||
@@ -408,7 +407,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
|||||||
// update in the wrapped fs
|
// update in the wrapped fs
|
||||||
originalSize, err := runInstance.size(t, rootFs, "data.bin")
|
originalSize, err := runInstance.size(t, rootFs, "data.bin")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
log.Printf("original size: %v", originalSize)
|
fs.Logf(nil, "original size: %v", originalSize)
|
||||||
|
|
||||||
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -417,7 +416,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
|||||||
if runInstance.rootIsCrypt {
|
if runInstance.rootIsCrypt {
|
||||||
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
|
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
|
expectedSize++ // FIXME newline gets in, likely test data issue
|
||||||
} else {
|
} else {
|
||||||
data2 = []byte("test content")
|
data2 = []byte("test content")
|
||||||
}
|
}
|
||||||
@@ -425,7 +424,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
|||||||
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
|
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, int64(len(data2)), o.Size())
|
require.Equal(t, int64(len(data2)), o.Size())
|
||||||
log.Printf("updated size: %v", len(data2))
|
fs.Logf(nil, "updated size: %v", len(data2))
|
||||||
|
|
||||||
// get a new instance from the cache
|
// get a new instance from the cache
|
||||||
if runInstance.wrappedIsExternal {
|
if runInstance.wrappedIsExternal {
|
||||||
@@ -485,49 +484,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
|
|||||||
err = runInstance.retryBlock(func() error {
|
err = runInstance.retryBlock(func() error {
|
||||||
li, err := runInstance.list(t, rootFs, "test")
|
li, err := runInstance.list(t, rootFs, "test")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("err: %v", err)
|
fs.Logf(nil, "err: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if len(li) != 2 {
|
if len(li) != 2 {
|
||||||
log.Printf("not expected listing /test: %v", li)
|
fs.Logf(nil, "not expected listing /test: %v", li)
|
||||||
return fmt.Errorf("not expected listing /test: %v", li)
|
return fmt.Errorf("not expected listing /test: %v", li)
|
||||||
}
|
}
|
||||||
|
|
||||||
li, err = runInstance.list(t, rootFs, "test/one")
|
li, err = runInstance.list(t, rootFs, "test/one")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("err: %v", err)
|
fs.Logf(nil, "err: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if len(li) != 0 {
|
if len(li) != 0 {
|
||||||
log.Printf("not expected listing /test/one: %v", li)
|
fs.Logf(nil, "not expected listing /test/one: %v", li)
|
||||||
return fmt.Errorf("not expected listing /test/one: %v", li)
|
return fmt.Errorf("not expected listing /test/one: %v", li)
|
||||||
}
|
}
|
||||||
|
|
||||||
li, err = runInstance.list(t, rootFs, "test/second")
|
li, err = runInstance.list(t, rootFs, "test/second")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("err: %v", err)
|
fs.Logf(nil, "err: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if len(li) != 1 {
|
if len(li) != 1 {
|
||||||
log.Printf("not expected listing /test/second: %v", li)
|
fs.Logf(nil, "not expected listing /test/second: %v", li)
|
||||||
return fmt.Errorf("not expected listing /test/second: %v", li)
|
return fmt.Errorf("not expected listing /test/second: %v", li)
|
||||||
}
|
}
|
||||||
if fi, ok := li[0].(os.FileInfo); ok {
|
if fi, ok := li[0].(os.FileInfo); ok {
|
||||||
if fi.Name() != "data.bin" {
|
if fi.Name() != "data.bin" {
|
||||||
log.Printf("not expected name: %v", fi.Name())
|
fs.Logf(nil, "not expected name: %v", fi.Name())
|
||||||
return fmt.Errorf("not expected name: %v", fi.Name())
|
return fmt.Errorf("not expected name: %v", fi.Name())
|
||||||
}
|
}
|
||||||
} else if di, ok := li[0].(fs.DirEntry); ok {
|
} else if di, ok := li[0].(fs.DirEntry); ok {
|
||||||
if di.Remote() != "test/second/data.bin" {
|
if di.Remote() != "test/second/data.bin" {
|
||||||
log.Printf("not expected remote: %v", di.Remote())
|
fs.Logf(nil, "not expected remote: %v", di.Remote())
|
||||||
return fmt.Errorf("not expected remote: %v", di.Remote())
|
return fmt.Errorf("not expected remote: %v", di.Remote())
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Printf("unexpected listing: %v", li)
|
fs.Logf(nil, "unexpected listing: %v", li)
|
||||||
return fmt.Errorf("unexpected listing: %v", li)
|
return fmt.Errorf("unexpected listing: %v", li)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("complete listing: %v", li)
|
fs.Logf(nil, "complete listing: %v", li)
|
||||||
return nil
|
return nil
|
||||||
}, 12, time.Second*10)
|
}, 12, time.Second*10)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -577,43 +576,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
|||||||
err = runInstance.retryBlock(func() error {
|
err = runInstance.retryBlock(func() error {
|
||||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
|
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||||
if !found {
|
if !found {
|
||||||
log.Printf("not found /test")
|
fs.Logf(nil, "not found /test")
|
||||||
return fmt.Errorf("not found /test")
|
return fmt.Errorf("not found /test")
|
||||||
}
|
}
|
||||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
|
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||||
if !found {
|
if !found {
|
||||||
log.Printf("not found /test/one")
|
fs.Logf(nil, "not found /test/one")
|
||||||
return fmt.Errorf("not found /test/one")
|
return fmt.Errorf("not found /test/one")
|
||||||
}
|
}
|
||||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
|
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
|
||||||
if !found {
|
if !found {
|
||||||
log.Printf("not found /test/one/test2")
|
fs.Logf(nil, "not found /test/one/test2")
|
||||||
return fmt.Errorf("not found /test/one/test2")
|
return fmt.Errorf("not found /test/one/test2")
|
||||||
}
|
}
|
||||||
li, err := runInstance.list(t, rootFs, "test/one")
|
li, err := runInstance.list(t, rootFs, "test/one")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("err: %v", err)
|
fs.Logf(nil, "err: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if len(li) != 1 {
|
if len(li) != 1 {
|
||||||
log.Printf("not expected listing /test/one: %v", li)
|
fs.Logf(nil, "not expected listing /test/one: %v", li)
|
||||||
return fmt.Errorf("not expected listing /test/one: %v", li)
|
return fmt.Errorf("not expected listing /test/one: %v", li)
|
||||||
}
|
}
|
||||||
if fi, ok := li[0].(os.FileInfo); ok {
|
if fi, ok := li[0].(os.FileInfo); ok {
|
||||||
if fi.Name() != "test2" {
|
if fi.Name() != "test2" {
|
||||||
log.Printf("not expected name: %v", fi.Name())
|
fs.Logf(nil, "not expected name: %v", fi.Name())
|
||||||
return fmt.Errorf("not expected name: %v", fi.Name())
|
return fmt.Errorf("not expected name: %v", fi.Name())
|
||||||
}
|
}
|
||||||
} else if di, ok := li[0].(fs.DirEntry); ok {
|
} else if di, ok := li[0].(fs.DirEntry); ok {
|
||||||
if di.Remote() != "test/one/test2" {
|
if di.Remote() != "test/one/test2" {
|
||||||
log.Printf("not expected remote: %v", di.Remote())
|
fs.Logf(nil, "not expected remote: %v", di.Remote())
|
||||||
return fmt.Errorf("not expected remote: %v", di.Remote())
|
return fmt.Errorf("not expected remote: %v", di.Remote())
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Printf("unexpected listing: %v", li)
|
fs.Logf(nil, "unexpected listing: %v", li)
|
||||||
return fmt.Errorf("unexpected listing: %v", li)
|
return fmt.Errorf("unexpected listing: %v", li)
|
||||||
}
|
}
|
||||||
log.Printf("complete listing /test/one/test2")
|
fs.Logf(nil, "complete listing /test/one/test2")
|
||||||
return nil
|
return nil
|
||||||
}, 12, time.Second*10)
|
}, 12, time.Second*10)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -708,7 +707,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
|||||||
|
|
||||||
func TestInternalExpiredEntriesRemoved(t *testing.T) {
|
func TestInternalExpiredEntriesRemoved(t *testing.T) {
|
||||||
id := fmt.Sprintf("tieer%v", time.Now().Unix())
|
id := fmt.Sprintf("tieer%v", time.Now().Unix())
|
||||||
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
|
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||||
cfs, err := runInstance.getCacheFs(rootFs)
|
cfs, err := runInstance.getCacheFs(rootFs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -743,7 +742,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalBug2117(t *testing.T) {
|
func TestInternalBug2117(t *testing.T) {
|
||||||
vfsflags.Opt.DirCacheTime = time.Second * 10
|
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)
|
||||||
|
|
||||||
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
|
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
|
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
|
||||||
@@ -771,24 +770,24 @@ func TestInternalBug2117(t *testing.T) {
|
|||||||
|
|
||||||
di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
|
di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
log.Printf("len: %v", len(di))
|
fs.Logf(nil, "len: %v", len(di))
|
||||||
require.Len(t, di, 1)
|
require.Len(t, di, 1)
|
||||||
|
|
||||||
time.Sleep(time.Second * 30)
|
time.Sleep(time.Second * 30)
|
||||||
|
|
||||||
di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
|
di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
log.Printf("len: %v", len(di))
|
fs.Logf(nil, "len: %v", len(di))
|
||||||
require.Len(t, di, 1)
|
require.Len(t, di, 1)
|
||||||
|
|
||||||
di, err = runInstance.list(t, rootFs, "test/dir1")
|
di, err = runInstance.list(t, rootFs, "test/dir1")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
log.Printf("len: %v", len(di))
|
fs.Logf(nil, "len: %v", len(di))
|
||||||
require.Len(t, di, 4)
|
require.Len(t, di, 4)
|
||||||
|
|
||||||
di, err = runInstance.list(t, rootFs, "test")
|
di, err = runInstance.list(t, rootFs, "test")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
log.Printf("len: %v", len(di))
|
fs.Logf(nil, "len: %v", len(di))
|
||||||
require.Len(t, di, 4)
|
require.Len(t, di, 4)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -829,7 +828,7 @@ func newRun() *run {
|
|||||||
} else {
|
} else {
|
||||||
r.tmpUploadDir = uploadDir
|
r.tmpUploadDir = uploadDir
|
||||||
}
|
}
|
||||||
log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
|
fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
|
||||||
|
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
@@ -850,8 +849,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
|
|||||||
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
|
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
|
||||||
fstest.Initialise()
|
fstest.Initialise()
|
||||||
remoteExists := false
|
remoteExists := false
|
||||||
for _, s := range config.FileSections() {
|
for _, s := range config.GetRemotes() {
|
||||||
if s == remote {
|
if s.Name == remote {
|
||||||
remoteExists = true
|
remoteExists = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -875,12 +874,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
cacheRemote := remote
|
cacheRemote := remote
|
||||||
if !remoteExists {
|
if !remoteExists {
|
||||||
localRemote := remote + "-local"
|
localRemote := remote + "-local"
|
||||||
config.FileSet(localRemote, "type", "local")
|
config.FileSetValue(localRemote, "type", "local")
|
||||||
config.FileSet(localRemote, "nounc", "true")
|
config.FileSetValue(localRemote, "nounc", "true")
|
||||||
m.Set("type", "cache")
|
m.Set("type", "cache")
|
||||||
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
|
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
|
||||||
} else {
|
} else {
|
||||||
remoteType := config.FileGet(remote, "type")
|
remoteType := config.GetValue(remote, "type")
|
||||||
if remoteType == "" {
|
if remoteType == "" {
|
||||||
t.Skipf("skipped due to invalid remote type for %v", remote)
|
t.Skipf("skipped due to invalid remote type for %v", remote)
|
||||||
return nil, nil
|
return nil, nil
|
||||||
@@ -891,14 +890,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
m.Set("password", cryptPassword1)
|
m.Set("password", cryptPassword1)
|
||||||
m.Set("password2", cryptPassword2)
|
m.Set("password2", cryptPassword2)
|
||||||
}
|
}
|
||||||
remoteRemote := config.FileGet(remote, "remote")
|
remoteRemote := config.GetValue(remote, "remote")
|
||||||
if remoteRemote == "" {
|
if remoteRemote == "" {
|
||||||
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
|
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
remoteRemoteParts := strings.Split(remoteRemote, ":")
|
remoteRemoteParts := strings.Split(remoteRemote, ":")
|
||||||
remoteWrapping := remoteRemoteParts[0]
|
remoteWrapping := remoteRemoteParts[0]
|
||||||
remoteType := config.FileGet(remoteWrapping, "type")
|
remoteType := config.GetValue(remoteWrapping, "type")
|
||||||
if remoteType != "cache" {
|
if remoteType != "cache" {
|
||||||
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
|
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
|
||||||
return nil, nil
|
return nil, nil
|
||||||
@@ -935,8 +934,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
}
|
}
|
||||||
|
|
||||||
if purge {
|
if purge {
|
||||||
_ = f.Features().Purge(context.Background(), "")
|
_ = operations.Purge(context.Background(), f, "")
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
}
|
||||||
err = f.Mkdir(context.Background(), "")
|
err = f.Mkdir(context.Background(), "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -949,7 +947,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
|
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
|
||||||
err := f.Features().Purge(context.Background(), "")
|
err := operations.Purge(context.Background(), f, "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
cfs, err := r.getCacheFs(f)
|
cfs, err := r.getCacheFs(f)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -1193,7 +1191,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
|||||||
func (r *run) cleanSize(t *testing.T, size int64) int64 {
|
func (r *run) cleanSize(t *testing.T, size int64) int64 {
|
||||||
if r.rootIsCrypt {
|
if r.rootIsCrypt {
|
||||||
denominator := int64(65536 + 16)
|
denominator := int64(65536 + 16)
|
||||||
size = size - 32
|
size -= 32
|
||||||
quotient := size / denominator
|
quotient := size / denominator
|
||||||
remainder := size % denominator
|
remainder := size % denominator
|
||||||
return (quotient*65536 + remainder - 16)
|
return (quotient*65536 + remainder - 16)
|
||||||
|
|||||||
12  backend/cache/cache_test.go (vendored)
@@ -1,7 +1,6 @@
 // Test Cache filesystem interface

 //go:build !plan9 && !js && !race
-// +build !plan9,!js,!race

 package cache_test

@@ -16,10 +15,11 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
-		UnimplementableFsMethods:     []string{"PublicLink", "OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
-		SkipInvalidUTF8:              true, // invalid UTF-8 confuses the cache
+		UnimplementableFsMethods:        []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
+		UnimplementableObjectMethods:    []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
+		UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
+		SkipInvalidUTF8:                 true, // invalid UTF-8 confuses the cache
 	})
 }
2  backend/cache/cache_unsupported.go (vendored)
@@ -2,6 +2,6 @@
 // about "no buildable Go source files "

 //go:build plan9 || js
-// +build plan9 js

+// Package cache implements a virtual provider to cache existing remotes.
 package cache
5  backend/cache/cache_upload_test.go (vendored)
@@ -1,5 +1,4 @@
 //go:build !plan9 && !js && !race
-// +build !plan9,!js,!race

 package cache_test

@@ -160,11 +159,11 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 	minSize := 5242880
 	maxSize := 10485760
 	totalFiles := 10
-	rand.Seed(time.Now().Unix())
+	randInstance := rand.New(rand.NewSource(time.Now().Unix()))

 	lastFile := ""
 	for i := 0; i < totalFiles; i++ {
-		size := int64(rand.Intn(maxSize-minSize) + minSize)
+		size := int64(randInstance.Intn(maxSize-minSize) + minSize)
 		testReader := runInstance.randomReader(t, size)
 		remote := "test/" + strconv.Itoa(i) + ".bin"
 		runInstance.writeRemoteReader(t, rootFs, remote, testReader)
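The change above replaces the deprecated global rand.Seed with a locally seeded generator. A tiny self-contained sketch of the same pattern, with the size bounds borrowed from the test:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// One generator per test run instead of mutating the package-level source.
	randInstance := rand.New(rand.NewSource(time.Now().Unix()))
	minSize, maxSize := 5242880, 10485760
	for i := 0; i < 3; i++ {
		size := int64(randInstance.Intn(maxSize-minSize) + minSize)
		fmt.Println(size) // a value in [minSize, maxSize)
	}
}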
1
backend/cache/directory.go
vendored
1
backend/cache/directory.go
vendored
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9 && !js
|
//go:build !plan9 && !js
|
||||||
// +build !plan9,!js
|
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
13
backend/cache/handle.go
vendored
13
backend/cache/handle.go
vendored
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9 && !js
|
//go:build !plan9 && !js
|
||||||
// +build !plan9,!js
|
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
@@ -119,7 +118,7 @@ func (r *Handle) startReadWorkers() {
|
|||||||
r.scaleWorkers(totalWorkers)
|
r.scaleWorkers(totalWorkers)
|
||||||
}
|
}
|
||||||
|
|
||||||
// scaleOutWorkers will increase the worker pool count by the provided amount
|
// scaleWorkers will increase the worker pool count by the provided amount
|
||||||
func (r *Handle) scaleWorkers(desired int) {
|
func (r *Handle) scaleWorkers(desired int) {
|
||||||
current := r.workers
|
current := r.workers
|
||||||
if current == desired {
|
if current == desired {
|
||||||
@@ -209,7 +208,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
|||||||
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
|
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
|
||||||
|
|
||||||
// we align the start offset of the first chunk to a likely chunk in the storage
|
// we align the start offset of the first chunk to a likely chunk in the storage
|
||||||
chunkStart = chunkStart - offset
|
chunkStart -= offset
|
||||||
r.queueOffset(chunkStart)
|
r.queueOffset(chunkStart)
|
||||||
found := false
|
found := false
|
||||||
|
|
||||||
@@ -328,7 +327,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
|
|||||||
|
|
||||||
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
|
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
|
||||||
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
|
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
|
||||||
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
|
chunkStart -= int64(r.cacheFs().opt.ChunkSize)
|
||||||
}
|
}
|
||||||
r.queueOffset(chunkStart)
|
r.queueOffset(chunkStart)
|
||||||
|
|
||||||
@@ -416,10 +415,8 @@ func (w *worker) run() {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
|
||||||
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
|
continue
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
|
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
|
||||||
|
|||||||
1
backend/cache/object.go
vendored
1
backend/cache/object.go
vendored
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9 && !js
|
//go:build !plan9 && !js
|
||||||
// +build !plan9,!js
|
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
1
backend/cache/plex.go
vendored
1
backend/cache/plex.go
vendored
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9 && !js
|
//go:build !plan9 && !js
|
||||||
// +build !plan9,!js
|
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
1
backend/cache/storage_memory.go
vendored
1
backend/cache/storage_memory.go
vendored
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9 && !js
|
//go:build !plan9 && !js
|
||||||
// +build !plan9,!js
|
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
1
backend/cache/storage_persistent.go
vendored
1
backend/cache/storage_persistent.go
vendored
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9 && !js
|
//go:build !plan9 && !js
|
||||||
// +build !plan9,!js
|
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
3
backend/cache/utils_test.go
vendored
3
backend/cache/utils_test.go
vendored
@@ -1,3 +1,6 @@
|
|||||||
|
//go:build !plan9 && !js
|
||||||
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
import bolt "go.etcd.io/bbolt"
|
import bolt "go.etcd.io/bbolt"
|
||||||
@@ -29,6 +29,7 @@ import (
     "github.com/rclone/rclone/fs/fspath"
     "github.com/rclone/rclone/fs/hash"
     "github.com/rclone/rclone/fs/operations"
+    "github.com/rclone/rclone/lib/encoder"
 )
 
 // Chunker's composite files have one or more chunks
@@ -101,8 +102,10 @@ var (
 //
 // And still chunker's primary function is to chunk large files
 // rather than serve as a generic metadata container.
-const maxMetadataSize = 1023
-const maxMetadataSizeWritten = 255
+const (
+    maxMetadataSize        = 1023
+    maxMetadataSizeWritten = 255
+)
 
 // Current/highest supported metadata format.
 const metadataVersion = 2
@@ -305,7 +308,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
         root: rpath,
         opt:  *opt,
     }
-    cache.PinUntilFinalized(f.base, f)
     f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.
 
     if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {
@@ -317,26 +319,41 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
     // i.e. `rpath` does not exist in the wrapped remote, but chunker
     // detects a composite file because it finds the first chunk!
     // (yet can't satisfy fstest.CheckListing, will ignore)
-    if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
+    if err == nil && !f.useMeta {
         firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
-        _, testErr := cache.Get(ctx, baseName+firstChunkPath)
+        newBase, testErr := cache.Get(ctx, baseName+firstChunkPath)
         if testErr == fs.ErrorIsFile {
+            f.base = newBase
             err = testErr
         }
     }
+    cache.PinUntilFinalized(f.base, f)
 
+    // Correct root if definitely pointing to a file
+    if err == fs.ErrorIsFile {
+        f.root = path.Dir(f.root)
+        if f.root == "." || f.root == "/" {
+            f.root = ""
+        }
+    }
+
     // Note 1: the features here are ones we could support, and they are
     // ANDed with the ones from wrappedFs.
     // Note 2: features.Fill() points features.PutStream to our PutStream,
     // but features.Mask() will nullify it if wrappedFs does not have it.
     f.features = (&fs.Features{
         CaseInsensitive:         true,
         DuplicateFiles:          true,
         ReadMimeType:            false, // Object.MimeType not supported
         WriteMimeType:           true,
         BucketBased:             true,
         CanHaveEmptyDirectories: true,
         ServerSideAcrossConfigs: true,
+        ReadDirMetadata:          true,
+        WriteDirMetadata:         true,
+        WriteDirSetModTime:       true,
+        UserDirMetadata:          true,
+        DirModTimeUpdatesOnWrite: true,
     }).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
 
     f.features.Disable("ListR") // Recursive listing may cause chunker skip files
@@ -813,8 +830,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
             }
         case fs.Directory:
             isSubdir[entry.Remote()] = true
-            wrapDir := fs.NewDirCopy(ctx, entry)
-            wrapDir.SetRemote(entry.Remote())
+            wrapDir := fs.NewDirWrapper(entry.Remote(), entry)
             tempEntries = append(tempEntries, wrapDir)
         default:
             if f.opt.FailHard {
@@ -947,6 +963,11 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
         }
         if caseInsensitive {
             sameMain = strings.EqualFold(mainRemote, remote)
+            if sameMain && f.base.Features().IsLocal {
+                // on local, make sure the EqualFold still holds true when accounting for encoding.
+                // sometimes paths with special characters will only normalize the same way in Standard Encoding.
+                sameMain = strings.EqualFold(encoder.OS.FromStandardPath(mainRemote), encoder.OS.FromStandardPath(remote))
+            }
         } else {
             sameMain = mainRemote == remote
         }
@@ -960,13 +981,13 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
             }
             continue
         }
-        //fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
+        // fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
         if err := o.addChunk(entry, chunkNo); err != nil {
             return nil, err
         }
     }
 
-    if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
+    if o.main == nil && len(o.chunks) == 0 {
         // Scanning hasn't found data chunks with conforming names.
         if f.useMeta || quickScan {
             // Metadata is required but absent and there are no chunks.
@@ -1122,8 +1143,8 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
 // put implements Put, PutStream, PutUnchecked, Update
 func (f *Fs) put(
     ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
-    basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
+    basePut putFn, action string, target fs.Object,
+) (obj fs.Object, err error) {
     // Perform consistency checks
     if err := f.forbidChunk(src, remote); err != nil {
         return nil, fmt.Errorf("%s refused: %w", action, err)
@@ -1563,6 +1584,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
     return f.base.Mkdir(ctx, dir)
 }
 
+// MkdirMetadata makes the root directory of the Fs object
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+    if do := f.base.Features().MkdirMetadata; do != nil {
+        return do(ctx, dir, metadata)
+    }
+    return nil, fs.ErrorNotImplemented
+}
+
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
@@ -1880,6 +1909,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
     return do(ctx, srcFs.base, srcRemote, dstRemote)
 }
 
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+    if do := f.base.Features().DirSetModTime; do != nil {
+        return do(ctx, dir, modTime)
+    }
+    return fs.ErrorNotImplemented
+}
+
 // CleanUp the trash in the Fs
 //
 // Implement this if you have a way of emptying the trash or
@@ -1928,7 +1965,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
         return
     }
     wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
-        //fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
+        // fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
         if entryType == fs.EntryObject {
             mainPath, _, _, xactID := f.parseChunkName(path)
             metaXactID := ""
@@ -2540,6 +2577,8 @@ var (
     _ fs.Copier          = (*Fs)(nil)
     _ fs.Mover           = (*Fs)(nil)
    _ fs.DirMover        = (*Fs)(nil)
+    _ fs.DirSetModTimer  = (*Fs)(nil)
+    _ fs.MkdirMetadataer = (*Fs)(nil)
     _ fs.PutUncheckeder  = (*Fs)(nil)
     _ fs.PutStreamer     = (*Fs)(nil)
     _ fs.CleanUpper      = (*Fs)(nil)
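Several wrapping backends in this range (chunker above, and combine, compress and crypt below) gain MkdirMetadata, DirSetModTime and SetMetadata by delegating to the wrapped remote when it advertises the feature, and returning fs.ErrorNotImplemented otherwise. A caller-side sketch of that optional-feature pattern, using only names visible in these hunks (the helper function itself is illustrative, not rclone code):

    // setDirModTime calls the optional DirSetModTime feature if the backend has it.
    func setDirModTime(ctx context.Context, f fs.Fs, dir string, modTime time.Time) error {
        if do := f.Features().DirSetModTime; do != nil {
            return do(ctx, dir, modTime) // delegate to the backend implementation
        }
        return fs.ErrorNotImplemented // feature not available on this backend
    }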
@@ -36,10 +36,12 @@ func TestIntegration(t *testing.T) {
             "GetTier",
             "SetTier",
             "Metadata",
+            "SetMetadata",
         },
         UnimplementableFsMethods: []string{
             "PublicLink",
             "OpenWriterAt",
+            "OpenChunkWriter",
             "MergeDirs",
             "DirCacheFlush",
             "UserInfo",
@@ -222,18 +222,23 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
     }
     // check features
     var features = (&fs.Features{
         CaseInsensitive:         true,
         DuplicateFiles:          false,
         ReadMimeType:            true,
         WriteMimeType:           true,
         CanHaveEmptyDirectories: true,
         BucketBased:             true,
         SetTier:                 true,
         GetTier:                 true,
         ReadMetadata:            true,
         WriteMetadata:           true,
         UserMetadata:            true,
-        PartialUploads:          true,
+        ReadDirMetadata:          true,
+        WriteDirMetadata:         true,
+        WriteDirSetModTime:       true,
+        UserDirMetadata:          true,
+        DirModTimeUpdatesOnWrite: true,
+        PartialUploads:           true,
     }).Fill(ctx, f)
     canMove := true
     for _, u := range f.upstreams {
@@ -440,6 +445,32 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
     return u.f.Mkdir(ctx, uRemote)
 }
 
+// MkdirMetadata makes the root directory of the Fs object
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+    u, uRemote, err := f.findUpstream(dir)
+    if err != nil {
+        return nil, err
+    }
+    do := u.f.Features().MkdirMetadata
+    if do == nil {
+        return nil, fs.ErrorNotImplemented
+    }
+    newDir, err := do(ctx, uRemote, metadata)
+    if err != nil {
+        return nil, err
+    }
+    entries := fs.DirEntries{newDir}
+    entries, err = u.wrapEntries(ctx, entries)
+    if err != nil {
+        return nil, err
+    }
+    newDir, ok := entries[0].(fs.Directory)
+    if !ok {
+        return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
+    }
+    return newDir, nil
+}
+
 // purge the upstream or fallback to a slow way
 func (u *upstream) purge(ctx context.Context, dir string) (err error) {
     if do := u.f.Features().Purge; do != nil {
@@ -755,12 +786,11 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
         case fs.Object:
             entries[i] = u.newObject(x)
         case fs.Directory:
-            newDir := fs.NewDirCopy(ctx, x)
-            newPath, err := u.pathAdjustment.do(newDir.Remote())
+            newPath, err := u.pathAdjustment.do(x.Remote())
             if err != nil {
                 return nil, err
             }
-            newDir.SetRemote(newPath)
+            newDir := fs.NewDirWrapper(newPath, x)
             entries[i] = newDir
         default:
             return nil, fmt.Errorf("unknown entry type %T", entry)
@@ -783,7 +813,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
     if f.root == "" && dir == "" {
         entries = make(fs.DirEntries, 0, len(f.upstreams))
         for combineDir := range f.upstreams {
-            d := fs.NewDir(combineDir, f.when)
+            d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
             entries = append(entries, d)
         }
         return entries, nil
@@ -914,7 +944,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
     return do(ctx, uRemote, expire, unlink)
 }
 
-// Put in to the remote path with the modTime given of the given size
+// PutUnchecked in to the remote path with the modTime given of the given size
 //
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
@@ -965,6 +995,22 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
     return do(ctx, uDirs)
 }
 
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+    u, uDir, err := f.findUpstream(dir)
+    if err != nil {
+        return err
+    }
+    if uDir == "" {
+        fs.Debugf(dir, "Can't set modtime on upstream root. skipping.")
+        return nil
+    }
+    if do := u.f.Features().DirSetModTime; do != nil {
+        return do(ctx, uDir, modTime)
+    }
+    return fs.ErrorNotImplemented
+}
+
 // CleanUp the trash in the Fs
 //
 // Implement this if you have a way of emptying the trash or
@@ -1073,6 +1119,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
     return do.Metadata(ctx)
 }
 
+// SetMetadata sets metadata for an Object
+//
+// It should return fs.ErrorNotImplemented if it can't set metadata
+func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
+    do, ok := o.Object.(fs.SetMetadataer)
+    if !ok {
+        return fs.ErrorNotImplemented
+    }
+    return do.SetMetadata(ctx, metadata)
+}
+
 // SetTier performs changing storage tier of the Object if
 // multiple storage classes supported
 func (o *Object) SetTier(tier string) error {
@@ -1099,6 +1156,8 @@ var (
     _ fs.PublicLinker    = (*Fs)(nil)
     _ fs.PutUncheckeder  = (*Fs)(nil)
     _ fs.MergeDirser     = (*Fs)(nil)
+    _ fs.DirSetModTimer  = (*Fs)(nil)
+    _ fs.MkdirMetadataer = (*Fs)(nil)
     _ fs.CleanUpper      = (*Fs)(nil)
     _ fs.OpenWriterAter  = (*Fs)(nil)
     _ fs.FullObject      = (*Object)(nil)
@@ -11,7 +11,7 @@ import (
 )
 
 var (
-    unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
+    unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
     unimplementableObjectMethods = []string{}
 )
 
@@ -14,6 +14,7 @@ import (
     "fmt"
     "io"
     "os"
+    "path"
     "regexp"
     "strings"
     "time"
@@ -37,6 +38,7 @@ import (
 const (
     initialChunkSize = 262144  // Initial and max sizes of chunks when reading parts of the file. Currently
     maxChunkSize     = 8388608 // at 256 KiB and 8 MiB.
+    chunkStreams     = 0       // Streams to use for reading
 
     bufferSize     = 8388608
     heuristicBytes = 1048576
@@ -172,21 +174,33 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
         opt:  *opt,
         mode: compressionModeFromName(opt.CompressionMode),
     }
+    // Correct root if definitely pointing to a file
+    if err == fs.ErrorIsFile {
+        f.root = path.Dir(f.root)
+        if f.root == "." || f.root == "/" {
+            f.root = ""
+        }
+    }
     // the features here are ones we could support, and they are
     // ANDed with the ones from wrappedFs
     f.features = (&fs.Features{
         CaseInsensitive:         true,
         DuplicateFiles:          false,
         ReadMimeType:            false,
         WriteMimeType:           false,
         GetTier:                 true,
         SetTier:                 true,
         BucketBased:             true,
         CanHaveEmptyDirectories: true,
         ReadMetadata:            true,
         WriteMetadata:           true,
         UserMetadata:            true,
-        PartialUploads:          true,
+        ReadDirMetadata:          true,
+        WriteDirMetadata:         true,
+        WriteDirSetModTime:       true,
+        UserDirMetadata:          true,
+        DirModTimeUpdatesOnWrite: true,
+        PartialUploads:           true,
     }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
     // We support reading MIME types no matter the wrapped fs
     f.features.ReadMimeType = true
@@ -257,6 +271,16 @@ func isMetadataFile(filename string) bool {
     return strings.HasSuffix(filename, metaFileExt)
 }
 
+// Checks whether a file is a metadata file and returns the original
+// file name and a flag indicating whether it was a metadata file or
+// not.
+func unwrapMetadataFile(filename string) (string, bool) {
+    if !isMetadataFile(filename) {
+        return "", false
+    }
+    return filename[:len(filename)-len(metaFileExt)], true
+}
+
 // makeDataName generates the file name for a data file with specified compression mode
 func makeDataName(remote string, size int64, mode int) (newRemote string) {
     if mode != Uncompressed {
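The new unwrapMetadataFile helper is the inverse of makeMetadataName: it strips the metadata suffix and reports whether the name was a metadata file at all, which ChangeNotify uses below to ignore plain data files. A standalone sketch of the same behaviour, assuming the suffix constant metaFileExt is ".meta" (an assumption for illustration; the real value is defined elsewhere in the file and not shown in this diff):

    package main

    import (
        "fmt"
        "strings"
    )

    const metaFileExt = ".meta" // assumed value, for illustration only

    func unwrapMetadataFile(filename string) (string, bool) {
        if !strings.HasSuffix(filename, metaFileExt) {
            return "", false
        }
        return filename[:len(filename)-len(metaFileExt)], true
    }

    func main() {
        fmt.Println(unwrapMetadataFile("photos/cat.jpg.meta")) // "photos/cat.jpg" true
        fmt.Println(unwrapMetadataFile("photos/cat.jpg"))      // "" false
    }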
@@ -432,7 +456,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
         if err != nil {
             fs.Errorf(o, "Failed to remove corrupted object: %v", err)
         }
-        return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
+        return fmt.Errorf("corrupted on transfer: %v compressed hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
     }
     return nil
 }
@@ -766,6 +790,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
     return f.Fs.Mkdir(ctx, dir)
 }
 
+// MkdirMetadata makes the root directory of the Fs object
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+    if do := f.Fs.Features().MkdirMetadata; do != nil {
+        return do(ctx, dir, metadata)
+    }
+    return nil, fs.ErrorNotImplemented
+}
+
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
@@ -909,6 +941,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
     return do(ctx, srcFs.Fs, srcRemote, dstRemote)
 }
 
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+    if do := f.Fs.Features().DirSetModTime; do != nil {
+        return do(ctx, dir, modTime)
+    }
+    return fs.ErrorNotImplemented
+}
+
 // CleanUp the trash in the Fs
 //
 // Implement this if you have a way of emptying the trash or
@@ -979,7 +1019,8 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
     wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
         fs.Logf(f, "path %q entryType %d", path, entryType)
         var (
             wrappedPath    string
+            isMetadataFile bool
         )
         switch entryType {
         case fs.EntryDirectory:
@@ -987,7 +1028,10 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
         case fs.EntryObject:
             // Note: All we really need to do to monitor the object is to check whether the metadata changed,
             // as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
-            wrappedPath = makeMetadataName(path)
+            wrappedPath, isMetadataFile = unwrapMetadataFile(path)
+            if !isMetadataFile {
+                return
+            }
         default:
             fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
             return
@@ -1243,6 +1287,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
     return do.Metadata(ctx)
 }
 
+// SetMetadata sets metadata for an Object
+//
+// It should return fs.ErrorNotImplemented if it can't set metadata
+func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
+    do, ok := o.Object.(fs.SetMetadataer)
+    if !ok {
+        return fs.ErrorNotImplemented
+    }
+    return do.SetMetadata(ctx, metadata)
+}
+
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1308,7 +1363,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
         }
     }
     // Get a chunkedreader for the wrapped object
-    chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
+    chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
     // Get file handle
     var file io.Reader
     if offset != 0 {
@@ -1475,6 +1530,8 @@ var (
     _ fs.Copier          = (*Fs)(nil)
     _ fs.Mover           = (*Fs)(nil)
     _ fs.DirMover        = (*Fs)(nil)
+    _ fs.DirSetModTimer  = (*Fs)(nil)
+    _ fs.MkdirMetadataer = (*Fs)(nil)
     _ fs.PutStreamer     = (*Fs)(nil)
     _ fs.CleanUpper      = (*Fs)(nil)
     _ fs.UnWrapper       = (*Fs)(nil)
@@ -14,23 +14,26 @@ import (
     "github.com/rclone/rclone/fstest/fstests"
 )
 
+var defaultOpt = fstests.Opt{
+    RemoteName: "TestCompress:",
+    NilObject:  (*Object)(nil),
+    UnimplementableFsMethods: []string{
+        "OpenWriterAt",
+        "OpenChunkWriter",
+        "MergeDirs",
+        "DirCacheFlush",
+        "PutUnchecked",
+        "PutStream",
+        "UserInfo",
+        "Disconnect",
+    },
+    TiersToTest:                  []string{"STANDARD", "STANDARD_IA"},
+    UnimplementableObjectMethods: []string{},
+}
+
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
-    opt := fstests.Opt{
-        RemoteName: *fstest.RemoteName,
-        NilObject:  (*Object)(nil),
-        UnimplementableFsMethods: []string{
-            "OpenWriterAt",
-            "MergeDirs",
-            "DirCacheFlush",
-            "PutUnchecked",
-            "PutStream",
-            "UserInfo",
-            "Disconnect",
-        },
-        TiersToTest:                  []string{"STANDARD", "STANDARD_IA"},
-        UnimplementableObjectMethods: []string{}}
-    fstests.Run(t, &opt)
+    fstests.Run(t, &defaultOpt)
 }
 
 // TestRemoteGzip tests GZIP compression
@@ -40,27 +43,13 @@ func TestRemoteGzip(t *testing.T) {
     }
     tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
     name := "TestCompressGzip"
-    fstests.Run(t, &fstests.Opt{
-        RemoteName: name + ":",
-        NilObject:  (*Object)(nil),
-        UnimplementableFsMethods: []string{
-            "OpenWriterAt",
-            "MergeDirs",
-            "DirCacheFlush",
-            "PutUnchecked",
-            "PutStream",
-            "UserInfo",
-            "Disconnect",
-        },
-        UnimplementableObjectMethods: []string{
-            "GetTier",
-            "SetTier",
-        },
-        ExtraConfig: []fstests.ExtraConfigItem{
-            {Name: name, Key: "type", Value: "compress"},
-            {Name: name, Key: "remote", Value: tempdir},
-            {Name: name, Key: "compression_mode", Value: "gzip"},
-        },
-        QuickTestOK: true,
-    })
+    opt := defaultOpt
+    opt.RemoteName = name + ":"
+    opt.ExtraConfig = []fstests.ExtraConfigItem{
+        {Name: name, Key: "type", Value: "compress"},
+        {Name: name, Key: "remote", Value: tempdir},
+        {Name: name, Key: "compression_mode", Value: "gzip"},
+    }
+    opt.QuickTestOK = true
+    fstests.Run(t, &opt)
 }
@@ -329,7 +329,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
     for _, runeValue := range plaintext {
         dir += int(runeValue)
     }
-    dir = dir % 256
+    dir %= 256
 
     // We'll use this number to store in the result filename...
     var result bytes.Buffer
@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
             if pos >= 26 {
                 pos -= 6
             }
-            pos = pos - thisdir
+            pos -= thisdir
             if pos < 0 {
                 pos += 52
             }
@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
         fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
         // Zero out the bad block and continue
         for i := range (*fh.buf)[:n] {
-            (*fh.buf)[i] = 0
+            fh.buf[i] = 0
         }
     }
     fh.bufIndex = 0
@@ -130,6 +130,16 @@ trying to recover an encrypted file with errors and it is desired to
 recover as much of the file as possible.`,
             Default:  false,
             Advanced: true,
+        }, {
+            Name: "strict_names",
+            Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
+
+(By default, rclone will just log a NOTICE and continue as normal.)
+This can happen if encrypted and unencrypted files are stored in the same
+directory (which is not recommended.) It may also indicate a more serious
+problem that should be investigated.`,
+            Default:  false,
+            Advanced: true,
         }, {
             Name: "filename_encoding",
             Help: `How to encode the encrypted filename to text string.
@@ -253,22 +263,34 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
         cipher: cipher,
     }
     cache.PinUntilFinalized(f.Fs, f)
+    // Correct root if definitely pointing to a file
+    if err == fs.ErrorIsFile {
+        f.root = path.Dir(f.root)
+        if f.root == "." || f.root == "/" {
+            f.root = ""
+        }
+    }
     // the features here are ones we could support, and they are
     // ANDed with the ones from wrappedFs
     f.features = (&fs.Features{
         CaseInsensitive:         !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
         DuplicateFiles:          true,
         ReadMimeType:            false, // MimeTypes not supported with crypt
         WriteMimeType:           false,
         BucketBased:             true,
         CanHaveEmptyDirectories: true,
         SetTier:                 true,
         GetTier:                 true,
         ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
         ReadMetadata:            true,
         WriteMetadata:           true,
         UserMetadata:            true,
-        PartialUploads:          true,
+        ReadDirMetadata:          true,
+        WriteDirMetadata:         true,
+        WriteDirSetModTime:       true,
+        UserDirMetadata:          true,
+        DirModTimeUpdatesOnWrite: true,
+        PartialUploads:           true,
     }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
 
     return f, err
@@ -287,6 +309,7 @@ type Options struct {
     PassBadBlocks    bool   `config:"pass_bad_blocks"`
     FilenameEncoding string `config:"filename_encoding"`
     Suffix           string `config:"suffix"`
+    StrictNames      bool   `config:"strict_names"`
 }
 
 // Fs represents a wrapped fs.Fs
@@ -321,45 +344,64 @@ func (f *Fs) String() string {
 }
 
 // Encrypt an object file name to entries.
-func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
+func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error {
     remote := obj.Remote()
     decryptedRemote, err := f.cipher.DecryptFileName(remote)
     if err != nil {
-        fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
-        return
+        if f.opt.StrictNames {
+            return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err)
+        }
+        fs.Logf(remote, "Skipping undecryptable file name: %v", err)
+        return nil
     }
     if f.opt.ShowMapping {
         fs.Logf(decryptedRemote, "Encrypts to %q", remote)
     }
     *entries = append(*entries, f.newObject(obj))
+    return nil
 }
 
 // Encrypt a directory file name to entries.
-func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
+func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error {
     remote := dir.Remote()
     decryptedRemote, err := f.cipher.DecryptDirName(remote)
     if err != nil {
-        fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
-        return
+        if f.opt.StrictNames {
+            return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err)
+        }
+        fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
+        return nil
     }
     if f.opt.ShowMapping {
         fs.Logf(decryptedRemote, "Encrypts to %q", remote)
     }
     *entries = append(*entries, f.newDir(ctx, dir))
+    return nil
 }
 
 // Encrypt some directory entries. This alters entries returning it as newEntries.
 func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
     newEntries = entries[:0] // in place filter
+    errors := 0
+    var firsterr error
     for _, entry := range entries {
         switch x := entry.(type) {
         case fs.Object:
-            f.add(&newEntries, x)
+            err = f.add(&newEntries, x)
         case fs.Directory:
-            f.addDir(ctx, &newEntries, x)
+            err = f.addDir(ctx, &newEntries, x)
         default:
             return nil, fmt.Errorf("unknown object type %T", entry)
         }
+        if err != nil {
+            errors++
+            if firsterr == nil {
+                firsterr = err
+            }
+        }
     }
+    if firsterr != nil {
+        return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
+    }
     return newEntries, nil
 }
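With the new strict_names option the add/addDir helpers now return an error instead of always skipping undecryptable names, and encryptEntries counts failures and reports the first one. A generic sketch of that accumulate-and-report pattern, detached from rclone (items and process are placeholders, not rclone names; fmt is the only import needed):

    // processAll keeps going on failure, then reports how many items failed
    // together with the first error encountered.
    func processAll(items []string, process func(string) error) error {
        errors := 0
        var firsterr error
        for _, item := range items {
            if err := process(item); err != nil {
                errors++
                if firsterr == nil {
                    firsterr = err
                }
            }
        }
        if firsterr != nil {
            return fmt.Errorf("there were %v errors. first error: %v", errors, firsterr)
        }
        return nil
    }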
@@ -478,7 +520,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
             if err != nil {
                 fs.Errorf(o, "Failed to remove corrupted object: %v", err)
             }
-            return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
+            return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
         }
         fs.Debugf(src, "%v = %s OK", ht, srcHash)
     }
@@ -513,6 +555,37 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
     return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
 }
 
+// MkdirMetadata makes the root directory of the Fs object
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+    do := f.Fs.Features().MkdirMetadata
+    if do == nil {
+        return nil, fs.ErrorNotImplemented
+    }
+    newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
+    if err != nil {
+        return nil, err
+    }
+    var entries = make(fs.DirEntries, 0, 1)
+    err = f.addDir(ctx, &entries, newDir)
+    if err != nil {
+        return nil, err
+    }
+    newDir, ok := entries[0].(fs.Directory)
+    if !ok {
+        return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
+    }
+    return newDir, nil
+}
+
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+    do := f.Fs.Features().DirSetModTime
+    if do == nil {
+        return fs.ErrorNotImplemented
+    }
+    return do(ctx, f.cipher.EncryptDirName(dir), modTime)
+}
+
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
@@ -754,7 +827,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
     }
     out := make([]fs.Directory, len(dirs))
     for i, dir := range dirs {
-        out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
+        out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
     }
     return do(ctx, out)
 }
@@ -990,14 +1063,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 // newDir returns a dir with the Name decrypted
 func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
-    newDir := fs.NewDirCopy(ctx, dir)
     remote := dir.Remote()
     decryptedRemote, err := f.cipher.DecryptDirName(remote)
     if err != nil {
         fs.Debugf(remote, "Undecryptable dir name: %v", err)
     } else {
-        newDir.SetRemote(decryptedRemote)
+        remote = decryptedRemote
     }
+    newDir := fs.NewDirWrapper(remote, dir)
     return newDir
 }
 
@@ -1175,6 +1248,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
     return do.Metadata(ctx)
 }
 
+// SetMetadata sets metadata for an Object
+//
+// It should return fs.ErrorNotImplemented if it can't set metadata
+func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
+    do, ok := o.Object.(fs.SetMetadataer)
+    if !ok {
+        return fs.ErrorNotImplemented
+    }
+    return do.SetMetadata(ctx, metadata)
+}
+
 // MimeType returns the content type of the Object if
 // known, or "" if not
 //
@@ -1200,6 +1284,8 @@ var (
     _ fs.Abouter         = (*Fs)(nil)
     _ fs.Wrapper         = (*Fs)(nil)
     _ fs.MergeDirser     = (*Fs)(nil)
+    _ fs.DirSetModTimer  = (*Fs)(nil)
+    _ fs.MkdirMetadataer = (*Fs)(nil)
     _ fs.DirCacheFlusher = (*Fs)(nil)
     _ fs.ChangeNotifier  = (*Fs)(nil)
     _ fs.PublicLinker    = (*Fs)(nil)
@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName:                   *fstest.RemoteName,
         NilObject:                    (*crypt.Object)(nil),
-        UnimplementableFsMethods:     []string{"OpenWriterAt"},
+        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
         UnimplementableObjectMethods: []string{"MimeType"},
     })
 }
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
             {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
             {Name: name, Key: "filename_encryption", Value: "standard"},
         },
-        UnimplementableFsMethods:     []string{"OpenWriterAt"},
+        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
         UnimplementableObjectMethods: []string{"MimeType"},
         QuickTestOK:                  true,
     })
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
             {Name: name, Key: "filename_encryption", Value: "standard"},
             {Name: name, Key: "filename_encoding", Value: "base64"},
         },
-        UnimplementableFsMethods:     []string{"OpenWriterAt"},
+        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
         UnimplementableObjectMethods: []string{"MimeType"},
         QuickTestOK:                  true,
     })
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
             {Name: name, Key: "filename_encryption", Value: "standard"},
             {Name: name, Key: "filename_encoding", Value: "base32768"},
         },
-        UnimplementableFsMethods:     []string{"OpenWriterAt"},
+        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
         UnimplementableObjectMethods: []string{"MimeType"},
         QuickTestOK:                  true,
     })
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
             {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
             {Name: name, Key: "filename_encryption", Value: "off"},
         },
-        UnimplementableFsMethods:     []string{"OpenWriterAt"},
+        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
         UnimplementableObjectMethods: []string{"MimeType"},
         QuickTestOK:                  true,
     })
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
             {Name: name, Key: "filename_encryption", Value: "obfuscate"},
         },
         SkipBadWindowsCharacters:     true,
-        UnimplementableFsMethods:     []string{"OpenWriterAt"},
+        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
         UnimplementableObjectMethods: []string{"MimeType"},
         QuickTestOK:                  true,
     })
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
             {Name: name, Key: "no_data_encryption", Value: "true"},
         },
         SkipBadWindowsCharacters:     true,
-        UnimplementableFsMethods:     []string{"OpenWriterAt"},
+        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
         UnimplementableObjectMethods: []string{"MimeType"},
         QuickTestOK:                  true,
     })
(File diff suppressed because it is too large)
@@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
         wantErr error
     }{
         {"doc", []string{".doc"}, nil},
-        {" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
+        {" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
         {"docx,svg,Docx", []string{".docx", ".svg"}, nil},
         {"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
     } {
@@ -524,12 +524,49 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
     })
 }
 
+// TestIntegration/FsMkdir/FsPutFiles/Internal/Query
+func (f *Fs) InternalTestQuery(t *testing.T) {
+    ctx := context.Background()
+    var err error
+    t.Run("BadQuery", func(t *testing.T) {
+        _, err = f.query(ctx, "this is a bad query")
+        require.Error(t, err)
+        assert.Contains(t, err.Error(), "failed to execute query")
+    })
+
+    t.Run("NoMatch", func(t *testing.T) {
+        results, err := f.query(ctx, fmt.Sprintf("name='%s' and name!='%s'", existingSubDir, existingSubDir))
+        require.NoError(t, err)
+        assert.Len(t, results, 0)
+    })
+
+    t.Run("GoodQuery", func(t *testing.T) {
+        pathSegments := strings.Split(existingFile, "/")
+        var parent string
+        for _, item := range pathSegments {
+            // the file name contains ' characters which must be escaped
+            escapedItem := f.opt.Enc.FromStandardName(item)
+            escapedItem = strings.ReplaceAll(escapedItem, `\`, `\\`)
+            escapedItem = strings.ReplaceAll(escapedItem, `'`, `\'`)
+
+            results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
+            require.NoError(t, err)
+            require.True(t, len(results) > 0)
+            for _, result := range results {
+                assert.True(t, len(result.Id) > 0)
+                assert.Equal(t, result.Name, item)
+            }
+            parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
+        }
+    })
+}
+
 // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
 func (f *Fs) InternalTestAgeQuery(t *testing.T) {
     // Check set up for filtering
     assert.True(t, f.Features().FilterAware)
 
-    opt := &filter.Opt{}
+    opt := &filter.Options{}
     err := opt.MaxAge.Set("1h")
     assert.NoError(t, err)
     flt, err := filter.NewFilter(opt)
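The new InternalTestQuery exercises the backend's query helper against the Google Drive search syntax, walking the path one segment at a time and narrowing each step with a "'<id>' in parents and " prefix. Because Drive query values are single-quoted, backslashes and quotes in names have to be escaped, as the test does. A small sketch of just that escaping step (the helper name is illustrative; only the ReplaceAll calls mirror the test):

    // escapeQueryValue prepares a file name for use inside single quotes in a
    // Drive query such as "trashed=false and name='<value>'".
    func escapeQueryValue(s string) string {
        s = strings.ReplaceAll(s, `\`, `\\`) // escape backslashes first
        s = strings.ReplaceAll(s, `'`, `\'`) // then single quotes
        return s
    }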
@@ -611,6 +648,7 @@ func (f *Fs) InternalTest(t *testing.T) {
     t.Run("Shortcuts", f.InternalTestShortcuts)
     t.Run("UnTrash", f.InternalTestUnTrash)
     t.Run("CopyID", f.InternalTestCopyID)
+    t.Run("Query", f.InternalTestQuery)
     t.Run("AgeQuery", f.InternalTestAgeQuery)
     t.Run("ShouldRetry", f.InternalTestShouldRetry)
 }
638
backend/drive/metadata.go
Normal file
638
backend/drive/metadata.go
Normal file
@@ -0,0 +1,638 @@
package drive

import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
	"sync"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/errcount"
	"golang.org/x/sync/errgroup"
	drive "google.golang.org/api/drive/v3"
	"google.golang.org/api/googleapi"
)

// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
	"content-type": {
		Help: "The MIME type of the file.",
		Type: "string",
		Example: "text/plain",
	},
	"mtime": {
		Help: "Time of last modification with mS accuracy.",
		Type: "RFC 3339",
		Example: "2006-01-02T15:04:05.999Z07:00",
	},
	"btime": {
		Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.",
		Type: "RFC 3339",
		Example: "2006-01-02T15:04:05.999Z07:00",
	},
	"copy-requires-writer-permission": {
		Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.",
		Type: "boolean",
		Example: "true",
	},
	"writers-can-share": {
		Help: "Whether users with only writer permission can modify the file's permissions. Not populated and ignored when setting for items in shared drives.",
		Type: "boolean",
		Example: "false",
	},
	"viewed-by-me": {
		Help: "Whether the file has been viewed by this user.",
		Type: "boolean",
		Example: "true",
		ReadOnly: true,
	},
	"owner": {
		Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.",
		Type: "string",
		Example: "user@example.com",
	},
	"permissions": {
		Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. Enable with --drive-metadata-permissions.",
		Type: "JSON",
		Example: "{}",
	},
	"folder-color-rgb": {
		Help: "The color for a folder or a shortcut to a folder as an RGB hex string.",
		Type: "string",
		Example: "881133",
	},
	"description": {
		Help: "A short description of the file.",
		Type: "string",
		Example: "Contract for signing",
	},
	"starred": {
		Help: "Whether the user has starred the file.",
		Type: "boolean",
		Example: "false",
	},
	"labels": {
		Help: "Labels attached to this file in a JSON dump of Googled drive format. Enable with --drive-metadata-labels.",
		Type: "JSON",
		Example: "[]",
	},
}

// Extra fields we need to fetch to implement the system metadata above
var metadataFields = googleapi.Field(strings.Join([]string{
	"copyRequiresWriterPermission",
	"description",
	"folderColorRgb",
	"hasAugmentedPermissions",
	"owners",
	"permissionIds",
	"permissions",
	"properties",
	"starred",
	"viewedByMe",
	"viewedByMeTime",
	"writersCanShare",
}, ","))

// Fields we need to read from permissions
var permissionsFields = googleapi.Field(strings.Join([]string{
	"*",
	"permissionDetails/*",
}, ","))

// getPermission returns permissions for the fileID and permissionID passed in
func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) {
	f.permissionsMu.Lock()
	defer f.permissionsMu.Unlock()
	if useCache {
		perm = f.permissions[permissionID]
		if perm != nil {
			return perm, false, nil
		}
	}
	fs.Debugf(f, "Fetching permission %q", permissionID)
	err = f.pacer.Call(func() (bool, error) {
		perm, err = f.svc.Permissions.Get(fileID, permissionID).
			Fields(permissionsFields).
			SupportsAllDrives(true).
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, false, err
	}

	inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited

	cleanPermission(perm)

	// cache the permission
	f.permissions[permissionID] = perm

	return perm, inherited, err
}

// Set the permissions on the info
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
	errs := errcount.New()
	for _, perm := range permissions {
		if perm.Role == "owner" {
			// ignore owner permissions - these are set with owner
			continue
		}
		cleanPermissionForWrite(perm)
		err := f.pacer.Call(func() (bool, error) {
			_, err := f.svc.Permissions.Create(info.Id, perm).
				SupportsAllDrives(true).
				SendNotificationEmail(false).
				Context(ctx).Do()
			return f.shouldRetry(ctx, err)
		})
		if err != nil {
			fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err)
			errs.Add(err)
		}
	}
	err = errs.Err("failed to set permission")
	if err != nil {
		err = fserrors.NoRetryError(err)
	}
	return err
}

// Clean attributes from permissions which we can't write
func cleanPermissionForWrite(perm *drive.Permission) {
	perm.Deleted = false
	perm.DisplayName = ""
	perm.Id = ""
	perm.Kind = ""
	perm.PermissionDetails = nil
	perm.TeamDrivePermissionDetails = nil
}

// Clean and cache the permission if not already cached
func (f *Fs) cleanAndCachePermission(perm *drive.Permission) {
	f.permissionsMu.Lock()
	defer f.permissionsMu.Unlock()
	cleanPermission(perm)
	if _, found := f.permissions[perm.Id]; !found {
		f.permissions[perm.Id] = perm
	}
}

// Clean fields we don't need to keep from the permission
func cleanPermission(perm *drive.Permission) {
	// DisplayName: Output only. The "pretty" name of the value of the
	// permission. The following is a list of examples for each type of
	// permission: * `user` - User's full name, as defined for their Google
	// account, such as "Joe Smith." * `group` - Name of the Google Group,
	// such as "The Company Administrators." * `domain` - String domain
	// name, such as "thecompany.com." * `anyone` - No `displayName` is
	// present.
	perm.DisplayName = ""

	// Kind: Output only. Identifies what kind of resource this is. Value:
	// the fixed string "drive#permission".
	perm.Kind = ""

	// PermissionDetails: Output only. Details of whether the permissions on
	// this shared drive item are inherited or directly on this item. This
	// is an output-only field which is present only for shared drive items.
	perm.PermissionDetails = nil

	// PhotoLink: Output only. A link to the user's profile photo, if
	// available.
	perm.PhotoLink = ""

	// TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use
	// `permissionDetails` instead.
	perm.TeamDrivePermissionDetails = nil
}

// Fields we need to read from labels
var labelsFields = googleapi.Field(strings.Join([]string{
	"*",
}, ","))

// getLabels returns labels for the fileID passed in
func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) {
	fs.Debugf(f, "Fetching labels for %q", fileID)
	listLabels := f.svc.Files.ListLabels(fileID).
		Fields(labelsFields).
		Context(ctx)
	for {
		var info *drive.LabelList
		err = f.pacer.Call(func() (bool, error) {
			info, err = listLabels.Do()
			return f.shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, err
		}
		labels = append(labels, info.Labels...)
		if info.NextPageToken == "" {
			break
		}
		listLabels.PageToken(info.NextPageToken)
	}
	for _, label := range labels {
		cleanLabel(label)
	}
	return labels, nil
}

// Set the labels on the info
func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) {
	if len(labels) == 0 {
		return nil
	}
	req := drive.ModifyLabelsRequest{}
	for _, label := range labels {
		req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{
			FieldModifications: labelFieldsToFieldModifications(label.Fields),
			LabelId: label.Id,
		})
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Files.ModifyLabels(info.Id, &req).
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to set labels: %w", err)
	}
	return nil
}

// Convert label fields into something which can set the fields
func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) {
	for id, field := range fields {
		var emails []string
		for _, user := range field.User {
			emails = append(emails, user.EmailAddress)
		}
		out = append(out, &drive.LabelFieldModification{
			// FieldId: The ID of the field to be modified.
			FieldId: id,

			// SetDateValues: Replaces the value of a dateString Field with these
			// new values. The string must be in the RFC 3339 full-date format:
			// YYYY-MM-DD.
			SetDateValues: field.DateString,

			// SetIntegerValues: Replaces the value of an `integer` field with these
			// new values.
			SetIntegerValues: field.Integer,

			// SetSelectionValues: Replaces a `selection` field with these new
			// values.
			SetSelectionValues: field.Selection,

			// SetTextValues: Sets the value of a `text` field.
			SetTextValues: field.Text,

			// SetUserValues: Replaces a `user` field with these new values. The
			// values must be valid email addresses.
			SetUserValues: emails,
		})
	}
	return out
}

// Clean fields we don't need to keep from the label
func cleanLabel(label *drive.Label) {
	// Kind: This is always drive#label
	label.Kind = ""

	for name, field := range label.Fields {
		// Kind: This is always drive#labelField.
		field.Kind = ""

		// Note the fields are copies so we need to write them
		// back to the map
		label.Fields[name] = field
	}
}

// Parse the metadata from drive item
//
// It should return nil if there is no Metadata
func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
	metadata := make(fs.Metadata, 16)

	// Dump user metadata first as it overrides system metadata
	for k, v := range info.Properties {
		metadata[k] = v
	}

	// System metadata
	metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
	metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare)
	metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe)
	metadata["content-type"] = info.MimeType

	// Owners: Output only. The owner of this file. Only certain legacy
	// files may have more than one owner. This field isn't populated for
	// items in shared drives.
	if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 {
		user := info.Owners[0]
		if len(info.Owners) > 1 {
			fs.Logf(o, "Ignoring more than 1 owner")
		}
		if user != nil {
			id := user.EmailAddress
			if id == "" {
				id = user.DisplayName
			}
			metadata["owner"] = id
		}
	}

	if o.fs.opt.MetadataPermissions.IsSet(rwRead) {
		// We only write permissions out if they are not inherited.
		//
		// On My Drives permissions seem to be attached to every item
		// so they will always be written out.
		//
		// On Shared Drives only non-inherited permissions will be
		// written out.

		// To read the inherited permissions flag will mean we need to
		// read the permissions for each object and the cache will be
		// useless. However shared drives don't return permissions
		// only permissionIds so will need to fetch them for each
		// object. We use HasAugmentedPermissions to see if there are
		// special permissions before fetching them to save transactions.

		// HasAugmentedPermissions: Output only. Whether there are permissions
		// directly on this file. This field is only populated for items in
		// shared drives.
		if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
			// Don't process permissions if there aren't any specifically set
			fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds))
			info.Permissions = nil
			info.PermissionIds = nil
		}

		// PermissionIds: Output only. List of permission IDs for users with
		// access to this file.
		//
		// Only process these if we have no Permissions
		if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 {
			info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds))
			g, gCtx := errgroup.WithContext(ctx)
			g.SetLimit(o.fs.ci.Checkers)
			var mu sync.Mutex // protect the info.Permissions from concurrent writes
			for _, permissionID := range info.PermissionIds {
				permissionID := permissionID
				g.Go(func() error {
					// must fetch the team drive ones individually to check the inherited flag
					perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
					if err != nil {
						return fmt.Errorf("failed to read permission: %w", err)
					}
					// Don't write inherited permissions out
					if inherited {
						return nil
					}
					// Don't write owner role out - these are covered by the owner metadata
					if perm.Role == "owner" {
						return nil
					}
					mu.Lock()
					info.Permissions = append(info.Permissions, perm)
					mu.Unlock()
					return nil
				})
			}
			err = g.Wait()
			if err != nil {
				return err
			}
		} else {
			// Clean the fetched permissions
			for _, perm := range info.Permissions {
				o.fs.cleanAndCachePermission(perm)
			}
		}

		// Permissions: Output only. The full list of permissions for the file.
		// This is only available if the requesting user can share the file. Not
		// populated for items in shared drives.
		if len(info.Permissions) > 0 {
			buf, err := json.Marshal(info.Permissions)
			if err != nil {
				return fmt.Errorf("failed to marshal permissions: %w", err)
			}
			metadata["permissions"] = string(buf)
		}

		// Permission propagation
		// https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation
		// Leads me to believe that in non shared drives, permissions
		// are added to each item when you set permissions for a
		// folder whereas in shared drives they are inherited and
		// placed on the item directly.
	}

	if info.FolderColorRgb != "" {
		metadata["folder-color-rgb"] = info.FolderColorRgb
	}
	if info.Description != "" {
		metadata["description"] = info.Description
	}
	metadata["starred"] = fmt.Sprint(info.Starred)
	metadata["btime"] = info.CreatedTime
	metadata["mtime"] = info.ModifiedTime

	if o.fs.opt.MetadataLabels.IsSet(rwRead) {
		// FIXME would be really nice if we knew if files had labels
		// before listing but we need to know all possible label IDs
		// to get it in the listing.

		labels, err := o.fs.getLabels(ctx, actualID(info.Id))
		if err != nil {
			return fmt.Errorf("failed to fetch labels: %w", err)
		}
		buf, err := json.Marshal(labels)
		if err != nil {
			return fmt.Errorf("failed to marshal labels: %w", err)
		}
		metadata["labels"] = string(buf)
	}

	o.metadata = &metadata
	return nil
}

// Set the owner on the info
func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) {
	perm := drive.Permission{
		Role: "owner",
		EmailAddress: owner,
		// Type: The type of the grantee. Valid values are: * `user` * `group` *
		// `domain` * `anyone` When creating a permission, if `type` is `user`
		// or `group`, you must provide an `emailAddress` for the user or group.
		// When `type` is `domain`, you must provide a `domain`. There isn't
		// extra information required for an `anyone` type.
		Type: "user",
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Permissions.Create(info.Id, &perm).
			SupportsAllDrives(true).
			TransferOwnership(true).
			// SendNotificationEmail(false). - required apparently!
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to set owner: %w", err)
	}
	return nil
}

// Call back to set metadata that can't be set on the upload/update
//
// The *drive.File passed in holds the current state of the drive.File
// and this should update it with any modifications.
type updateMetadataFn func(context.Context, *drive.File) error

// read the metadata from meta and write it into updateInfo
//
// update should be true if this is being used to create metadata for
// an update/PATCH call as the rules on what can be updated are
// slightly different there.
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
	callbackFns := []updateMetadataFn{}
	callback = func(ctx context.Context, info *drive.File) error {
		for _, fn := range callbackFns {
			err := fn(ctx, info)
			if err != nil {
				return err
			}
		}
		return nil
	}
	// merge metadata into request and user metadata
	for k, v := range meta {
		k, v := k, v
		// parse a boolean from v and write into out
		parseBool := func(out *bool) error {
			b, err := strconv.ParseBool(v)
			if err != nil {
				return fmt.Errorf("can't parse metadata %q = %q: %w", k, v, err)
			}
			*out = b
			return nil
		}
		switch k {
		case "copy-requires-writer-permission":
			if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
				return nil, err
			}
		case "writers-can-share":
			if !f.isTeamDrive {
				if err := parseBool(&updateInfo.WritersCanShare); err != nil {
					return nil, err
				}
			} else {
				fs.Debugf(f, "Ignoring %s=%s as can't set on shared drives", k, v)
			}
		case "viewed-by-me":
			// Can't write this
		case "content-type":
			updateInfo.MimeType = v
		case "owner":
			if !f.opt.MetadataOwner.IsSet(rwWrite) {
				continue
			}
			// Can't set Owner on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				err := f.setOwner(ctx, info, v)
				if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) {
					fs.Errorf(f, "Ignoring error as failok is set: %v", err)
					return nil
				}
				return err
			})
		case "permissions":
			if !f.opt.MetadataPermissions.IsSet(rwWrite) {
				continue
			}
			var perms []*drive.Permission
			err := json.Unmarshal([]byte(v), &perms)
			if err != nil {
				return nil, fmt.Errorf("failed to unmarshal permissions: %w", err)
			}
			// Can't set Permissions on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				err := f.setPermissions(ctx, info, perms)
				if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) {
					// We've already logged the permissions errors individually here
					fs.Debugf(f, "Ignoring error as failok is set: %v", err)
					return nil
				}
				return err
			})
		case "labels":
			if !f.opt.MetadataLabels.IsSet(rwWrite) {
				continue
			}
			var labels []*drive.Label
			err := json.Unmarshal([]byte(v), &labels)
			if err != nil {
				return nil, fmt.Errorf("failed to unmarshal labels: %w", err)
			}
			// Can't set Labels on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				err := f.setLabels(ctx, info, labels)
				if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) {
					fs.Errorf(f, "Ignoring error as failok is set: %v", err)
					return nil
				}
				return err
			})
		case "folder-color-rgb":
			updateInfo.FolderColorRgb = v
		case "description":
			updateInfo.Description = v
		case "starred":
			if err := parseBool(&updateInfo.Starred); err != nil {
				return nil, err
			}
		case "btime":
			if update {
				fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", v)
			} else {
				updateInfo.CreatedTime = v
			}
		case "mtime":
			updateInfo.ModifiedTime = v
		default:
			if updateInfo.Properties == nil {
				updateInfo.Properties = make(map[string]string, 1)
			}
			updateInfo.Properties[k] = v
		}
	}
	return callback, nil
}

// Fetch metadata and update updateInfo if --metadata is in use
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) {
	meta, err := fs.GetMetadataOptions(ctx, f, src, options)
	if err != nil {
		return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
	}
	callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
	if err != nil {
		return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
	}
	return callback, nil
}
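A point that is easy to miss in the file above: updateMetadata splits the work into fields that can go straight into the create/patch request (mtime, btime, description, user properties, ...) and fields that can only be applied once the file exists (owner, permissions, labels), which is why it hands back a callback. A rough sketch of the intended calling sequence, with the upload call left abstract since it is not part of this file and the surrounding function is hypothetical:

// Sketch only - uploadFile and putWithMetadata are placeholders, not real rclone functions.
func (f *Fs) putWithMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) error {
	updateInfo := &drive.File{}
	// Fill updateInfo from the source metadata and collect the deferred work.
	callback, err := f.fetchAndUpdateMetadata(ctx, src, options, updateInfo, false)
	if err != nil {
		return err
	}
	// ... upload the data using updateInfo as the request body ...
	var info *drive.File // the drive.File returned by the upload, placeholder here
	// Apply owner/permissions/labels now that the file ID is known.
	return callback(ctx, info)
}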
@@ -8,121 +8,19 @@ package dropbox
 
 import (
 	"context"
-	"errors"
 	"fmt"
-	"sync"
-	"time"
 
 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
-	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/lib/atexit"
 )
 
-const (
-	maxBatchSize = 1000 // max size the batch can be
-	defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
-	defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (ssync)
-	defaultBatchSizeAsync = 100 // default batch size if async
-)
-
-// batcher holds info about the current items waiting for upload
-type batcher struct {
-	f *Fs // Fs this batch is part of
-	mode string // configured batch mode
-	size int // maximum size for batch
-	timeout time.Duration // idle timeout for batch
-	async bool // whether we are using async batching
-	in chan batcherRequest // incoming items to batch
-	closed chan struct{} // close to indicate batcher shut down
-	atexit atexit.FnHandle // atexit handle
-	shutOnce sync.Once // make sure we shutdown once only
-	wg sync.WaitGroup // wait for shutdown
-}
-
-// batcherRequest holds an incoming request with a place for a reply
-type batcherRequest struct {
-	commitInfo *files.UploadSessionFinishArg
-	result chan<- batcherResponse
-}
-
-// Return true if batcherRequest is the quit request
-func (br *batcherRequest) isQuit() bool {
-	return br.commitInfo == nil
-}
-
-// Send this to get the engine to quit
-var quitRequest = batcherRequest{}
-
-// batcherResponse holds a response to be delivered to clients waiting
-// for a batch to complete.
-type batcherResponse struct {
-	err error
-	entry *files.FileMetadata
-}
-
-// newBatcher creates a new batcher structure
-func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
-	// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
-	if size > maxBatchSize || size < 0 {
-		return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
-	}
-
-	async := false
-
-	switch mode {
-	case "sync":
-		if size <= 0 {
-			ci := fs.GetConfig(ctx)
-			size = ci.Transfers
-		}
-		if timeout <= 0 {
-			timeout = defaultTimeoutSync
-		}
-	case "async":
-		if size <= 0 {
-			size = defaultBatchSizeAsync
-		}
-		if timeout <= 0 {
-			timeout = defaultTimeoutAsync
-		}
-		async = true
-	case "off":
-		size = 0
-	default:
-		return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
-	}
-
-	b := &batcher{
-		f: f,
-		mode: mode,
-		size: size,
-		timeout: timeout,
-		async: async,
-		in: make(chan batcherRequest, size),
-		closed: make(chan struct{}),
-	}
-	if b.Batching() {
-		b.atexit = atexit.Register(b.Shutdown)
-		b.wg.Add(1)
-		go b.commitLoop(context.Background())
-	}
-	return b, nil
-
-}
-
-// Batching returns true if batching is active
-func (b *batcher) Batching() bool {
-	return b.size > 0
-}
-
 // finishBatch commits the batch, returning a batch status to poll or maybe complete
-func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
+func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
 	var arg = &files.UploadSessionFinishBatchArg{
 		Entries: items,
 	}
-	err = b.f.pacer.Call(func() (bool, error) {
+	err = f.pacer.Call(func() (bool, error) {
-		complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
+		complete, err = f.srv.UploadSessionFinishBatchV2(arg)
 		// If error is insufficient space then don't retry
 		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
 			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -139,23 +37,10 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
 	return complete, nil
 }
 
-// commit a batch
+// Called by the batcher to commit a batch
-func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
+func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) {
-	// If commit fails then signal clients if sync
-	var signalled = b.async
-	defer func() {
-		if err != nil && !signalled {
-			// Signal to clients that there was an error
-			for _, result := range results {
-				result <- batcherResponse{err: err}
-			}
-		}
-	}()
-	desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
-	fs.Debugf(b.f, "Committing %s", desc)
-
 	// finalise the batch getting either a result or a job id to poll
-	complete, err := b.finishBatch(ctx, items)
+	complete, err := f.finishBatch(ctx, items)
 	if err != nil {
 		return err
 	}
@@ -166,19 +51,13 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
 		return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
 	}
 
-	// Report results to clients
+	// Format results for return
-	var (
-		errorTag = ""
-		errorCount = 0
-	)
 	for i := range results {
 		item := entries[i]
-		resp := batcherResponse{}
 		if item.Tag == "success" {
-			resp.entry = item.Success
+			results[i] = item.Success
 		} else {
-			errorCount++
+			errorTag := item.Tag
-			errorTag = item.Tag
 			if item.Failure != nil {
 				errorTag = item.Failure.Tag
 				if item.Failure.LookupFailed != nil {
@@ -191,112 +70,9 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
 					errorTag += "/" + item.Failure.PropertiesError.Tag
 				}
 			}
-			resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
+			errors[i] = fmt.Errorf("upload failed: %s", errorTag)
-		}
-		if !b.async {
-			results[i] <- resp
 		}
 	}
-	// Show signalled so no need to report error to clients from now on
-	signalled = true
-
-	// Report an error if any failed in the batch
-	if errorTag != "" {
-		return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
-	}
-
-	fs.Debugf(b.f, "Committed %s", desc)
 	return nil
 }
-
-// commitLoop runs the commit engine in the background
-func (b *batcher) commitLoop(ctx context.Context) {
-	var (
-		items []*files.UploadSessionFinishArg // current batch of uncommitted files
-		results []chan<- batcherResponse // current batch of clients awaiting results
-		idleTimer = time.NewTimer(b.timeout)
-		commit = func() {
-			err := b.commitBatch(ctx, items, results)
-			if err != nil {
-				fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
-			}
-			items, results = nil, nil
-		}
-	)
-	defer b.wg.Done()
-	defer idleTimer.Stop()
-	idleTimer.Stop()
-
-outer:
-	for {
-		select {
-		case req := <-b.in:
-			if req.isQuit() {
-				break outer
-			}
-			items = append(items, req.commitInfo)
-			results = append(results, req.result)
-			idleTimer.Stop()
-			if len(items) >= b.size {
-				commit()
-			} else {
-				idleTimer.Reset(b.timeout)
-			}
-		case <-idleTimer.C:
-			if len(items) > 0 {
-				fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
-				commit()
-			}
-		}
-
-	}
-	// commit any remaining items
-	if len(items) > 0 {
-		commit()
-	}
-}
-
-// Shutdown finishes any pending batches then shuts everything down
-//
-// Can be called from atexit handler
-func (b *batcher) Shutdown() {
-	if !b.Batching() {
-		return
-	}
-	b.shutOnce.Do(func() {
-		atexit.Unregister(b.atexit)
-		fs.Infof(b.f, "Committing uploads - please wait...")
-		// show that batcher is shutting down
-		close(b.closed)
-		// quit the commitLoop by sending a quitRequest message
-		//
-		// Note that we don't close b.in because that will
-		// cause write to closed channel in Commit when we are
-		// exiting due to a signal.
-		b.in <- quitRequest
-		b.wg.Wait()
-	})
-}
-
-// Commit commits the file using a batch call, first adding it to the
-// batch and then waiting for the batch to complete in a synchronous
-// way if async is not set.
-func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
-	select {
-	case <-b.closed:
-		return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
-	default:
-	}
-	fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
-	resp := make(chan batcherResponse, 1)
-	b.in <- batcherRequest{
-		commitInfo: commitInfo,
-		result: resp,
-	}
-	// If running async then don't wait for the result
-	if b.async {
-		return nil, nil
-	}
-	result := <-resp
-	return result.entry, result.err
-}
@@ -47,6 +47,7 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
@@ -121,6 +122,14 @@ var (
 
 	// Errors
 	errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
+
+	// Configure the batcher
+	defaultBatcherOptions = batcher.Options{
+		MaxBatchSize: 1000,
+		DefaultTimeoutSync: 500 * time.Millisecond,
+		DefaultTimeoutAsync: 10 * time.Second,
+		DefaultBatchSizeAsync: 100,
+	}
 )
 
 // Gets an oauth config with the right scopes
@@ -152,7 +161,7 @@ func init() {
 			},
 		})
 	},
-	Options: append(oauthutil.SharedOptions, []fs.Option{{
+	Options: append(append(oauthutil.SharedOptions, []fs.Option{{
 		Name: "chunk_size",
 		Help: fmt.Sprintf(`Upload chunk size (< %v).
 
@@ -182,8 +191,9 @@ client_secret) to use this option as currently rclone's default set of
 permissions doesn't include "members.read". This can be added once
 v1.55 or later is in use everywhere.
 `,
 			Default: "",
 			Advanced: true,
+			Sensitive: true,
 		}, {
 			Name: "shared_files",
 			Help: `Instructs rclone to work on individual shared files.
@@ -206,71 +216,12 @@ are supported.
 
 Note that we don't unmount the shared folder afterwards so the
 --dropbox-shared-folders can be omitted after the first use of a particular
-shared folder.`,
+shared folder.
+
+See also --dropbox-root-namespace for an alternative way to work with shared
+folders.`,
 			Default: false,
 			Advanced: true,
-		}, {
-			Name: "batch_mode",
-			Help: `Upload file batching sync|async|off.
-
-This sets the batch mode used by rclone.
-
-For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
-
-This has 3 possible values
-
-- off - no batching
-- sync - batch uploads and check completion (default)
-- async - batch upload and don't check completion
-
-Rclone will close any outstanding batches when it exits which may make
-a delay on quit.
-`,
-			Default: "sync",
-			Advanced: true,
-		}, {
-			Name: "batch_size",
-			Help: `Max number of files in upload batch.
-
-This sets the batch size of files to upload. It has to be less than 1000.
-
-By default this is 0 which means rclone which calculate the batch size
-depending on the setting of batch_mode.
-
-- batch_mode: async - default batch_size is 100
-- batch_mode: sync - default batch_size is the same as --transfers
-- batch_mode: off - not in use
-
-Rclone will close any outstanding batches when it exits which may make
-a delay on quit.
-
-Setting this is a great idea if you are uploading lots of small files
-as it will make them a lot quicker. You can use --transfers 32 to
-maximise throughput.
-`,
-			Default: 0,
-			Advanced: true,
-		}, {
-			Name: "batch_timeout",
-			Help: `Max time to allow an idle upload batch before uploading.
-
-If an upload batch is idle for more than this long then it will be
-uploaded.
-
-The default for this is 0 which means rclone will choose a sensible
-default based on the batch_mode in use.
-
-- batch_mode: async - default batch_timeout is 10s
-- batch_mode: sync - default batch_timeout is 500ms
-- batch_mode: off - not in use
-`,
-			Default: fs.Duration(0),
-			Advanced: true,
-		}, {
-			Name: "batch_commit_timeout",
-			Help: `Max time to wait for a batch to finish committing`,
-			Default: fs.Duration(10 * time.Minute),
-			Advanced: true,
 		}, {
 			Name: "pacer_min_sleep",
 			Default: defaultMinSleep,
@@ -289,23 +240,28 @@ default based on the batch_mode in use.
 			encoder.EncodeDel |
 			encoder.EncodeRightSpace |
 			encoder.EncodeInvalidUtf8,
-		}}...),
+		}, {
+			Name: "root_namespace",
+			Help: "Specify a different Dropbox namespace ID to use as the root for all paths.",
+			Default: "",
+			Advanced: true,
+		}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
 	})
 }
 
 // Options defines the configuration for this backend
 type Options struct {
 	ChunkSize          fs.SizeSuffix        `config:"chunk_size"`
 	Impersonate        string               `config:"impersonate"`
 	SharedFiles        bool                 `config:"shared_files"`
 	SharedFolders      bool                 `config:"shared_folders"`
 	BatchMode          string               `config:"batch_mode"`
 	BatchSize          int                  `config:"batch_size"`
 	BatchTimeout       fs.Duration          `config:"batch_timeout"`
-	BatchCommitTimeout fs.Duration          `config:"batch_commit_timeout"`
 	AsyncBatch         bool                 `config:"async_batch"`
 	PacerMinSleep      fs.Duration          `config:"pacer_min_sleep"`
 	Enc                encoder.MultiEncoder `config:"encoding"`
+	RootNsid           string               `config:"root_namespace"`
 }
 
 // Fs represents a remote dropbox server
@@ -324,7 +280,7 @@ type Fs struct {
 	slashRootSlash string    // root with "/" prefix and postfix, lowercase
 	pacer          *fs.Pacer // To pace the API calls
 	ns             string    // The namespace we are using or "" for none
-	batcher        *batcher  // batch builder
+	batcher        *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
 }
 
 // Object describes a dropbox object
@@ -430,7 +386,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	oldToken = strings.TrimSpace(oldToken)
 	if ok && oldToken != "" && oldToken[0] != '{' {
 		fs.Infof(name, "Converting token to new format")
-		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
+		newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
 		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
 		if err != nil {
 			return nil, fmt.Errorf("NewFS convert token: %w", err)
@@ -450,7 +406,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		ci:    ci,
 		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
-	f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
+	batcherOptions := defaultBatcherOptions
+	batcherOptions.Mode = f.opt.BatchMode
+	batcherOptions.Size = f.opt.BatchSize
+	batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
+	f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
 	if err != nil {
 		return nil, err
 	}
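The wiring above is now all the backend does: the queueing, idle timeout and shutdown handling that used to live in the local batcher type are provided by the generic lib/batcher package, parameterised on the commit argument and result types. A minimal sketch of the commit callback's contract, assuming only what is visible in this diff (results and errors are parallel to items and are filled in place; a non-nil return is reserved for whole-batch failures):

// Sketch only - commitBatchSketch is a hypothetical name; the real method is commitBatch above.
func (f *Fs) commitBatchSketch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) error {
	complete, err := f.finishBatch(ctx, items)
	if err != nil {
		return err // whole batch failed
	}
	entries := complete.Entries
	if len(entries) != len(results) {
		return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
	}
	for i, item := range entries {
		if item.Tag == "success" {
			results[i] = item.Success // per-item success
		} else {
			errors[i] = fmt.Errorf("upload failed: %s", item.Tag) // per-item failure
		}
	}
	return nil
}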
@@ -477,15 +437,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		members := []*team.UserSelectorArg{&user}
 		args := team.NewMembersGetInfoArgs(members)
 
-		memberIds, err := f.team.MembersGetInfo(args)
+		memberIDs, err := f.team.MembersGetInfo(args)
 		if err != nil {
 			return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
 		}
-		if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
+		if len(memberIDs) == 0 || memberIDs[0].MemberInfo == nil || memberIDs[0].MemberInfo.Profile == nil {
 			return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
 		}
 
-		cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
+		cfg.AsMemberID = memberIDs[0].MemberInfo.Profile.MemberProfile.TeamMemberId
 	}
 
 	f.srv = files.New(cfg)
@@ -551,8 +511,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 
 	f.features.Fill(ctx, f)
 
-	// If root starts with / then use the actual root
-	if strings.HasPrefix(root, "/") {
+	if f.opt.RootNsid != "" {
+		f.ns = f.opt.RootNsid
+		fs.Debugf(f, "Overriding root namespace to %q", f.ns)
+	} else if strings.HasPrefix(root, "/") {
+		// If root starts with / then use the actual root
 		var acc *users.FullAccount
 		err = f.pacer.Call(func() (bool, error) {
 			acc, err = f.users.GetCurrentAccount()
@@ -693,7 +656,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(ctx, remote, nil)
 }
 
-// listSharedFoldersApi lists all available shared folders mounted and not mounted
+// listSharedFolders lists all available shared folders mounted and not mounted
 // we'll need the id later so we have to return them in original format
 func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
 	started := false
@@ -995,6 +958,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 	if root == "/" {
 		return errors.New("can't remove root directory")
 	}
+	encRoot := f.opt.Enc.FromStandardPath(root)
 
 	if check {
 		// check directory exists
@@ -1003,10 +967,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 			return fmt.Errorf("Rmdir: %w", err)
 		}
 
-		root = f.opt.Enc.FromStandardPath(root)
 		// check directory empty
 		arg := files.ListFolderArg{
-			Path:      root,
+			Path:      encRoot,
 			Recursive: false,
 		}
 		if root == "/" {
@@ -1027,7 +990,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 
 	// remove it
 	err = f.pacer.Call(func() (bool, error) {
-		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
+		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: encRoot})
 		return shouldRetry(ctx, err)
 	})
 	return err
@@ -1280,18 +1243,21 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return nil, err
 	}
 	var total uint64
+	used := q.Used
 	if q.Allocation != nil {
 		if q.Allocation.Individual != nil {
 			total += q.Allocation.Individual.Allocated
 		}
 		if q.Allocation.Team != nil {
 			total += q.Allocation.Team.Allocated
+			// Override used with Team.Used as this includes q.Used already
+			used = q.Allocation.Team.Used
 		}
 	}
 	usage = &fs.Usage{
 		Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
-		Used:  fs.NewUsageValue(int64(q.Used)), // bytes in use
+		Used:  fs.NewUsageValue(int64(used)), // bytes in use
-		Free:  fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
+		Free:  fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
 	}
 	return usage, nil
 }
@@ -1721,7 +1687,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 	// If we are batching then we should have written all the data now
 	// store the commit info now for a batch commit
 	if o.fs.batcher.Batching() {
-		return o.fs.batcher.Commit(ctx, args)
+		return o.fs.batcher.Commit(ctx, o.remote, args)
 	}
 
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
 	509, // Bandwidth Limit Exceeded
 }
 
-var errorRegex = regexp.MustCompile(`#\d{1,3}`)
+var errorRegex = regexp.MustCompile(`#(\d{1,3})`)
 
 func parseFichierError(err error) int {
 	matches := errorRegex.FindStringSubmatch(err.Error())
 	if len(matches) == 0 {
 		return 0
 	}
-	code, err := strconv.Atoi(matches[0])
+	code, err := strconv.Atoi(matches[1])
 	if err != nil {
 		fs.Debugf(nil, "failed parsing fichier error: %v", err)
 		return 0
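The capture group added here is what makes the strconv.Atoi call succeed: FindStringSubmatch returns the whole match at index 0 and the first group at index 1, so the old code was always handed the leading "#" and failed to parse. A minimal standalone sketch of the difference (the error message text is an assumption for illustration):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	msg := "1Fichier error #403: forbidden" // hypothetical error text
	oldRe := regexp.MustCompile(`#\d{1,3}`)
	newRe := regexp.MustCompile(`#(\d{1,3})`)

	m := oldRe.FindStringSubmatch(msg)
	_, err := strconv.Atoi(m[0]) // m[0] == "#403", so this always errors
	fmt.Println(err != nil)      // true

	m = newRe.FindStringSubmatch(msg)
	code, _ := strconv.Atoi(m[1]) // m[1] == "403"
	fmt.Println(code)             // 403
}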
@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
|||||||
return false, err // No such user
|
return false, err // No such user
|
||||||
case 186:
|
case 186:
|
||||||
return false, err // IP blocked?
|
return false, err // IP blocked?
|
||||||
case 374:
|
case 374, 412: // Flood detected seems to be #412 now
|
||||||
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
|
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
|
||||||
time.Sleep(30 * time.Second)
|
time.Sleep(30 * time.Second)
|
||||||
default:
|
default:
|
||||||
@@ -408,6 +408,32 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
	return response, nil
}

+func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
+	request := &MoveDirRequest{
+		FolderID:            folderID,
+		DestinationFolderID: destinationFolderID,
+		Rename:              newLeaf,
+		// DestinationUser: destinationUser,
+	}
+
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/folder/mv.cgi",
+	}
+
+	response = &MoveDirResponse{}
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err := f.rest.CallJSON(ctx, &opts, request, response)
+		return shouldRetry(ctx, resp, err)
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("couldn't move dir: %w", err)
+	}
+
+	return response, nil
+}
+
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
	request := &CopyFileRequest{
		URLs: []string{url},
@@ -38,8 +38,9 @@ func init() {
		Description: "1Fichier",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
			Name: "api_key",
+			Sensitive: true,
		}, {
			Help: "If you want to download a shared folder, add this parameter.",
			Name: "shared_folder",
@@ -440,23 +441,28 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
+	srcFs := srcObj.fs
+
	// Find current directory ID
-	_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
+	srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
	if err != nil {
		return nil, err
	}

	// Create temporary object
-	dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
+	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
	if err != nil {
		return nil, err
	}

	// If it is in the correct directory, just rename it
	var url string
-	if currentDirectoryID == directoryID {
-		resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
+	if srcDirectoryID == dstDirectoryID {
+		// No rename needed
+		if srcLeaf == dstLeaf {
+			return src, nil
+		}
+		resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
		if err != nil {
			return nil, fmt.Errorf("couldn't rename file: %w", err)
		}
@@ -465,11 +471,16 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
		}
		url = resp.URLs[0].URL
	} else {
-		folderID, err := strconv.Atoi(directoryID)
+		dstFolderID, err := strconv.Atoi(dstDirectoryID)
		if err != nil {
			return nil, err
		}
-		resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
+		rename := dstLeaf
+		// No rename needed
+		if srcLeaf == dstLeaf {
+			rename = ""
+		}
+		resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
		if err != nil {
			return nil, fmt.Errorf("couldn't move file: %w", err)
		}
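Taken together, the two hunks above implement a small decision table: same directory and same leaf means nothing to do, same directory with a new leaf means a rename in place, and a different directory means a move, with the rename parameter left empty when the leaf does not change. A hypothetical condensation of that logic (names invented, not part of the backend):

package main

import "fmt"

// planMove mirrors the branching in Move above, reduced to its inputs.
func planMove(srcDir, dstDir, srcLeaf, dstLeaf string) string {
	switch {
	case srcDir == dstDir && srcLeaf == dstLeaf:
		return "nothing to do"
	case srcDir == dstDir:
		return "rename in place"
	case srcLeaf == dstLeaf:
		return "move without rename"
	default:
		return "move and rename"
	}
}

func main() {
	fmt.Println(planMove("12", "12", "a.txt", "a.txt")) // nothing to do
	fmt.Println(planMove("12", "34", "a.txt", "b.txt")) // move and rename
}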
@@ -487,6 +498,51 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	return dstObj, nil
}

+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server-side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove.
+//
+// If destination exists then return fs.ErrorDirExists.
+//
+// This is complicated by the fact that we can't use moveDir to move
+// to a different directory AND rename at the same time as it can
+// overwrite files in the source directory.
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+	srcFs, ok := src.(*Fs)
+	if !ok {
+		fs.Debugf(srcFs, "Can't move directory - not same remote type")
+		return fs.ErrorCantDirMove
+	}
+
+	srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
+	if err != nil {
+		return err
+	}
+	srcIDnumeric, err := strconv.Atoi(srcID)
+	if err != nil {
+		return err
+	}
+	dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
+	if err != nil {
+		return err
+	}
+
+	var resp *MoveDirResponse
+	resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
+	if err != nil {
+		return fmt.Errorf("couldn't rename leaf: %w", err)
+	}
+	if resp.Status != "OK" {
+		return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
+	}
+
+	srcFs.dirCache.FlushDir(srcRemote)
+	return nil
+}
+
// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
@@ -560,6 +616,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var (
	_ fs.Fs             = (*Fs)(nil)
	_ fs.Mover          = (*Fs)(nil)
+	_ fs.DirMover       = (*Fs)(nil)
	_ fs.Copier         = (*Fs)(nil)
	_ fs.PublicLinker   = (*Fs)(nil)
	_ fs.PutUncheckeder = (*Fs)(nil)
@@ -70,6 +70,22 @@ type MoveFileResponse struct {
	URLs []string `json:"urls"`
}

+// MoveDirRequest is the request structure of the corresponding request
+type MoveDirRequest struct {
+	FolderID            int    `json:"folder_id"`
+	DestinationFolderID int    `json:"destination_folder_id,omitempty"`
+	DestinationUser     string `json:"destination_user"`
+	Rename              string `json:"rename,omitempty"`
+}
+
+// MoveDirResponse is the response structure of the corresponding request
+type MoveDirResponse struct {
+	Status  string `json:"status"`
+	Message string `json:"message"`
+	OldName string `json:"old_name"`
+	NewName string `json:"new_name"`
+}
+
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
	URLs []string `json:"urls"`
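To see what the new types put on the wire, here is a small standalone sketch (field values invented) of the JSON body a MoveDirRequest marshals to: destination_folder_id and rename are dropped when zero-valued because of omitempty, while destination_user is always emitted.

package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the MoveDirRequest type added above, for demonstration only.
type MoveDirRequest struct {
	FolderID            int    `json:"folder_id"`
	DestinationFolderID int    `json:"destination_folder_id,omitempty"`
	DestinationUser     string `json:"destination_user"`
	Rename              string `json:"rename,omitempty"`
}

func main() {
	body, _ := json.Marshal(MoveDirRequest{FolderID: 123, DestinationFolderID: 456, Rename: "newname"})
	fmt.Println(string(body))
	// {"folder_id":123,"destination_folder_id":456,"destination_user":"","rename":"newname"}
}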
@@ -84,6 +84,7 @@ Leave blank normally.

Fill in to make rclone start with directory of a given ID.
`,
+			Sensitive: true,
		}, {
			Name: "permanent_token",
			Help: `Permanent Authentication Token.
@@ -97,6 +98,7 @@ These tokens are normally valid for several years.

For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`,
+			Sensitive: true,
		}, {
			Name: "token",
			Help: `Session Token.
@@ -106,7 +108,8 @@ usually valid for 1 hour.

Don't set this value - rclone will set it automatically.
`,
			Advanced: true,
+			Sensitive: true,
		}, {
			Name: "token_expiry",
			Help: `Token expiry time.
@@ -155,9 +158,9 @@ type Fs struct {
	tokenMu      sync.Mutex // hold when reading the token
	token        string     // current access token
	tokenExpiry  time.Time  // time the current token expires
-	tokenExpired int32      // read and written with atomic
+	tokenExpired atomic.Int32
	canCopyWithName bool          // set if detected that can use fi_name in copy
	precision       time.Duration // precision reported
}

// Object describes a filefabric object
@@ -240,7 +243,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
	err = status // return the error from the RPC
	code := status.GetCode()
	if code == "login_token_expired" {
-		atomic.AddInt32(&f.tokenExpired, 1)
+		f.tokenExpired.Add(1)
	} else {
		for _, retryCode := range retryStatusCodes {
			if code == retryCode.code {
@@ -320,12 +323,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
	var refreshed = false
	defer func() {
		if refreshed {
-			atomic.StoreInt32(&f.tokenExpired, 0)
+			f.tokenExpired.Store(0)
		}
		f.tokenMu.Unlock()
	}()

-	expired := atomic.LoadInt32(&f.tokenExpired) != 0
+	expired := f.tokenExpired.Load() != 0
	if expired {
		fs.Debugf(f, "Token invalid - refreshing")
	}
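The tokenExpired change above swaps a bare int32 guarded by the atomic.AddInt32/StoreInt32/LoadInt32 helpers for the atomic.Int32 type (Go 1.19+), whose method set makes non-atomic access impossible. A minimal side-by-side sketch of the two styles, with invented struct names:

package main

import (
	"fmt"
	"sync/atomic"
)

// Old style: a plain int32 field that every caller must remember to
// access only through the atomic.* functions.
type oldFs struct{ tokenExpired int32 }

func (f *oldFs) markExpired() { atomic.AddInt32(&f.tokenExpired, 1) }
func (f *oldFs) expired() bool { return atomic.LoadInt32(&f.tokenExpired) != 0 }

// New style: atomic.Int32 makes atomic access the only way to touch the value.
type newFs struct{ tokenExpired atomic.Int32 }

func (f *newFs) markExpired() { f.tokenExpired.Add(1) }
func (f *newFs) expired() bool { return f.tokenExpired.Load() != 0 }

func main() {
	o, n := &oldFs{}, &newFs{}
	o.markExpired()
	n.markExpired()
	fmt.Println(o.expired(), n.expired()) // true true
}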
backend/filescom/filescom.go (new file, 901 lines)
@@ -0,0 +1,901 @@
|
|||||||
|
// Package filescom provides an interface to the Files.com
|
||||||
|
// object storage system.
|
||||||
|
package filescom
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
files_sdk "github.com/Files-com/files-sdk-go/v3"
|
||||||
|
"github.com/Files-com/files-sdk-go/v3/bundle"
|
||||||
|
"github.com/Files-com/files-sdk-go/v3/file"
|
||||||
|
file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
|
||||||
|
"github.com/Files-com/files-sdk-go/v3/folder"
|
||||||
|
"github.com/Files-com/files-sdk-go/v3/session"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
Run of rclone info
|
||||||
|
stringNeedsEscaping = []rune{
|
||||||
|
'/', '\x00'
|
||||||
|
}
|
||||||
|
maxFileLength = 512 // for 1 byte unicode characters
|
||||||
|
maxFileLength = 512 // for 2 byte unicode characters
|
||||||
|
maxFileLength = 512 // for 3 byte unicode characters
|
||||||
|
maxFileLength = 512 // for 4 byte unicode characters
|
||||||
|
canWriteUnnormalized = true
|
||||||
|
canReadUnnormalized = true
|
||||||
|
canReadRenormalized = true
|
||||||
|
canStream = true
|
||||||
|
*/
|
||||||
|
|
||||||
|
const (
|
||||||
|
minSleep = 10 * time.Millisecond
|
||||||
|
maxSleep = 2 * time.Second
|
||||||
|
decayConstant = 2 // bigger for slower decay, exponential
|
||||||
|
|
||||||
|
folderNotEmpty = "processing-failure/folder-not-empty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Register with Fs
|
||||||
|
func init() {
|
||||||
|
fs.Register(&fs.RegInfo{
|
||||||
|
Name: "filescom",
|
||||||
|
Description: "Files.com",
|
||||||
|
NewFs: NewFs,
|
||||||
|
Options: []fs.Option{
|
||||||
|
{
|
||||||
|
Name: "site",
|
||||||
|
Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
|
||||||
|
}, {
|
||||||
|
Name: "username",
|
||||||
|
Help: "The username used to authenticate with Files.com.",
|
||||||
|
}, {
|
||||||
|
Name: "password",
|
||||||
|
Help: "The password used to authenticate with Files.com.",
|
||||||
|
IsPassword: true,
|
||||||
|
}, {
|
||||||
|
Name: "api_key",
|
||||||
|
Help: "The API key used to authenticate with Files.com.",
|
||||||
|
Advanced: true,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: config.ConfigEncoding,
|
||||||
|
Help: config.ConfigEncodingHelp,
|
||||||
|
Advanced: true,
|
||||||
|
Default: (encoder.Display |
|
||||||
|
encoder.EncodeBackSlash |
|
||||||
|
encoder.EncodeRightSpace |
|
||||||
|
encoder.EncodeRightCrLfHtVt |
|
||||||
|
encoder.EncodeInvalidUtf8),
|
||||||
|
}},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Options defines the configuration for this backend
|
||||||
|
type Options struct {
|
||||||
|
Site string `config:"site"`
|
||||||
|
Username string `config:"username"`
|
||||||
|
Password string `config:"password"`
|
||||||
|
APIKey string `config:"api_key"`
|
||||||
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fs represents a remote files.com server
|
||||||
|
type Fs struct {
|
||||||
|
name string // name of this remote
|
||||||
|
root string // the path we are working on
|
||||||
|
opt Options // parsed options
|
||||||
|
features *fs.Features // optional features
|
||||||
|
fileClient *file.Client // the connection to the file API
|
||||||
|
folderClient *folder.Client // the connection to the folder API
|
||||||
|
migrationClient *file_migration.Client // the connection to the file migration API
|
||||||
|
bundleClient *bundle.Client // the connection to the bundle API
|
||||||
|
pacer *fs.Pacer // pacer for API calls
|
||||||
|
}
|
||||||
|
|
||||||
|
// Object describes a files object
|
||||||
|
//
|
||||||
|
// Will definitely have info but maybe not meta
|
||||||
|
type Object struct {
|
||||||
|
fs *Fs // what this object is part of
|
||||||
|
remote string // The remote path
|
||||||
|
size int64 // size of the object
|
||||||
|
crc32 string // CRC32 of the object content
|
||||||
|
md5 string // MD5 of the object content
|
||||||
|
mimeType string // Content-Type of the object
|
||||||
|
modTime time.Time // modification time of the object
|
||||||
|
}
|
||||||
|
|
||||||
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
|
// Name of the remote (as passed into NewFs)
|
||||||
|
func (f *Fs) Name() string {
|
||||||
|
return f.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Root of the remote (as passed into NewFs)
|
||||||
|
func (f *Fs) Root() string {
|
||||||
|
return f.root
|
||||||
|
}
|
||||||
|
|
||||||
|
// String converts this Fs to a string
|
||||||
|
func (f *Fs) String() string {
|
||||||
|
return fmt.Sprintf("files root '%s'", f.root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Features returns the optional features of this Fs
|
||||||
|
func (f *Fs) Features() *fs.Features {
|
||||||
|
return f.features
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode remote and turn it into an absolute path in the share
|
||||||
|
func (f *Fs) absPath(remote string) string {
|
||||||
|
return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
|
||||||
|
}
|
||||||
|
|
||||||
|
// retryErrorCodes is a slice of error codes that we will retry
|
||||||
|
var retryErrorCodes = []int{
|
||||||
|
429, // Too Many Requests.
|
||||||
|
500, // Internal Server Error
|
||||||
|
502, // Bad Gateway
|
||||||
|
503, // Service Unavailable
|
||||||
|
504, // Gateway Timeout
|
||||||
|
509, // Bandwidth Limit Exceeded
|
||||||
|
}
|
||||||
|
|
||||||
|
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||||
|
// retried. It returns the err as a convenience
|
||||||
|
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||||
|
if fserrors.ContextError(ctx, &err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if apiErr, ok := err.(files_sdk.ResponseError); ok {
|
||||||
|
for _, e := range retryErrorCodes {
|
||||||
|
if apiErr.HttpCode == e {
|
||||||
|
fs.Debugf(nil, "Retrying API error %v", err)
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fserrors.ShouldRetry(err), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// readMetaDataForPath reads the metadata from the path
|
||||||
|
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
|
||||||
|
params := files_sdk.FileFindParams{
|
||||||
|
Path: f.absPath(path),
|
||||||
|
}
|
||||||
|
|
||||||
|
var file files_sdk.File
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &file, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFs constructs an Fs from the path, container:path
|
||||||
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
|
// Parse config into Options struct
|
||||||
|
opt := new(Options)
|
||||||
|
err := configstruct.Set(m, opt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
root = strings.Trim(root, "/")
|
||||||
|
|
||||||
|
config, err := newClientConfig(ctx, opt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
f := &Fs{
|
||||||
|
name: name,
|
||||||
|
root: root,
|
||||||
|
opt: *opt,
|
||||||
|
fileClient: &file.Client{Config: config},
|
||||||
|
folderClient: &folder.Client{Config: config},
|
||||||
|
migrationClient: &file_migration.Client{Config: config},
|
||||||
|
bundleClient: &bundle.Client{Config: config},
|
||||||
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
|
}
|
||||||
|
f.features = (&fs.Features{
|
||||||
|
CaseInsensitive: true,
|
||||||
|
CanHaveEmptyDirectories: true,
|
||||||
|
ReadMimeType: true,
|
||||||
|
DirModTimeUpdatesOnWrite: true,
|
||||||
|
}).Fill(ctx, f)
|
||||||
|
|
||||||
|
if f.root != "" {
|
||||||
|
info, err := f.readMetaDataForPath(ctx, "")
|
||||||
|
if err == nil && !info.IsDir() {
|
||||||
|
f.root = path.Dir(f.root)
|
||||||
|
if f.root == "." {
|
||||||
|
f.root = ""
|
||||||
|
}
|
||||||
|
return f, fs.ErrorIsFile
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return f, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
|
||||||
|
if opt.Site != "" {
|
||||||
|
if strings.Contains(opt.Site, ".") {
|
||||||
|
config.EndpointOverride = opt.Site
|
||||||
|
} else {
|
||||||
|
config.Subdomain = opt.Site
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = url.ParseRequestURI(config.Endpoint())
|
||||||
|
if err != nil {
|
||||||
|
err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
config = config.Init().SetCustomClient(fshttp.NewClient(ctx))
|
||||||
|
|
||||||
|
if opt.APIKey != "" {
|
||||||
|
config.APIKey = opt.APIKey
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.Username == "" {
|
||||||
|
err = errors.New("username not found")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if opt.Password == "" {
|
||||||
|
err = errors.New("password not found")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
opt.Password, err = obscure.Reveal(opt.Password)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sessionClient := session.Client{Config: config}
|
||||||
|
params := files_sdk.SessionCreateParams{
|
||||||
|
Username: opt.Username,
|
||||||
|
Password: opt.Password,
|
||||||
|
}
|
||||||
|
|
||||||
|
thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
err = fmt.Errorf("couldn't create session: %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
config.SessionId = thisSession.Id
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an Object from a path
|
||||||
|
//
|
||||||
|
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||||
|
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
|
||||||
|
o := &Object{
|
||||||
|
fs: f,
|
||||||
|
remote: remote,
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
if file != nil {
|
||||||
|
err = o.setMetaData(file)
|
||||||
|
} else {
|
||||||
|
err = o.readMetaData(ctx) // reads info and meta, returning an error
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return o, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
|
return f.newObjectWithInfo(ctx, remote, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// List the objects and directories in dir into entries. The
|
||||||
|
// entries can be returned in any order but should be for a
|
||||||
|
// complete directory.
|
||||||
|
//
|
||||||
|
// dir should be "" to list the root, and should not have
|
||||||
|
// trailing slashes.
|
||||||
|
//
|
||||||
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
|
// found.
|
||||||
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
|
var it *folder.Iter
|
||||||
|
params := files_sdk.FolderListForParams{
|
||||||
|
Path: f.absPath(dir),
|
||||||
|
}
|
||||||
|
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("couldn't list files: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for it.Next() {
|
||||||
|
item := ptr(it.File())
|
||||||
|
remote := f.opt.Enc.ToStandardPath(item.DisplayName)
|
||||||
|
remote = path.Join(dir, remote)
|
||||||
|
if remote == dir {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if item.IsDir() {
|
||||||
|
d := fs.NewDir(remote, item.ModTime())
|
||||||
|
entries = append(entries, d)
|
||||||
|
} else {
|
||||||
|
o, err := f.newObjectWithInfo(ctx, remote, item)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
entries = append(entries, o)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = it.Err()
|
||||||
|
if files_sdk.IsNotExist(err) {
|
||||||
|
return nil, fs.ErrorDirNotFound
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creates from the parameters passed in a half finished Object which
|
||||||
|
// must have setMetaData called on it
|
||||||
|
//
|
||||||
|
// Returns the object and error.
|
||||||
|
//
|
||||||
|
// Used to create new objects
|
||||||
|
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
|
||||||
|
// Create the directory for the object if it doesn't exist
|
||||||
|
err = f.mkParentDir(ctx, remote)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Temporary Object under construction
|
||||||
|
o = &Object{
|
||||||
|
fs: f,
|
||||||
|
remote: remote,
|
||||||
|
}
|
||||||
|
return o, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put the object
|
||||||
|
//
|
||||||
|
// Copy the reader in to the new object which is returned.
|
||||||
|
//
|
||||||
|
// The new object may have been created if an error is returned
|
||||||
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
|
// Temporary Object under construction
|
||||||
|
fs := &Object{
|
||||||
|
fs: f,
|
||||||
|
remote: src.Remote(),
|
||||||
|
}
|
||||||
|
return fs, fs.Update(ctx, in, src, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
|
return f.Put(ctx, in, src, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) mkdir(ctx context.Context, path string) error {
|
||||||
|
if path == "" || path == "." {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
params := files_sdk.FolderCreateParams{
|
||||||
|
Path: path,
|
||||||
|
MkdirParents: ptr(true),
|
||||||
|
}
|
||||||
|
|
||||||
|
err := f.pacer.Call(func() (bool, error) {
|
||||||
|
_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if files_sdk.IsExist(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make the parent directory of remote
|
||||||
|
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
|
||||||
|
return f.mkdir(ctx, path.Dir(f.absPath(remote)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mkdir creates the container if it doesn't exist
|
||||||
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
|
return f.mkdir(ctx, f.absPath(dir))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DirSetModTime sets the directory modtime for dir
|
||||||
|
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
||||||
|
o := Object{
|
||||||
|
fs: f,
|
||||||
|
remote: dir,
|
||||||
|
}
|
||||||
|
return o.SetModTime(ctx, modTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
// purgeCheck removes the root directory, if check is set then it
|
||||||
|
// refuses to do so if it has anything in
|
||||||
|
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||||
|
path := f.absPath(dir)
|
||||||
|
if path == "" || path == "." {
|
||||||
|
return errors.New("can't purge root directory")
|
||||||
|
}
|
||||||
|
|
||||||
|
params := files_sdk.FileDeleteParams{
|
||||||
|
Path: path,
|
||||||
|
Recursive: ptr(!check),
|
||||||
|
}
|
||||||
|
|
||||||
|
err := f.pacer.Call(func() (bool, error) {
|
||||||
|
err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
|
||||||
|
// Allow for eventual consistency deletion of child objects.
|
||||||
|
if isFolderNotEmpty(err) {
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if files_sdk.IsNotExist(err) {
|
||||||
|
return fs.ErrorDirNotFound
|
||||||
|
} else if isFolderNotEmpty(err) {
|
||||||
|
return fs.ErrorDirectoryNotEmpty
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("rmdir failed: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rmdir deletes the root folder
|
||||||
|
//
|
||||||
|
// Returns an error if it isn't empty
|
||||||
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
|
return f.purgeCheck(ctx, dir, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Precision return the precision of this Fs
|
||||||
|
func (f *Fs) Precision() time.Duration {
|
||||||
|
return time.Second
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy src to this remote using server-side copy operations.
|
||||||
|
//
|
||||||
|
// This is stored with the remote path given.
|
||||||
|
//
|
||||||
|
// It returns the destination Object and a possible error.
|
||||||
|
//
|
||||||
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
|
//
|
||||||
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
|
||||||
|
srcObj, ok := src.(*Object)
|
||||||
|
if !ok {
|
||||||
|
fs.Debugf(src, "Can't copy - not same remote type")
|
||||||
|
return nil, fs.ErrorCantCopy
|
||||||
|
}
|
||||||
|
err = srcObj.readMetaData(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
srcPath := srcObj.fs.absPath(srcObj.remote)
|
||||||
|
dstPath := f.absPath(remote)
|
||||||
|
if strings.EqualFold(srcPath, dstPath) {
|
||||||
|
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create temporary object
|
||||||
|
dstObj, err = f.createObject(ctx, remote)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy the object
|
||||||
|
params := files_sdk.FileCopyParams{
|
||||||
|
Path: srcPath,
|
||||||
|
Destination: dstPath,
|
||||||
|
Overwrite: ptr(true),
|
||||||
|
}
|
||||||
|
|
||||||
|
var action files_sdk.FileAction
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = f.waitForAction(ctx, action, "copy")
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = dstObj.SetModTime(ctx, srcObj.modTime)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Purge deletes all the files and the container
|
||||||
|
//
|
||||||
|
// Optional interface: Only implement this if you have a way of
|
||||||
|
// deleting all the files quicker than just running Remove() on the
|
||||||
|
// result of List()
|
||||||
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
|
return f.purgeCheck(ctx, dir, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// move a file or folder
|
||||||
|
func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
|
||||||
|
// Move the object
|
||||||
|
params := files_sdk.FileMoveParams{
|
||||||
|
Path: src.absPath(srcRemote),
|
||||||
|
Destination: f.absPath(dstRemote),
|
||||||
|
}
|
||||||
|
|
||||||
|
var action files_sdk.FileAction
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = f.waitForAction(ctx, action, "move")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
info, err = f.readMetaDataForPath(ctx, dstRemote)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
|
||||||
|
var migration files_sdk.FileMigration
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
|
||||||
|
// noop
|
||||||
|
}, files_sdk.WithContext(ctx))
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if err == nil && migration.Status != "completed" {
|
||||||
|
return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move src to this remote using server-side move operations.
|
||||||
|
//
|
||||||
|
// This is stored with the remote path given.
|
||||||
|
//
|
||||||
|
// It returns the destination Object and a possible error.
|
||||||
|
//
|
||||||
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
|
//
|
||||||
|
// If it isn't possible then return fs.ErrorCantMove
|
||||||
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
|
srcObj, ok := src.(*Object)
|
||||||
|
if !ok {
|
||||||
|
fs.Debugf(src, "Can't move - not same remote type")
|
||||||
|
return nil, fs.ErrorCantMove
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create temporary object
|
||||||
|
dstObj, err := f.createObject(ctx, remote)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do the move
|
||||||
|
info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = dstObj.setMetaData(info)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dstObj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
|
// using server-side move operations.
|
||||||
|
//
|
||||||
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
|
//
|
||||||
|
// If it isn't possible then return fs.ErrorCantDirMove
|
||||||
|
//
|
||||||
|
// If destination exists then return fs.ErrorDirExists
|
||||||
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||||
|
srcFs, ok := src.(*Fs)
|
||||||
|
if !ok {
|
||||||
|
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||||
|
return fs.ErrorCantDirMove
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if destination exists
|
||||||
|
_, err = f.readMetaDataForPath(ctx, dstRemote)
|
||||||
|
if err == nil {
|
||||||
|
return fs.ErrorDirExists
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create temporary object
|
||||||
|
dstObj, err := f.createObject(ctx, dstRemote)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do the move
|
||||||
|
_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||||
|
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
|
||||||
|
params := files_sdk.BundleCreateParams{
|
||||||
|
Paths: []string{f.absPath(remote)},
|
||||||
|
}
|
||||||
|
if expire < fs.DurationOff {
|
||||||
|
params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
|
||||||
|
}
|
||||||
|
|
||||||
|
var bundle files_sdk.Bundle
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
url = bundle.Url
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hashes returns the supported hash sets.
|
||||||
|
func (f *Fs) Hashes() hash.Set {
|
||||||
|
return hash.NewHashSet(hash.CRC32, hash.MD5)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
|
// Fs returns the parent Fs
|
||||||
|
func (o *Object) Fs() fs.Info {
|
||||||
|
return o.fs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return a string version
|
||||||
|
func (o *Object) String() string {
|
||||||
|
if o == nil {
|
||||||
|
return "<nil>"
|
||||||
|
}
|
||||||
|
return o.remote
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remote returns the remote path
|
||||||
|
func (o *Object) Remote() string {
|
||||||
|
return o.remote
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hash returns the MD5 of an object returning a lowercase hex string
|
||||||
|
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||||
|
switch t {
|
||||||
|
case hash.CRC32:
|
||||||
|
if o.crc32 == "" {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%08s", o.crc32), nil
|
||||||
|
case hash.MD5:
|
||||||
|
return o.md5, nil
|
||||||
|
}
|
||||||
|
return "", hash.ErrUnsupported
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns the size of an object in bytes
|
||||||
|
func (o *Object) Size() int64 {
|
||||||
|
return o.size
|
||||||
|
}
|
||||||
|
|
||||||
|
// setMetaData sets the metadata from info
|
||||||
|
func (o *Object) setMetaData(file *files_sdk.File) error {
|
||||||
|
o.modTime = file.ModTime()
|
||||||
|
|
||||||
|
if !file.IsDir() {
|
||||||
|
o.size = file.Size
|
||||||
|
o.crc32 = file.Crc32
|
||||||
|
o.md5 = file.Md5
|
||||||
|
o.mimeType = file.MimeType
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readMetaData gets the metadata if it hasn't already been fetched
|
||||||
|
//
|
||||||
|
// it also sets the info
|
||||||
|
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||||
|
file, err := o.fs.readMetaDataForPath(ctx, o.remote)
|
||||||
|
if err != nil {
|
||||||
|
if files_sdk.IsNotExist(err) {
|
||||||
|
return fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if file.IsDir() {
|
||||||
|
return fs.ErrorIsDir
|
||||||
|
}
|
||||||
|
return o.setMetaData(file)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModTime returns the modification time of the object
|
||||||
|
//
|
||||||
|
// It attempts to read the objects mtime and if that isn't present the
|
||||||
|
// LastModified returned in the http headers
|
||||||
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
|
return o.modTime
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetModTime sets the modification time of the local fs object
|
||||||
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
|
||||||
|
params := files_sdk.FileUpdateParams{
|
||||||
|
Path: o.fs.absPath(o.remote),
|
||||||
|
ProvidedMtime: &modTime,
|
||||||
|
}
|
||||||
|
|
||||||
|
var file files_sdk.File
|
||||||
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
|
file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return o.setMetaData(&file)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Storable returns a boolean showing whether this object storable
|
||||||
|
func (o *Object) Storable() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open an object for read
|
||||||
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
|
// Offset and Count for range download
|
||||||
|
var offset, count int64
|
||||||
|
fs.FixRangeOption(options, o.size)
|
||||||
|
for _, option := range options {
|
||||||
|
switch x := option.(type) {
|
||||||
|
case *fs.RangeOption:
|
||||||
|
offset, count = x.Decode(o.size)
|
||||||
|
if count < 0 {
|
||||||
|
count = o.size - offset
|
||||||
|
}
|
||||||
|
case *fs.SeekOption:
|
||||||
|
offset = x.Offset
|
||||||
|
count = o.size - offset
|
||||||
|
default:
|
||||||
|
if option.Mandatory() {
|
||||||
|
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
params := files_sdk.FileDownloadParams{
|
||||||
|
Path: o.fs.absPath(o.remote),
|
||||||
|
}
|
||||||
|
|
||||||
|
headers := &http.Header{}
|
||||||
|
headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
|
||||||
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
|
_, err = o.fs.fileClient.Download(
|
||||||
|
params,
|
||||||
|
files_sdk.WithContext(ctx),
|
||||||
|
files_sdk.RequestHeadersOption(headers),
|
||||||
|
files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
|
||||||
|
in = closer
|
||||||
|
return err
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns a pointer to t - useful for returning pointers to constants
|
||||||
|
func ptr[T any](t T) *T {
|
||||||
|
return &t
|
||||||
|
}
|
||||||
|
|
||||||
|
func isFolderNotEmpty(err error) bool {
|
||||||
|
var re files_sdk.ResponseError
|
||||||
|
ok := errors.As(err, &re)
|
||||||
|
return ok && re.Type == folderNotEmpty
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the object with the contents of the io.Reader, modTime and size
|
||||||
|
//
|
||||||
|
// If existing is set then it updates the object rather than creating a new one.
|
||||||
|
//
|
||||||
|
// The new object may have been created if an error is returned.
|
||||||
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||||
|
uploadOpts := []file.UploadOption{
|
||||||
|
file.UploadWithContext(ctx),
|
||||||
|
file.UploadWithReader(in),
|
||||||
|
file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
|
||||||
|
file.UploadWithProvidedMtime(src.ModTime(ctx)),
|
||||||
|
}
|
||||||
|
|
||||||
|
err := o.fs.pacer.Call(func() (bool, error) {
|
||||||
|
err := o.fs.fileClient.Upload(uploadOpts...)
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return o.readMetaData(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove an object
|
||||||
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
|
params := files_sdk.FileDeleteParams{
|
||||||
|
Path: o.fs.absPath(o.remote),
|
||||||
|
}
|
||||||
|
|
||||||
|
return o.fs.pacer.Call(func() (bool, error) {
|
||||||
|
err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// MimeType of an Object if known, "" otherwise
|
||||||
|
func (o *Object) MimeType(ctx context.Context) string {
|
||||||
|
return o.mimeType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the interfaces are satisfied
|
||||||
|
var (
|
||||||
|
_ fs.Fs = (*Fs)(nil)
|
||||||
|
_ fs.Purger = (*Fs)(nil)
|
||||||
|
_ fs.PutStreamer = (*Fs)(nil)
|
||||||
|
_ fs.Copier = (*Fs)(nil)
|
||||||
|
_ fs.Mover = (*Fs)(nil)
|
||||||
|
_ fs.DirMover = (*Fs)(nil)
|
||||||
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
|
_ fs.Object = (*Object)(nil)
|
||||||
|
_ fs.MimeTyper = (*Object)(nil)
|
||||||
|
)
|
||||||
backend/filescom/filescom_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
+// Test Files filesystem interface
+package filescom_test
+
+import (
+	"testing"
+
+	"github.com/rclone/rclone/backend/filescom"
+	"github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: "TestFilesCom:",
+		NilObject:  (*filescom.Object)(nil),
+	})
+}
@@ -15,7 +15,7 @@ import (
	"sync"
	"time"

-	"github.com/rclone/ftp"
+	"github.com/jlaffaye/ftp"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config"
@@ -28,6 +28,7 @@ import (
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/env"
	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/proxy"
	"github.com/rclone/rclone/lib/readers"
)

@@ -48,13 +49,15 @@ func init() {
		Description: "FTP",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "host",
			Help:     "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
			Required: true,
+			Sensitive: true,
		}, {
			Name:    "user",
			Help:    "FTP username.",
			Default: currentUser,
+			Sensitive: true,
		}, {
			Name: "port",
			Help: "FTP port number.",
@@ -82,7 +85,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
			Default: false,
		}, {
			Name: "concurrency",
-			Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
+			Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.

Note that setting this is very likely to cause deadlocks so it should
be used with care.
@@ -96,7 +99,7 @@ maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.

-`, "|", "`", -1),
+`, "|", "`"),
			Default:  0,
			Advanced: true,
		}, {
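The help-text hunk above is a pure modernisation: strings.Replace with a count of -1 and strings.ReplaceAll produce identical results, the latter just states the intent directly. A tiny standalone check:

package main

import (
	"fmt"
	"strings"
)

func main() {
	help := "use |--checkers| and |--transfers|"
	a := strings.Replace(help, "|", "`", -1) // old spelling
	b := strings.ReplaceAll(help, "|", "`")  // new spelling
	fmt.Println(a == b) // true
}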
@@ -170,6 +173,34 @@ Enabled by default. Use 0 to disable.`,
			Help: `Allow asking for FTP password when needed.

If this is set and no password is supplied then rclone will ask for a password
+`,
+			Advanced: true,
+		}, {
+			Name:    "socks_proxy",
+			Default: "",
+			Help: `Socks 5 proxy host.
+
+Supports the format user:pass@host:port, user@host:port, host:port.
+
+Example:
+
+    myUser:myPass@localhost:9005
+`,
+			Advanced: true,
+		}, {
+			Name:    "no_check_upload",
+			Default: false,
+			Help: `Don't check the upload is OK
+
+Normally rclone will try to check the upload exists after it has
+uploaded a file to make sure the size and modification time are as
+expected.
+
+This flag stops rclone doing these checks. This enables uploading to
+folders which are write only.
+
+You will likely need to use the --inplace flag also if uploading to
+a write only folder.
`,
			Advanced: true,
		}, {
@@ -216,6 +247,8 @@ type Options struct {
	ShutTimeout fs.Duration          `config:"shut_timeout"`
	AskPassword bool                 `config:"ask_password"`
	Enc         encoder.MultiEncoder `config:"encoding"`
+	SocksProxy    string `config:"socks_proxy"`
+	NoCheckUpload bool   `config:"no_check_upload"`
}

// Fs represents a remote FTP server
@@ -233,7 +266,6 @@ type Fs struct {
	pool     []*ftp.ServerConn
	drain    *time.Timer // used to drain the pool when we stop using the connections
	tokens   *pacer.TokenDispenser
-	tlsConf  *tls.Config
	pacer    *fs.Pacer // pacer for FTP connections
	fGetTime bool // true if the ftp library accepts GetTime
	fSetTime bool // true if the ftp library accepts SetTime
@@ -346,10 +378,36 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
	return fserrors.ShouldRetry(err), err
}

+// Get a TLS config with a unique session cache.
+//
+// We can't share session caches between connections.
+//
+// See: https://github.com/rclone/rclone/issues/7234
+func (f *Fs) tlsConfig() *tls.Config {
+	var tlsConfig *tls.Config
+	if f.opt.TLS || f.opt.ExplicitTLS {
+		tlsConfig = &tls.Config{
+			ServerName:         f.opt.Host,
+			InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
+		}
+		if f.opt.TLSCacheSize > 0 {
+			tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
+		}
+		if f.opt.DisableTLS13 {
+			tlsConfig.MaxVersion = tls.VersionTLS12
+		}
+	}
+	return tlsConfig
+}
+
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
	fs.Debugf(f, "Connecting to FTP server")
+
+	// tls.Config for this connection only. Will be used for data
+	// and control connections.
+	tlsConfig := f.tlsConfig()
+
	// Make ftp library dial with fshttp dialer optionally using TLS
	initialConnection := true
	dial := func(network, address string) (conn net.Conn, err error) {
@@ -357,12 +415,17 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
		defer func() {
			fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
		}()
-		conn, err = fshttp.NewDialer(ctx).Dial(network, address)
+		baseDialer := fshttp.NewDialer(ctx)
+		if f.opt.SocksProxy != "" {
+			conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
+		} else {
+			conn, err = baseDialer.Dial(network, address)
+		}
		if err != nil {
			return nil, err
		}
		// Connect using cleartext only for non TLS
-		if f.tlsConf == nil {
+		if tlsConfig == nil {
			return conn, nil
		}
		// Initial connection only needs to be cleartext for explicit TLS
@@ -371,7 +434,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
			return conn, nil
		}
		// Upgrade connection to TLS
-		tlsConn := tls.Client(conn, f.tlsConf)
+		tlsConn := tls.Client(conn, tlsConfig)
		// Do the initial handshake - tls.Client doesn't do it for us
		// If we do this then connections to proftpd/pureftpd lock up
		// See: https://github.com/rclone/rclone/issues/6426
@@ -393,9 +456,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
	if f.opt.TLS {
		// Our dialer takes care of TLS but ftp library also needs tlsConf
		// as a trigger for sending PSBZ and PROT options to server.
-		ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
+		ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
	} else if f.opt.ExplicitTLS {
-		ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
+		ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
	}
	if f.opt.DisableEPSV {
		ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
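The point of the new tlsConfig helper is that it is called once per ftpConnection, so every control/data connection pair gets its own tls.ClientSessionCache instead of sharing the one that used to live on the Fs (see the issue referenced in the comment above). A standalone sketch of the same idea, with invented option field names:

package main

import (
	"crypto/tls"
	"fmt"
)

// options stands in for the relevant FTP backend settings (hypothetical names).
type options struct {
	host      string
	insecure  bool
	cacheSize int
}

// newTLSConfig builds a fresh config per connection so session caches are
// never shared between connections.
func newTLSConfig(o options) *tls.Config {
	c := &tls.Config{
		ServerName:         o.host,
		InsecureSkipVerify: o.insecure,
	}
	if o.cacheSize > 0 {
		c.ClientSessionCache = tls.NewLRUClientSessionCache(o.cacheSize)
	}
	return c
}

func main() {
	o := options{host: "ftp.example.com", cacheSize: 32}
	a, b := newTLSConfig(o), newTLSConfig(o)
	fmt.Println(a.ClientSessionCache == b.ClientSessionCache) // false: each connection has its own cache
}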
@@ -550,19 +613,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
|||||||
if opt.TLS && opt.ExplicitTLS {
|
if opt.TLS && opt.ExplicitTLS {
|
||||||
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
|
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
|
||||||
}
|
}
|
||||||
var tlsConfig *tls.Config
|
|
||||||
if opt.TLS || opt.ExplicitTLS {
|
|
||||||
tlsConfig = &tls.Config{
|
|
||||||
ServerName: opt.Host,
|
|
||||||
InsecureSkipVerify: opt.SkipVerifyTLSCert,
|
|
||||||
}
|
|
||||||
if opt.TLSCacheSize > 0 {
|
|
||||||
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
|
|
||||||
}
|
|
||||||
if opt.DisableTLS13 {
|
|
||||||
tlsConfig.MaxVersion = tls.VersionTLS12
|
|
||||||
}
|
|
||||||
}
|
|
||||||
u := protocol + path.Join(dialAddr+"/", root)
|
u := protocol + path.Join(dialAddr+"/", root)
|
||||||
ci := fs.GetConfig(ctx)
|
ci := fs.GetConfig(ctx)
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
@@ -575,7 +625,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
|||||||
pass: pass,
|
pass: pass,
|
||||||
dialAddr: dialAddr,
|
dialAddr: dialAddr,
|
||||||
tokens: pacer.NewTokenDispenser(opt.Concurrency),
|
tokens: pacer.NewTokenDispenser(opt.Concurrency),
|
||||||
tlsConf: tlsConfig,
|
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
@@ -938,6 +987,8 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
|
|||||||
f.putFtpConnection(&c, err)
|
f.putFtpConnection(&c, err)
|
||||||
if errX := textprotoError(err); errX != nil {
|
if errX := textprotoError(err); errX != nil {
|
||||||
switch errX.Code {
|
switch errX.Code {
|
||||||
|
case ftp.StatusRequestedFileActionOK: // some ftp servers apparently return 250 instead of 257
|
||||||
|
err = nil // see: https://forum.rclone.org/t/rclone-pop-up-an-i-o-error-when-creating-a-folder-in-a-mounted-ftp-drive/44368/
|
||||||
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
|
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
|
||||||
err = nil
|
err = nil
|
||||||
case 521: // dir already exists: error number according to RFC 959: issue #2363
|
case 521: // dir already exists: error number according to RFC 959: issue #2363
|
||||||
@@ -1269,6 +1320,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
return fmt.Errorf("update stor: %w", err)
|
return fmt.Errorf("update stor: %w", err)
|
||||||
}
|
}
|
||||||
o.fs.putFtpConnection(&c, nil)
|
o.fs.putFtpConnection(&c, nil)
|
||||||
|
if o.fs.opt.NoCheckUpload {
|
||||||
|
o.info = &FileInfo{
|
||||||
|
Name: o.remote,
|
||||||
|
Size: uint64(src.Size()),
|
||||||
|
ModTime: src.ModTime(ctx),
|
||||||
|
precise: true,
|
||||||
|
IsDir: false,
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
|
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
|
||||||
return fmt.Errorf("SetModTime: %w", err)
|
return fmt.Errorf("SetModTime: %w", err)
|
||||||
}
|
}
|
||||||
|
|||||||
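The ftp.go hunks above move the tls.Config out of the Fs struct and hand a per-connection config straight to the jlaffaye/ftp dial options. A minimal sketch of that mapping, assuming illustrative inputs (host, skipVerify and the two booleans) rather than rclone's real Options struct:

package main

import (
	"crypto/tls"

	"github.com/jlaffaye/ftp"
)

// dialOptions shows how implicit TLS (FTPS) and explicit TLS (FTPES) each use
// the same tls.Config but different dial options, as in the diff above.
func dialOptions(host string, implicitTLS, explicitTLS, skipVerify bool) []ftp.DialOption {
	var opts []ftp.DialOption
	if !implicitTLS && !explicitTLS {
		return opts
	}
	tlsConfig := &tls.Config{
		ServerName:         host,
		InsecureSkipVerify: skipVerify, // only for self-signed test servers
	}
	if implicitTLS {
		opts = append(opts, ftp.DialWithTLS(tlsConfig)) // TLS from the first byte
	} else {
		opts = append(opts, ftp.DialWithExplicitTLS(tlsConfig)) // upgrade via AUTH TLS
	}
	return opts
}

func main() {
	_ = dialOptions("ftp.example.com", false, true, false)
}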
backend/gofile/api/types.go (new file, 311 lines)
@@ -0,0 +1,311 @@
// Package api has type definitions for gofile
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
	"fmt"
	"time"
)

const (
	// 2017-05-03T07:26:10-07:00
	timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the
// gofile API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
	timeString := (*time.Time)(t).Format(timeFormat)
	return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

// Error is returned from gofile when things go wrong
type Error struct {
	Status string `json:"status"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
	out := fmt.Sprintf("Error %q", e.Status)
	return out
}

// IsError returns true if there is an error
func (e Error) IsError() bool {
	return e.Status != "ok"
}

// Err returns err if not nil, or e if IsError or nil
func (e Error) Err(err error) error {
	if err != nil {
		return err
	}
	if e.IsError() {
		return e
	}
	return nil
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// Types of things in Item
const (
	ItemTypeFolder = "folder"
	ItemTypeFile   = "file"
)

// Item describes a folder or a file as returned by /contents
type Item struct {
	ID            string                 `json:"id"`
	ParentFolder  string                 `json:"parentFolder"`
	Type          string                 `json:"type"`
	Name          string                 `json:"name"`
	Size          int64                  `json:"size"`
	Code          string                 `json:"code"`
	CreateTime    int64                  `json:"createTime"`
	ModTime       int64                  `json:"modTime"`
	Link          string                 `json:"link"`
	MD5           string                 `json:"md5"`
	MimeType      string                 `json:"mimetype"`
	ChildrenCount int                    `json:"childrenCount"`
	DirectLinks   map[string]*DirectLink `json:"directLinks"`
	//Public             bool     `json:"public"`
	//ServerSelected     string   `json:"serverSelected"`
	//Thumbnail          string   `json:"thumbnail"`
	//DownloadCount      int      `json:"downloadCount"`
	//TotalDownloadCount int64    `json:"totalDownloadCount"`
	//TotalSize          int64    `json:"totalSize"`
	//ChildrenIDs        []string `json:"childrenIds"`
	Children map[string]*Item `json:"children"`
}

// ToNativeTime converts a go time to a native time
func ToNativeTime(t time.Time) int64 {
	return t.Unix()
}

// FromNativeTime converts native time to a go time
func FromNativeTime(t int64) time.Time {
	return time.Unix(t, 0)
}

// DirectLink describes a direct link to a file so it can be
// downloaded by third parties.
type DirectLink struct {
	ExpireTime       int64  `json:"expireTime"`
	SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
	DomainsAllowed   []any  `json:"domainsAllowed"`
	Auth             []any  `json:"auth"`
	IsReqLink        bool   `json:"isReqLink"`
	DirectLink       string `json:"directLink"`
}

// Contents is returned from the /contents call
type Contents struct {
	Error
	Data struct {
		Item
	} `json:"data"`
	Metadata Metadata `json:"metadata"`
}

// Metadata is returned when paging is in use
type Metadata struct {
	TotalCount  int  `json:"totalCount"`
	TotalPages  int  `json:"totalPages"`
	Page        int  `json:"page"`
	PageSize    int  `json:"pageSize"`
	HasNextPage bool `json:"hasNextPage"`
}

// AccountsGetID is the result of /accounts/getid
type AccountsGetID struct {
	Error
	Data struct {
		ID string `json:"id"`
	} `json:"data"`
}

// Stats of storage and traffic
type Stats struct {
	FolderCount            int64 `json:"folderCount"`
	FileCount              int64 `json:"fileCount"`
	Storage                int64 `json:"storage"`
	TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
	TrafficReqDownloaded   int64 `json:"trafficReqDownloaded"`
	TrafficWebDownloaded   int64 `json:"trafficWebDownloaded"`
}

// AccountsGet is the result of /accounts/{id}
type AccountsGet struct {
	Error
	Data struct {
		ID                             string `json:"id"`
		Email                          string `json:"email"`
		Tier                           string `json:"tier"`
		PremiumType                    string `json:"premiumType"`
		Token                          string `json:"token"`
		RootFolder                     string `json:"rootFolder"`
		SubscriptionProvider           string `json:"subscriptionProvider"`
		SubscriptionEndDate            int    `json:"subscriptionEndDate"`
		SubscriptionLimitDirectTraffic int64  `json:"subscriptionLimitDirectTraffic"`
		SubscriptionLimitStorage       int64  `json:"subscriptionLimitStorage"`
		StatsCurrent                   Stats  `json:"statsCurrent"`
		// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
	} `json:"data"`
}

// CreateFolderRequest is the input to /contents/createFolder
type CreateFolderRequest struct {
	ParentFolderID string `json:"parentFolderId"`
	FolderName     string `json:"folderName"`
	ModTime        int64  `json:"modTime,omitempty"`
}

// CreateFolderResponse is the output from /contents/createFolder
type CreateFolderResponse struct {
	Error
	Data Item `json:"data"`
}

// DeleteRequest is the input to DELETE /contents
type DeleteRequest struct {
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// DeleteResponse is the input to DELETE /contents
type DeleteResponse struct {
	Error
	Data map[string]Error
}

// Server is an upload server
type Server struct {
	Name string `json:"name"`
	Zone string `json:"zone"`
}

// String returns a string representation of the Server
func (s *Server) String() string {
	return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}

// Root returns the root URL for the server
func (s *Server) Root() string {
	return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}

// URL returns the upload URL for the server
func (s *Server) URL() string {
	return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}

// ServersResponse is the output from /servers
type ServersResponse struct {
	Error
	Data struct {
		Servers []Server `json:"servers"`
	} `json:"data"`
}

// UploadResponse is returned by POST /contents/uploadfile
type UploadResponse struct {
	Error
	Data Item `json:"data"`
}

// DirectLinksRequest specifies the parameters for the direct link
type DirectLinksRequest struct {
	ExpireTime       int64 `json:"expireTime,omitempty"`
	SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
	DomainsAllowed   []any `json:"domainsAllowed,omitempty"`
	Auth             []any `json:"auth,omitempty"`
}

// DirectLinksResult is returned from POST /contents/{id}/directlinks
type DirectLinksResult struct {
	Error
	Data struct {
		ExpireTime       int64  `json:"expireTime"`
		SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
		DomainsAllowed   []any  `json:"domainsAllowed"`
		Auth             []any  `json:"auth"`
		IsReqLink        bool   `json:"isReqLink"`
		ID               string `json:"id"`
		DirectLink       string `json:"directLink"`
	} `json:"data"`
}

// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
//
// The Value of the attribute to define :
// For Attribute "name" : The name of the content (file or folder)
// For Attribute "description" : The description displayed on the download page (folder only)
// For Attribute "tags" : A comma-separated list of tags (folder only)
// For Attribute "public" : either true or false (folder only)
// For Attribute "expiry" : A unix timestamp of the expiration date (folder only)
// For Attribute "password" : The password to set (folder only)
type UpdateItemRequest struct {
	Attribute string `json:"attribute"`
	Value     any    `json:"attributeValue"`
}

// UpdateItemResponse is returned by PUT /contents/{id}/update
type UpdateItemResponse struct {
	Error
	Data Item `json:"data"`
}

// MoveRequest is the input to /contents/move
type MoveRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// MoveResponse is returned by POST /contents/move
type MoveResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// CopyRequest is the input to /contents/copy
type CopyRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// CopyResponse is returned by POST /contents/copy
type CopyResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// UploadServerStatus is returned when fetching the root of an upload server
type UploadServerStatus struct {
	Error
	Data struct {
		Server string `json:"server"`
		Test   string `json:"test"`
	} `json:"data"`
}
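Since the types above are plain JSON bindings, a short sketch of decoding a /contents response and folding the embedded Error into a normal Go error may help. The base URL, folder ID and bearer-token header here are assumptions for illustration only, not taken from the rclone backend:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/rclone/rclone/backend/gofile/api"
)

// listFolder decodes a gofile /contents response using the api types above.
func listFolder(ctx context.Context, client *http.Client, token, folderID string) (*api.Item, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", "https://api.gofile.io/contents/"+folderID, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+token) // assumed auth scheme
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()
	var result api.Contents
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, err
	}
	// Err returns nil only when the embedded status field is "ok"
	if err := result.Err(nil); err != nil {
		return nil, err
	}
	return &result.Data.Item, nil
}

func main() {
	item, err := listFolder(context.Background(), http.DefaultClient, "TOKEN", "FOLDER-ID")
	fmt.Println(item, err)
}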
backend/gofile/gofile.go (new file, 1646 lines)
File diff suppressed because it is too large.

backend/gofile/gofile_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test Gofile filesystem interface
package gofile_test

import (
	"testing"

	"github.com/rclone/rclone/backend/gofile"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestGoFile:",
		NilObject:  (*gofile.Object)(nil),
	})
}
@@ -60,16 +60,14 @@ const (
	minSleep = 10 * time.Millisecond
)

-var (
-	// Description of how to auth for this app
-	storageConfig = &oauth2.Config{
-		Scopes:       []string{storage.DevstorageReadWriteScope},
-		Endpoint:     google.Endpoint,
-		ClientID:     rcloneClientID,
-		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-		RedirectURL:  oauthutil.RedirectURL,
-	}
-)
+// Description of how to auth for this app
+var storageConfig = &oauth2.Config{
+	Scopes:       []string{storage.DevstorageReadWriteScope},
+	Endpoint:     google.Endpoint,
+	ClientID:     rcloneClientID,
+	ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
+	RedirectURL:  oauthutil.RedirectURL,
+}

// Register with Fs
func init() {
@@ -91,18 +89,27 @@ func init() {
		})
	},
	Options: append(oauthutil.SharedOptions, []fs.Option{{
		Name: "project_number",
		Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
+		Sensitive: true,
	}, {
		Name: "user_project",
		Help: "User project.\n\nOptional - needed only for requester pays.",
+		Sensitive: true,
	}, {
		Name: "service_account_file",
		Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
	}, {
		Name: "service_account_credentials",
		Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
		Hide: fs.OptionHideBoth,
+		Sensitive: true,
+	}, {
+		Name:      "access_token",
+		Help:      "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want use short-lived access token instead of interactive login.",
+		Hide:      fs.OptionHideConfigurator,
+		Sensitive: true,
+		Advanced:  true,
	}, {
		Name: "anonymous",
		Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -376,6 +383,7 @@ type Options struct {
	Enc              encoder.MultiEncoder `config:"encoding"`
	EnvAuth          bool                 `config:"env_auth"`
	DirectoryMarkers bool                 `config:"directory_markers"`
+	AccessToken      string               `config:"access_token"`
}

// Fs represents a remote storage server
@@ -532,6 +540,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		if err != nil {
			return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
		}
+	} else if opt.AccessToken != "" {
+		ts := oauth2.Token{AccessToken: opt.AccessToken}
+		oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
	} else {
		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
		if err != nil {
@@ -694,7 +705,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
		// is this a directory marker?
		if isDirectory {
			// Don't insert the root directory
-			if remote == directory {
+			if remote == f.opt.Enc.ToStandardPath(directory) {
				continue
			}
			// process directory markers as directories
@@ -941,7 +952,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
		return e
	}
	return f.createDirectoryMarker(ctx, bucket, dir)
-
}

// mkdirParent creates the parent bucket/directory if it doesn't exist
@@ -1307,10 +1317,11 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	url := o.url
	if o.fs.opt.UserProject != "" {
-		o.url = o.url + "&userProject=" + o.fs.opt.UserProject
+		url += "&userProject=" + o.fs.opt.UserProject
	}
-	req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
+	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, err
	}
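The new access_token branch in NewFs above is the standard golang.org/x/oauth2 static-token pattern. A minimal, self-contained sketch of that pattern on its own, with the token value assumed to come from the user's config:

package main

import (
	"context"
	"net/http"

	"golang.org/x/oauth2"
)

// newStaticTokenClient wraps a pre-obtained, short-lived access token in a
// static token source so every request carries the Authorization header.
func newStaticTokenClient(ctx context.Context, accessToken string) *http.Client {
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: accessToken})
	return oauth2.NewClient(ctx, ts)
}

func main() {
	_ = newStaticTokenClient(context.Background(), "example-access-token")
}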
@@ -56,8 +56,7 @@ type MediaItem struct {
		CreationTime time.Time `json:"creationTime"`
		Width        string    `json:"width"`
		Height       string    `json:"height"`
-		Photo struct {
-		} `json:"photo"`
+		Photo        struct{}  `json:"photo"`
	} `json:"mediaMetadata"`
	Filename string `json:"filename"`
}
@@ -68,7 +67,7 @@ type MediaItems struct {
	NextPageToken string `json:"nextPageToken"`
}

-//Content categories
+// Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
@@ -187,5 +186,5 @@ type BatchCreateResponse struct {

// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
-	MediaItemIds []string `json:"mediaItemIds"`
+	MediaItemIDs []string `json:"mediaItemIds"`
}
@@ -28,7 +28,7 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/log"
+	"github.com/rclone/rclone/lib/batcher"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
@@ -71,6 +71,14 @@ var (
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
	}
+
+	// Configure the batcher
+	defaultBatcherOptions = batcher.Options{
+		MaxBatchSize:          50,
+		DefaultTimeoutSync:    1000 * time.Millisecond,
+		DefaultTimeoutAsync:   10 * time.Second,
+		DefaultBatchSizeAsync: 50,
+	}
)

// Register with Fs
@@ -111,7 +119,7 @@ will count towards storage in your Google Account.`)
		}
		return nil, fmt.Errorf("unknown state %q", config.State)
	},
-	Options: append(oauthutil.SharedOptions, []fs.Option{{
+	Options: append(append(oauthutil.SharedOptions, []fs.Option{{
		Name:    "read_only",
		Default: false,
		Help: `Set to make the Google Photos backend read only.
@@ -151,6 +159,34 @@ listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
		Advanced: true,
+	}, {
+		Name:    "proxy",
+		Default: "",
+		Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
+
+The Google API will deliver images and video which aren't full
+resolution, and/or have EXIF data missing.
+
+However if you ue the gphotosdl proxy tnen you can download original,
+unchanged images.
+
+This runs a headless browser in the background.
+
+Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
+
+First run with
+
+    gphotosdl -login
+
+Then once you have logged into google photos close the browser window
+and run
+
+    gphotosdl
+
+Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
+rclone use the proxy.
+`, "|", "`"),
+		Advanced: true,
	}, {
		Name: config.ConfigEncoding,
		Help: config.ConfigEncodingHelp,
@@ -158,7 +194,7 @@ listings and won't be transferred.`,
		Default: (encoder.Base |
			encoder.EncodeCrLf |
			encoder.EncodeInvalidUtf8),
-	}}...),
+	}}...), defaultBatcherOptions.FsOptions("")...),
	})
}
@@ -169,6 +205,10 @@ type Options struct {
	StartYear       int                  `config:"start_year"`
	IncludeArchived bool                 `config:"include_archived"`
	Enc             encoder.MultiEncoder `config:"encoding"`
+	BatchMode       string               `config:"batch_mode"`
+	BatchSize       int                  `config:"batch_size"`
+	BatchTimeout    fs.Duration          `config:"batch_timeout"`
+	Proxy           string               `config:"proxy"`
}

// Fs represents a remote storage server
@@ -187,6 +227,7 @@ type Fs struct {
	uploadedMu sync.Mutex      // to protect the below
	uploaded   dirtree.DirTree // record of uploaded items
	createMu   sync.Mutex      // held when creating albums to prevent dupes
+	batcher    *batcher.Batcher[uploadedItem, *api.MediaItem]
}

// Object describes a storage object
@@ -267,7 +308,7 @@ func errorHandler(resp *http.Response) error {
	if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
		body = []byte("Image not found or broken")
	}
-	var e = api.Error{
+	e := api.Error{
		Details: api.ErrorDetails{
			Code:    resp.StatusCode,
			Message: string(body),
@@ -312,6 +353,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		albums:   map[bool]*albums{},
		uploaded: dirtree.New(),
	}
+	batcherOptions := defaultBatcherOptions
+	batcherOptions.Mode = f.opt.BatchMode
+	batcherOptions.Size = f.opt.BatchSize
+	batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
+	f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
+	if err != nil {
+		return nil, err
+	}
	f.features = (&fs.Features{
		ReadMimeType: true,
	}).Fill(ctx, f)
@@ -433,7 +482,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	defer log.Trace(f, "remote=%q", remote)("")
+	// defer log.Trace(f, "remote=%q", remote)("")
	return f.newObjectWithInfo(ctx, remote, nil)
}
@@ -599,9 +648,7 @@ func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter
		if err != nil {
			return err
		}
-		if entry != nil {
-			entries = append(entries, entry)
-		}
+		entries = append(entries, entry)
		return nil
	})
	if err != nil {
@@ -648,7 +695,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
+	// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, prefix, pattern := patterns.match(f.root, dir, false)
	if pattern == nil || pattern.isFile {
		return nil, fs.ErrorDirNotFound
@@ -665,7 +712,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	defer log.Trace(f, "src=%+v", src)("")
+	// defer log.Trace(f, "src=%+v", src)("")
	// Temporary Object under construction
	o := &Object{
		fs: f,
@@ -681,7 +728,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
		Path:       "/albums",
		Parameters: url.Values{},
	}
-	var request = api.CreateAlbum{
+	request := api.CreateAlbum{
		Album: &api.Album{
			Title: albumTitle,
		},
@@ -718,7 +765,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap

// Mkdir creates the album if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
-	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
+	// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, prefix, pattern := patterns.match(f.root, dir, false)
	if pattern == nil {
		return fs.ErrorDirNotFound
@@ -742,7 +789,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
-	defer log.Trace(f, "dir=%q")("err=%v", &err)
+	// defer log.Trace(f, "dir=%q")("err=%v", &err)
	match, _, pattern := patterns.match(f.root, dir, false)
	if pattern == nil {
		return fs.ErrorDirNotFound
@@ -781,6 +828,13 @@ func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

+// Shutdown the backend, closing any background tasks and any
+// cached connections.
+func (f *Fs) Shutdown(ctx context.Context) error {
+	f.batcher.Shutdown()
+	return nil
+}
+
// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -808,7 +862,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
-	defer log.Trace(o, "")("")
+	// defer log.Trace(o, "")("")
	if !o.fs.opt.ReadSize || o.bytes >= 0 {
		return o.bytes
	}
@@ -909,7 +963,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
-	defer log.Trace(o, "")("")
+	// defer log.Trace(o, "")("")
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
@@ -939,16 +993,20 @@ func (o *Object) downloadURL() string {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	defer log.Trace(o, "")("")
+	// defer log.Trace(o, "")("")
	err = o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "Open: Failed to read metadata: %v", err)
		return nil, err
	}
+	url := o.downloadURL()
+	if o.fs.opt.Proxy != "" {
+		url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
+	}
	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
-		RootURL: o.downloadURL(),
+		RootURL: url,
		Options: options,
	}
	err = o.fs.pacer.Call(func() (bool, error) {
@@ -961,11 +1019,87 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	return resp.Body, err
}

+// input to the batcher
+type uploadedItem struct {
+	AlbumID     string // desired album
+	UploadToken string // upload ID
+}
+
+// Commit a batch of items to albumID returning the errors in errors
+func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error, albumID string) {
+	// Create the media item from an UploadToken, optionally adding to an album
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/mediaItems:batchCreate",
+	}
+	request := api.BatchCreateRequest{
+		AlbumID: albumID,
+	}
+	itemsInBatch := 0
+	for i := range items {
+		if items[i].AlbumID == albumID {
+			request.NewMediaItems = append(request.NewMediaItems, api.NewMediaItem{
+				SimpleMediaItem: api.SimpleMediaItem{
+					UploadToken: items[i].UploadToken,
+				},
+			})
+			itemsInBatch++
+		}
+	}
+	var result api.BatchCreateResponse
+	var resp *http.Response
+	var err error
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		err = fmt.Errorf("failed to create media item: %w", err)
+	}
+	if err == nil && len(result.NewMediaItemResults) != itemsInBatch {
+		err = fmt.Errorf("bad response to BatchCreate expecting %d items but got %d", itemsInBatch, len(result.NewMediaItemResults))
+	}
+	j := 0
+	for i := range items {
+		if items[i].AlbumID == albumID {
+			if err == nil {
+				media := &result.NewMediaItemResults[j]
+				if media.Status.Code != 0 {
+					errors[i] = fmt.Errorf("upload failed: %s (%d)", media.Status.Message, media.Status.Code)
+				} else {
+					results[i] = &media.MediaItem
+				}
+			} else {
+				errors[i] = err
+			}
+			j++
+		}
+	}
+}
+
+// Called by the batcher to commit a batch
+func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error) (err error) {
+	// Discover all the AlbumIDs as we have to upload these separately
+	//
+	// Should maybe have one batcher per AlbumID
+	albumIDs := map[string]struct{}{}
+	for i := range items {
+		albumIDs[items[i].AlbumID] = struct{}{}
+	}
+
+	// batch the albums
+	for albumID := range albumIDs {
+		// errors returned in errors
+		f.commitBatchAlbumID(ctx, items, results, errors, albumID)
+	}
+	return nil
+}
+
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-	defer log.Trace(o, "src=%+v", src)("err=%v", &err)
+	// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
	if pattern == nil || !pattern.isFile || !pattern.canUpload {
		return errCantUpload
@@ -1021,37 +1155,29 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		return errors.New("empty upload token")
	}

-	// Create the media item from an UploadToken, optionally adding to an album
-	opts = rest.Opts{
-		Method: "POST",
-		Path:   "/mediaItems:batchCreate",
-	}
-	var request = api.BatchCreateRequest{
-		AlbumID: albumID,
-		NewMediaItems: []api.NewMediaItem{
-			{
-				SimpleMediaItem: api.SimpleMediaItem{
-					UploadToken: uploadToken,
-				},
-			},
-		},
-	}
-	var result api.BatchCreateResponse
-	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
-		return shouldRetry(ctx, resp, err)
-	})
-	if err != nil {
-		return fmt.Errorf("failed to create media item: %w", err)
-	}
-	if len(result.NewMediaItemResults) != 1 {
-		return errors.New("bad response to BatchCreate wrong number of items")
-	}
-	mediaItemResult := result.NewMediaItemResults[0]
-	if mediaItemResult.Status.Code != 0 {
-		return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
-	}
-	o.setMetaData(&mediaItemResult.MediaItem)
+	uploaded := uploadedItem{
+		AlbumID:     albumID,
+		UploadToken: uploadToken,
+	}
+
+	// Save the upload into an album
+	var info *api.MediaItem
+	if o.fs.batcher.Batching() {
+		info, err = o.fs.batcher.Commit(ctx, o.remote, uploaded)
+	} else {
+		errors := make([]error, 1)
+		results := make([]*api.MediaItem, 1)
+		err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
+		if err != nil {
+			err = errors[0]
+			info = results[0]
+		}
+	}
+	if err != nil {
+		return fmt.Errorf("failed to commit batch: %w", err)
+	}
+	o.setMetaData(info)

	// Add upload to internal storage
	if pattern.isUpload {
@@ -1078,8 +1204,8 @@ func (o *Object) Remove(ctx context.Context) (err error) {
		Path:       "/albums/" + album.ID + ":batchRemoveMediaItems",
		NoResponse: true,
	}
-	var request = api.BatchRemoveItems{
-		MediaItemIds: []string{o.id},
+	request := api.BatchRemoveItems{
+		MediaItemIDs: []string{o.id},
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
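The commitBatch/commitBatchAlbumID additions above plug into rclone's generic lib/batcher, whose callback fills one result slot and one error slot per queued item. As a rough, self-contained illustration of that callback shape only (a toy stand-in, not the real batcher):

package main

import (
	"context"
	"fmt"
)

type item struct{ album, token string }

// commitFn mirrors the shape of Fs.commitBatch above: one result slot and one
// error slot per queued item.
type commitFn func(ctx context.Context, items []item, results []*string, errs []error) error

// flush sizes the result/error slices and hands the whole pending batch to the
// callback, roughly what the batcher does when a batch fills up or times out.
func flush(ctx context.Context, pending []item, commit commitFn) ([]*string, []error, error) {
	results := make([]*string, len(pending))
	errs := make([]error, len(pending))
	err := commit(ctx, pending, results, errs)
	return results, errs, err
}

func main() {
	commit := func(ctx context.Context, items []item, results []*string, errs []error) error {
		for i := range items {
			s := "created:" + items[i].token
			results[i] = &s // per-item failures would go into errs[i] instead
		}
		return nil
	}
	results, _, err := flush(context.Background(), []item{{"album1", "tok1"}, {"album1", "tok2"}}, commit)
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		fmt.Println(*r)
	}
}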
@@ -38,7 +38,7 @@ type dirPattern struct {
	toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}

-// dirPatters is a slice of all the directory patterns
+// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern

// patterns describes the layout of the google photos backend file system.

@@ -80,6 +80,14 @@ func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
		}
		root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
	}
+	if f.db == nil {
+		if f.opt.MaxAge == 0 {
+			fs.Errorf(f, "db not found. (disabled with max_age = 0)")
+		} else {
+			fs.Errorf(f, "db not found.")
+		}
+		return kv.ErrInactive
+	}
	op := &kvDump{
		full: full,
		root: root,

@@ -114,6 +114,13 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
		root: rpath,
		opt:  opt,
	}
+	// Correct root if definitely pointing to a file
+	if err == fs.ErrorIsFile {
+		f.root = path.Dir(f.root)
+		if f.root == "." || f.root == "/" {
+			f.root = ""
+		}
+	}
	baseFeatures := baseFs.Features()
	f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported
@@ -157,16 +164,21 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
	}

	stubFeatures := &fs.Features{
		CanHaveEmptyDirectories: true,
		IsLocal:                 true,
		ReadMimeType:            true,
		WriteMimeType:           true,
		SetTier:                 true,
		GetTier:                 true,
		ReadMetadata:            true,
		WriteMetadata:           true,
		UserMetadata:            true,
-		PartialUploads:          true,
+		ReadDirMetadata:          true,
+		WriteDirMetadata:         true,
+		WriteDirSetModTime:       true,
+		UserDirMetadata:          true,
+		DirModTimeUpdatesOnWrite: true,
+		PartialUploads:           true,
	}
	f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

@@ -334,6 +346,22 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	return errors.New("MergeDirs not supported")
}

+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+	if do := f.Fs.Features().DirSetModTime; do != nil {
+		return do(ctx, dir, modTime)
+	}
+	return fs.ErrorNotImplemented
+}
+
+// MkdirMetadata makes the root directory of the Fs object
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+	if do := f.Fs.Features().MkdirMetadata; do != nil {
+		return do(ctx, dir, metadata)
+	}
+	return nil, fs.ErrorNotImplemented
+}
+
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
@@ -411,7 +439,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
-	err = f.db.Stop(false)
+	if f.db != nil && !f.db.IsStopped() {
+		err = f.db.Stop(false)
+	}
	if do := f.Fs.Features().Shutdown; do != nil {
		if err2 := do(ctx); err2 != nil {
			err = err2
@@ -505,6 +535,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	return do.Metadata(ctx)
}

+// SetMetadata sets metadata for an Object
+//
+// It should return fs.ErrorNotImplemented if it can't set metadata
+func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
+	do, ok := o.Object.(fs.SetMetadataer)
+	if !ok {
+		return fs.ErrorNotImplemented
+	}
+	return do.SetMetadata(ctx, metadata)
+}
+
// Check the interfaces are satisfied
var (
	_ fs.Fs = (*Fs)(nil)
@@ -521,6 +562,8 @@ var (
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Wrapper         = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
+	_ fs.DirSetModTimer  = (*Fs)(nil)
+	_ fs.MkdirMetadataer = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
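The DirSetModTime and MkdirMetadata additions above follow the usual pattern for wrapping backends: look up the optional method on the wrapped Fs's Features() and delegate, otherwise report not-implemented. A minimal, self-contained sketch of that pattern with simplified stand-in types (not rclone's real fs types):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotImplemented = errors.New("optional feature not implemented")

// features carries optional capabilities as nilable function pointers,
// in the same spirit as fs.Features.
type features struct {
	DirSetModTime func(dir string, t time.Time) error
}

type base struct{ feat features }

func (b *base) Features() *features { return &b.feat }

// wrapper delegates an optional method only if the wrapped backend offers it.
type wrapper struct{ *base }

func (w *wrapper) DirSetModTime(dir string, t time.Time) error {
	if do := w.base.Features().DirSetModTime; do != nil {
		return do(dir, t)
	}
	return errNotImplemented
}

func main() {
	w := &wrapper{&base{}}
	fmt.Println(w.DirSetModTime("photos", time.Now())) // optional feature not implemented
}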
|||||||
@@ -60,9 +60,11 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
|
|||||||
assert.NotNil(t, dst)
|
assert.NotNil(t, dst)
|
||||||
|
|
||||||
// check that hash was created
|
// check that hash was created
|
||||||
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
|
if f.opt.MaxAge > 0 {
|
||||||
assert.NoError(t, err)
|
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
|
||||||
assert.NotEmpty(t, hash)
|
assert.NoError(t, err)
|
||||||
|
assert.NotEmpty(t, hash)
|
||||||
|
}
|
||||||
//t.Logf("hash is %q", hash)
|
//t.Logf("hash is %q", hash)
|
||||||
_ = operations.Purge(ctx, f, dirName)
|
_ = operations.Purge(ctx, f, dirName)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -23,6 +23,7 @@ func TestIntegration(t *testing.T) {
|
|||||||
NilObject: (*hasher.Object)(nil),
|
NilObject: (*hasher.Object)(nil),
|
||||||
UnimplementableFsMethods: []string{
|
UnimplementableFsMethods: []string{
|
||||||
"OpenWriterAt",
|
"OpenWriterAt",
|
||||||
|
"OpenChunkWriter",
|
||||||
},
|
},
|
||||||
UnimplementableObjectMethods: []string{},
|
UnimplementableObjectMethods: []string{},
|
||||||
}
|
}
|
||||||
@@ -36,4 +37,9 @@ func TestIntegration(t *testing.T) {
|
|||||||
opt.QuickTestOK = true
|
opt.QuickTestOK = true
|
||||||
}
|
}
|
||||||
fstests.Run(t, &opt)
|
fstests.Run(t, &opt)
|
||||||
|
// test again with MaxAge = 0
|
||||||
|
if *fstest.RemoteName == "" {
|
||||||
|
opt.ExtraConfig = append(opt.ExtraConfig, fstests.ExtraConfigItem{Name: "TestHasher", Key: "max_age", Value: "0"})
|
||||||
|
fstests.Run(t, &opt)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -71,7 +71,14 @@ func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string,
|
|||||||
f := o.f
|
f := o.f
|
||||||
if f.passHashes.Contains(hashType) {
|
if f.passHashes.Contains(hashType) {
|
||||||
fs.Debugf(o, "pass %s", hashType)
|
fs.Debugf(o, "pass %s", hashType)
|
||||||
return o.Object.Hash(ctx, hashType)
|
hashVal, err = o.Object.Hash(ctx, hashType)
|
||||||
|
if hashVal != "" {
|
||||||
|
return hashVal, err
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(o, "error passing %s: %v", hashType, err)
|
||||||
|
}
|
||||||
|
fs.Debugf(o, "passed %s is blank -- trying other methods", hashType)
|
||||||
}
|
}
|
||||||
if !f.suppHashes.Contains(hashType) {
|
if !f.suppHashes.Contains(hashType) {
|
||||||
fs.Debugf(o, "unsupp %s", hashType)
|
fs.Debugf(o, "unsupp %s", hashType)
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build !plan9
|
//go:build !plan9
|
||||||
// +build !plan9
|
|
||||||
|
|
||||||
package hdfs
|
package hdfs
|
||||||
|
|
||||||
@@ -21,6 +20,7 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Fs represents a HDFS server
|
// Fs represents a HDFS server
|
||||||
@@ -31,8 +31,15 @@ type Fs struct {
|
|||||||
opt Options // options for this backend
|
opt Options // options for this backend
|
||||||
ci *fs.ConfigInfo // global config
|
ci *fs.ConfigInfo // global config
|
||||||
client *hdfs.Client
|
client *hdfs.Client
|
||||||
|
pacer *fs.Pacer // pacer for API calls
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
minSleep = 20 * time.Millisecond
|
||||||
|
maxSleep = 10 * time.Second
|
||||||
|
decayConstant = 2 // bigger for slower decay, exponential
|
||||||
|
)
|
||||||
|
|
||||||
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
|
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
|
||||||
func getKerberosClient() (*krb.Client, error) {
|
func getKerberosClient() (*krb.Client, error) {
|
||||||
configPath := os.Getenv("KRB5_CONFIG")
|
configPath := os.Getenv("KRB5_CONFIG")
|
||||||
@@ -85,7 +92,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
}
|
}
|
||||||
|
|
||||||
options := hdfs.ClientOptions{
|
options := hdfs.ClientOptions{
|
||||||
Addresses: []string{opt.Namenode},
|
Addresses: opt.Namenode,
|
||||||
UseDatanodeHostname: false,
|
UseDatanodeHostname: false,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -114,6 +121,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
opt: *opt,
|
opt: *opt,
|
||||||
ci: fs.GetConfig(ctx),
|
ci: fs.GetConfig(ctx),
|
||||||
client: client,
|
client: client,
|
||||||
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
}
|
||||||
|
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
@@ -141,7 +149,7 @@ func (f *Fs) Root() string {
|
|||||||
|
|
||||||
// String returns a description of the FS
|
// String returns a description of the FS
|
||||||
func (f *Fs) String() string {
|
func (f *Fs) String() string {
|
||||||
return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
|
return fmt.Sprintf("hdfs://%s/%s", f.opt.Namenode, f.root)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
// Features returns the optional features of this Fs
|
||||||
@@ -201,7 +209,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
size: x.Size(),
|
size: x.Size(),
|
||||||
modTime: x.ModTime()})
|
modTime: x.ModTime(),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return entries, nil
|
return entries, nil
|
||||||
@@ -1,5 +1,4 @@
 //go:build !plan9
-// +build !plan9
 
 // Package hdfs provides an interface to the HDFS storage system.
 package hdfs
@@ -19,9 +18,11 @@ func init() {
 		Description: "Hadoop distributed file system",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "namenode",
-			Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
+			Help: "Hadoop name nodes and ports.\n\nE.g. \"namenode-1:8020,namenode-2:8020,...\" to connect to host namenodes at port 8020.",
 			Required: true,
+			Sensitive: true,
+			Default:   fs.CommaSepList{},
 		}, {
 			Name: "username",
 			Help: "Hadoop user name.",
@@ -29,6 +30,7 @@ func init() {
 				Value: "root",
 				Help:  "Connect to hdfs as root.",
 			}},
+			Sensitive: true,
 		}, {
 			Name: "service_principal_name",
 			Help: `Kerberos service principal name for the namenode.
@@ -36,7 +38,8 @@ func init() {
 Enables KERBEROS authentication. Specifies the Service Principal Name
 (SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
 for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
 			Advanced: true,
+			Sensitive: true,
 		}, {
 			Name: "data_transfer_protection",
 			Help: `Kerberos data transfer protection: authentication|integrity|privacy.
@@ -62,7 +65,7 @@ and 'privacy'. Used only with KERBEROS enabled.`,
 
 // Options for this backend
 type Options struct {
-	Namenode               string          `config:"namenode"`
+	Namenode               fs.CommaSepList `config:"namenode"`
 	Username               string          `config:"username"`
 	ServicePrincipalName   string          `config:"service_principal_name"`
 	DataTransferProtection string          `config:"data_transfer_protection"`
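Since the namenode option is now a comma-separated list (fs.CommaSepList), one HDFS remote can name several name nodes, for example an HA pair. A minimal illustrative configuration; the remote name is made up and the hosts are taken from the help text above:

[hdfs]
type = hdfs
namenode = namenode-1:8020,namenode-2:8020
username = root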
@@ -1,7 +1,6 @@
 // Test HDFS filesystem interface
 
 //go:build !plan9
-// +build !plan9
 
 package hdfs_test
 
@@ -2,6 +2,6 @@
 // about "no buildable Go source files "
 
 //go:build plan9
-// +build plan9
 
+// Package hdfs provides an interface to the HDFS storage system.
 package hdfs
@@ -1,14 +1,15 @@
 //go:build !plan9
-// +build !plan9
 
 package hdfs
 
 import (
 	"context"
+	"errors"
 	"io"
 	"path"
 	"time"
 
+	"github.com/colinmarc/hdfs/v2"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/readers"
@@ -106,7 +107,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 
 // Update object
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	realpath := o.fs.realpath(src.Remote())
+	realpath := o.fs.realpath(o.remote)
 	dirname := path.Dir(realpath)
 	fs.Debugf(o.fs, "update [%s]", realpath)
 
@@ -141,7 +142,23 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 
-	err = out.Close()
+	// If the datanodes have acknowledged all writes but not yet
+	// to the namenode, FileWriter.Close can return ErrReplicating
+	// (wrapped in an os.PathError). This indicates that all data
+	// has been written, but the lease is still open for the file.
+	//
+	// It is safe in this case to either ignore the error (and let
+	// the lease expire on its own) or to call Close multiple
+	// times until it completes without an error. The Java client,
+	// for context, always chooses to retry, with exponential
+	// backoff.
+	err = o.fs.pacer.Call(func() (bool, error) {
+		err := out.Close()
+		if err == nil {
+			return false, nil
+		}
+		return errors.Is(err, hdfs.ErrReplicating), err
+	})
 	if err != nil {
 		cleanup()
 		return err
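The retry predicate above matches the sentinel with errors.Is rather than a direct comparison because, as the comment notes, the hdfs client wraps ErrReplicating in an *os.PathError. A minimal, self-contained sketch of that distinction (the path is made up; only the wrapping behaviour matters):

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/colinmarc/hdfs/v2"
)

func main() {
	// Simulate what FileWriter.Close is described as returning above:
	// the ErrReplicating sentinel wrapped in an *os.PathError.
	var err error = &os.PathError{Op: "close", Path: "/tmp/example", Err: hdfs.ErrReplicating}

	fmt.Println(err == hdfs.ErrReplicating)          // false: the sentinel is wrapped
	fmt.Println(errors.Is(err, hdfs.ErrReplicating)) // true: errors.Is unwraps the PathError
}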
@@ -762,6 +762,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	return nil
 }
 
+// Shutdown shutdown the fs
+func (f *Fs) Shutdown(ctx context.Context) error {
+	f.tokenRenewer.Shutdown()
+	return nil
+}
+
 // ------------------------------------------------------------
 
 // Fs returns the parent Fs.
@@ -997,6 +1003,7 @@ var (
 	_ fs.Copier      = (*Fs)(nil)
 	_ fs.Mover       = (*Fs)(nil)
 	_ fs.DirMover    = (*Fs)(nil)
+	_ fs.Shutdowner  = (*Fs)(nil)
 	_ fs.Object      = (*Object)(nil)
 	_ fs.IDer        = (*Object)(nil)
 )
@@ -36,6 +36,7 @@ func init() {
 		Name:        "http",
 		Description: "HTTP",
 		NewFs:       NewFs,
+		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name: "url",
 			Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
@@ -88,6 +89,10 @@ that directory listings are much quicker, but rclone won't have the times or
 sizes of any files, and some files that don't exist may be in the listing.`,
 			Default:  false,
 			Advanced: true,
+		}, {
+			Name:    "no_escape",
+			Help:    "Do not escape URL metacharacters in path names.",
+			Default: false,
 		}},
 	}
 	fs.Register(fsi)
@@ -99,6 +104,7 @@ type Options struct {
 	NoSlash  bool            `config:"no_slash"`
 	NoHead   bool            `config:"no_head"`
 	Headers  fs.CommaSepList `config:"headers"`
+	NoEscape bool            `config:"no_escape"`
 }
 
 // Fs stores the interface to the remote HTTP files
@@ -210,6 +216,42 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
 	return createFileResult()
 }
 
+// Make the http connection with opt
+func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
+	if len(opt.Headers)%2 != 0 {
+		return false, errors.New("odd number of headers supplied")
+	}
+
+	if !strings.HasSuffix(opt.Endpoint, "/") {
+		opt.Endpoint += "/"
+	}
+
+	// Parse the endpoint and stick the root onto it
+	base, err := url.Parse(opt.Endpoint)
+	if err != nil {
+		return false, err
+	}
+	u, err := rest.URLJoin(base, rest.URLPathEscape(f.root))
+	if err != nil {
+		return false, err
+	}
+
+	client := fshttp.NewClient(ctx)
+
+	endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
+	fs.Debugf(nil, "Root: %s", endpoint)
+	u, err = url.Parse(endpoint)
+	if err != nil {
+		return false, err
+	}
+
+	// Update f with the new parameters
+	f.httpClient = client
+	f.endpoint = u
+	f.endpointURL = u.String()
+	return isFile, nil
+}
+
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -220,47 +262,23 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
 
-	if len(opt.Headers)%2 != 0 {
-		return nil, errors.New("odd number of headers supplied")
-	}
-
-	if !strings.HasSuffix(opt.Endpoint, "/") {
-		opt.Endpoint += "/"
-	}
-
-	// Parse the endpoint and stick the root onto it
-	base, err := url.Parse(opt.Endpoint)
-	if err != nil {
-		return nil, err
-	}
-	u, err := rest.URLJoin(base, rest.URLPathEscape(root))
-	if err != nil {
-		return nil, err
-	}
-
-	client := fshttp.NewClient(ctx)
-
-	endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
-	fs.Debugf(nil, "Root: %s", endpoint)
-	u, err = url.Parse(endpoint)
-	if err != nil {
-		return nil, err
-	}
-
 	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
 		ci:          ci,
-		httpClient:  client,
-		endpoint:    u,
-		endpointURL: u.String(),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 	}).Fill(ctx, f)
+
+	// Make the http connection
+	isFile, err := f.httpConnection(ctx, opt)
+	if err != nil {
+		return nil, err
+	}
+
 	if isFile {
 		// return an error with an fs which points to the parent
 		return f, fs.ErrorIsFile
@@ -313,6 +331,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 
 // Join's the remote onto the base URL
 func (f *Fs) url(remote string) string {
+	if f.opt.NoEscape {
+		// Directly concatenate without escaping, no_escape behavior
+		return f.endpointURL + remote
+	}
+	// Default behavior
 	return f.endpointURL + rest.URLPathEscape(remote)
 }
 
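For context on the new no_escape option and the url method above: with no_escape set, the remote path is concatenated onto the endpoint verbatim, while the default path percent-escapes metacharacters. A standalone sketch of the difference, using the standard library instead of rclone's rest.URLPathEscape helper (the endpoint and file name are made up):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// pathEscape escapes each segment of p but keeps the "/" separators,
// roughly what escaping a remote path entails.
func pathEscape(p string) string {
	segs := strings.Split(p, "/")
	for i, s := range segs {
		segs[i] = url.PathEscape(s)
	}
	return strings.Join(segs, "/")
}

func main() {
	endpoint := "https://example.com/files/"
	remote := "reports/2024 Q1 #final.txt"

	fmt.Println(endpoint + remote)             // no_escape = true: path sent verbatim
	fmt.Println(endpoint + pathEscape(remote)) // default: metacharacters percent-escaped
}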
@@ -685,10 +708,66 @@ func (o *Object) MimeType(ctx context.Context) string {
 	return o.contentType
 }
 
+var commandHelp = []fs.CommandHelp{{
+	Name:  "set",
+	Short: "Set command for updating the config parameters.",
+	Long: `This set command can be used to update the config parameters
+for a running http backend.
+
+Usage Examples:
+
+    rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+    rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+    rclone rc backend/command command=set fs=remote: -o url=https://example.com
+
+The option keys are named as they are in the config file.
+
+This rebuilds the connection to the http backend when it is called with
+the new parameters. Only new parameters need be passed as the values
+will default to those currently in use.
+
+It doesn't return anything.
+`,
+}}
+
+// Command the backend to run a named command
+//
+// The command run is name
+// args may be used to read arguments from
+// opts may be used to read optional arguments from
+//
+// The result should be capable of being JSON encoded
+// If it is a string or a []string it will be shown to the user
+// otherwise it will be JSON encoded and shown to the user like that
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+	switch name {
+	case "set":
+		newOpt := f.opt
+		err := configstruct.Set(configmap.Simple(opt), &newOpt)
+		if err != nil {
+			return nil, fmt.Errorf("reading config: %w", err)
+		}
+		_, err = f.httpConnection(ctx, &newOpt)
+		if err != nil {
+			return nil, fmt.Errorf("updating session: %w", err)
+		}
+		f.opt = newOpt
+		keys := []string{}
+		for k := range opt {
+			keys = append(keys, k)
+		}
+		fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
+		return nil, nil
+	default:
+		return nil, fs.ErrorCommandNotFound
+	}
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs          = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
+	_ fs.Commander   = &Fs{}
 )
backend/iclouddrive/api/client.go (new file, 166 lines)
@@ -0,0 +1,166 @@
// Package api provides functionality for interacting with the iCloud API.
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
"github.com/rclone/rclone/lib/rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
baseEndpoint = "https://www.icloud.com"
|
||||||
|
homeEndpoint = "https://www.icloud.com"
|
||||||
|
setupEndpoint = "https://setup.icloud.com/setup/ws/1"
|
||||||
|
authEndpoint = "https://idmsa.apple.com/appleauth/auth"
|
||||||
|
)
|
||||||
|
|
||||||
|
type sessionSave func(*Session)
|
||||||
|
|
||||||
|
// Client defines the client configuration
|
||||||
|
type Client struct {
|
||||||
|
appleID string
|
||||||
|
password string
|
||||||
|
srv *rest.Client
|
||||||
|
Session *Session
|
||||||
|
sessionSaveCallback sessionSave
|
||||||
|
|
||||||
|
drive *DriveService
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new Client instance with the provided Apple ID, password, trust token, cookies, and session save callback.
|
||||||
|
//
|
||||||
|
// Parameters:
|
||||||
|
// - appleID: the Apple ID of the user.
|
||||||
|
// - password: the password of the user.
|
||||||
|
// - trustToken: the trust token for the session.
|
||||||
|
// - clientID: the client id for the session.
|
||||||
|
// - cookies: the cookies for the session.
|
||||||
|
// - sessionSaveCallback: the callback function to save the session.
|
||||||
|
func New(appleID, password, trustToken string, clientID string, cookies []*http.Cookie, sessionSaveCallback sessionSave) (*Client, error) {
|
||||||
|
icloud := &Client{
|
||||||
|
appleID: appleID,
|
||||||
|
password: password,
|
||||||
|
srv: rest.NewClient(fshttp.NewClient(context.Background())),
|
||||||
|
Session: NewSession(),
|
||||||
|
sessionSaveCallback: sessionSaveCallback,
|
||||||
|
}
|
||||||
|
|
||||||
|
icloud.Session.TrustToken = trustToken
|
||||||
|
icloud.Session.Cookies = cookies
|
||||||
|
icloud.Session.ClientID = clientID
|
||||||
|
return icloud, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DriveService returns the DriveService instance associated with the Client.
|
||||||
|
func (c *Client) DriveService() (*DriveService, error) {
|
||||||
|
var err error
|
||||||
|
if c.drive == nil {
|
||||||
|
c.drive, err = NewDriveService(c)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return c.drive, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request makes a request and retries it if the session is invalid.
|
||||||
|
//
|
||||||
|
// This function is the main entry point for making requests to the iCloud
|
||||||
|
// API. If the initial request returns a 401 (Unauthorized), it will try to
|
||||||
|
// reauthenticate and retry the request.
|
||||||
|
func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
|
||||||
|
resp, err = c.Session.Request(ctx, opts, request, response)
|
||||||
|
if err != nil && resp != nil {
|
||||||
|
// try to reauth
|
||||||
|
if resp.StatusCode == 401 || resp.StatusCode == 421 {
|
||||||
|
err = c.Authenticate(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Session.Requires2FA() {
|
||||||
|
return nil, errors.New("trust token expired, please reauth")
|
||||||
|
}
|
||||||
|
return c.RequestNoReAuth(ctx, opts, request, response)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestNoReAuth makes a request without re-authenticating.
|
||||||
|
//
|
||||||
|
// This function is useful when you have a session that is already
|
||||||
|
// authenticated, but you need to make a request without triggering
|
||||||
|
// a re-authentication.
|
||||||
|
func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
|
||||||
|
// Make the request without re-authenticating
|
||||||
|
resp, err = c.Session.Request(ctx, opts, request, response)
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Authenticate authenticates the client with the iCloud API.
|
||||||
|
func (c *Client) Authenticate(ctx context.Context) error {
|
||||||
|
if c.Session.Cookies != nil {
|
||||||
|
if err := c.Session.ValidateSession(ctx); err == nil {
|
||||||
|
fs.Debugf("icloud", "Valid session, no need to reauth")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
c.Session.Cookies = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.Debugf("icloud", "Authenticating as %s\n", c.appleID)
|
||||||
|
err := c.Session.SignIn(ctx, c.appleID, c.password)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
err = c.Session.AuthWithToken(ctx)
|
||||||
|
if err == nil && c.sessionSaveCallback != nil {
|
||||||
|
c.sessionSaveCallback(c.Session)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
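// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the new file): how the pieces above
// are expected to fit together. The credentials, trust token, client ID and
// path are placeholders, and the session-save callback here discards the
// session; a real caller would persist it for reuse.
//
//	client, err := New("user@example.com", "password", "trust-token", "client-id", nil, func(s *Session) {
//		// persist s somewhere so the next run can reuse the cookies and trust token
//	})
//	if err != nil {
//		return err
//	}
//	if err := client.Authenticate(ctx); err != nil {
//		return err
//	}
//	drive, err := client.DriveService()
//	if err != nil {
//		return err
//	}
//	item, _, err := drive.GetItemByPath(ctx, "/Documents/notes.txt")
// ---------------------------------------------------------------------------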
|
|
||||||
|
// SignIn signs in the client using the provided context and credentials.
|
||||||
|
func (c *Client) SignIn(ctx context.Context) error {
|
||||||
|
return c.Session.SignIn(ctx, c.appleID, c.password)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntoReader marshals the provided values into a JSON encoded reader
|
||||||
|
func IntoReader(values any) (*bytes.Reader, error) {
|
||||||
|
m, err := json.Marshal(values)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return bytes.NewReader(m), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestError holds info on a result state; iCloud can return a 200 but the result is still unknown
|
||||||
|
type RequestError struct {
|
||||||
|
Status string
|
||||||
|
Text string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error satisfies the error interface.
|
||||||
|
func (e *RequestError) Error() string {
|
||||||
|
return fmt.Sprintf("%s: %s", e.Text, e.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRequestError(Status string, Text string) *RequestError {
|
||||||
|
return &RequestError{
|
||||||
|
Status: strings.ToLower(Status),
|
||||||
|
Text: Text,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newRequestErrorf makes a new error from sprintf parameters.
|
||||||
|
func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
|
||||||
|
return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
|
||||||
|
}
|
||||||
backend/iclouddrive/api/drive.go (new file, 913 lines)
@@ -0,0 +1,913 @@
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"mime"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/lib/rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultZone = "com.apple.CloudDocs"
|
||||||
|
statusOk = "OK"
|
||||||
|
statusEtagConflict = "ETAG_CONFLICT"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DriveService represents an iCloud Drive service.
|
||||||
|
type DriveService struct {
|
||||||
|
icloud *Client
|
||||||
|
RootID string
|
||||||
|
endpoint string
|
||||||
|
docsEndpoint string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDriveService creates a new DriveService instance.
|
||||||
|
func NewDriveService(icloud *Client) (*DriveService, error) {
|
||||||
|
return &DriveService{icloud: icloud, RootID: "FOLDER::com.apple.CloudDocs::root", endpoint: icloud.Session.AccountInfo.Webservices["drivews"].URL, docsEndpoint: icloud.Session.AccountInfo.Webservices["docws"].URL}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetItemByDriveID retrieves a DriveItem by its Drive ID.
|
||||||
|
func (d *DriveService) GetItemByDriveID(ctx context.Context, id string, includeChildren bool) (*DriveItem, *http.Response, error) {
|
||||||
|
items, resp, err := d.GetItemsByDriveID(ctx, []string{id}, includeChildren)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
return items[0], resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetItemsByDriveID retrieves DriveItems by their Drive IDs.
|
||||||
|
func (d *DriveService) GetItemsByDriveID(ctx context.Context, ids []string, includeChildren bool) ([]*DriveItem, *http.Response, error) {
|
||||||
|
var err error
|
||||||
|
_items := []map[string]any{}
|
||||||
|
for _, id := range ids {
|
||||||
|
_items = append(_items, map[string]any{
|
||||||
|
"drivewsid": id,
|
||||||
|
"partialData": false,
|
||||||
|
"includeHierarchy": false,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
var body *bytes.Reader
|
||||||
|
var path string
|
||||||
|
if !includeChildren {
|
||||||
|
values := []map[string]any{{
|
||||||
|
"items": _items,
|
||||||
|
}}
|
||||||
|
body, err = IntoReader(values)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
path = "/retrieveItemDetails"
|
||||||
|
} else {
|
||||||
|
values := _items
|
||||||
|
body, err = IntoReader(values)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
path = "/retrieveItemDetailsInFolders"
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: path,
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.endpoint,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
var items []*DriveItem
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &items)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return items, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDocByPath retrieves a document by its path.
|
||||||
|
func (d *DriveService) GetDocByPath(ctx context.Context, path string) (*Document, *http.Response, error) {
|
||||||
|
values := url.Values{}
|
||||||
|
values.Set("unified_format", "false")
|
||||||
|
body, err := IntoReader(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/ws/" + defaultZone + "/list/lookup_by_path",
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.docsEndpoint,
|
||||||
|
Parameters: values,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
var item []*Document
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &item)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return item[0], resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetItemByPath retrieves a DriveItem by its path.
|
||||||
|
func (d *DriveService) GetItemByPath(ctx context.Context, path string) (*DriveItem, *http.Response, error) {
|
||||||
|
values := url.Values{}
|
||||||
|
values.Set("unified_format", "true")
|
||||||
|
|
||||||
|
body, err := IntoReader(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/ws/" + defaultZone + "/list/lookup_by_path",
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.docsEndpoint,
|
||||||
|
Parameters: values,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
var item []*DriveItem
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &item)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return item[0], resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDocByItemID retrieves a document by its item ID.
|
||||||
|
func (d *DriveService) GetDocByItemID(ctx context.Context, id string) (*Document, *http.Response, error) {
|
||||||
|
values := url.Values{}
|
||||||
|
values.Set("document_id", id)
|
||||||
|
values.Set("unified_format", "false") // important
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
Path: "/ws/" + defaultZone + "/list/lookup_by_id",
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.docsEndpoint,
|
||||||
|
Parameters: values,
|
||||||
|
}
|
||||||
|
var item *Document
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &item)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return item, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetItemRawByItemID retrieves a DriveItemRaw by its item ID.
|
||||||
|
func (d *DriveService) GetItemRawByItemID(ctx context.Context, id string) (*DriveItemRaw, *http.Response, error) {
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
Path: "/v1/item/" + id,
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.docsEndpoint,
|
||||||
|
}
|
||||||
|
var item *DriveItemRaw
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &item)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return item, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetItemsInFolder retrieves a list of DriveItemRaw objects in a folder with the given ID.
|
||||||
|
func (d *DriveService) GetItemsInFolder(ctx context.Context, id string, limit int64) ([]*DriveItemRaw, *http.Response, error) {
|
||||||
|
values := url.Values{}
|
||||||
|
values.Set("limit", strconv.FormatInt(limit, 10))
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
Path: "/v1/enumerate/" + id,
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.docsEndpoint,
|
||||||
|
Parameters: values,
|
||||||
|
}
|
||||||
|
|
||||||
|
items := struct {
|
||||||
|
Items []*DriveItemRaw `json:"drive_item"`
|
||||||
|
}{}
|
||||||
|
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &items)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return items.Items, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDownloadURLByDriveID retrieves the download URL for a file in the DriveService.
|
||||||
|
func (d *DriveService) GetDownloadURLByDriveID(ctx context.Context, id string) (string, *http.Response, error) {
|
||||||
|
_, zone, docid := DeconstructDriveID(id)
|
||||||
|
values := url.Values{}
|
||||||
|
values.Set("document_id", docid)
|
||||||
|
|
||||||
|
if zone == "" {
|
||||||
|
zone = defaultZone
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
Path: "/ws/" + zone + "/download/by_id",
|
||||||
|
Parameters: values,
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.docsEndpoint,
|
||||||
|
}
|
||||||
|
|
||||||
|
var filer *FileRequest
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &filer)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return "", resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var url string
|
||||||
|
if filer.DataToken != nil {
|
||||||
|
url = filer.DataToken.URL
|
||||||
|
} else {
|
||||||
|
url = filer.PackageToken.URL
|
||||||
|
}
|
||||||
|
|
||||||
|
return url, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile downloads a file from the given URL using the provided options.
|
||||||
|
func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.OpenOption) (*http.Response, error) {
|
||||||
|
opts := &rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: url,
|
||||||
|
Options: opt,
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := d.icloud.srv.Call(ctx, opts)
|
||||||
|
if err != nil {
|
||||||
|
// icloud has some weird http codes
|
||||||
|
if resp.StatusCode == 330 {
|
||||||
|
loc, err := resp.Location()
|
||||||
|
if err == nil {
|
||||||
|
return d.DownloadFile(ctx, loc.String(), opt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
return d.icloud.srv.Call(ctx, opts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
|
||||||
|
func (d *DriveService) MoveItemToTrashByItemID(ctx context.Context, id, etag string, force bool) (*DriveItem, *http.Response, error) {
|
||||||
|
doc, resp, err := d.GetDocByItemID(ctx, id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
return d.MoveItemToTrashByID(ctx, doc.DriveID(), etag, force)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveItemToTrashByID moves an item to the trash based on the item ID.
|
||||||
|
func (d *DriveService) MoveItemToTrashByID(ctx context.Context, drivewsid, etag string, force bool) (*DriveItem, *http.Response, error) {
|
||||||
|
values := map[string]any{
|
||||||
|
"items": []map[string]any{{
|
||||||
|
"drivewsid": drivewsid,
|
||||||
|
"etag": etag,
|
||||||
|
"clientId": drivewsid,
|
||||||
|
}}}
|
||||||
|
|
||||||
|
body, err := IntoReader(values)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/moveItemsToTrash",
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.endpoint,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
|
||||||
|
item := struct {
|
||||||
|
Items []*DriveItem `json:"items"`
|
||||||
|
}{}
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &item)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if item.Items[0].Status != statusOk {
|
||||||
|
// rerun with latest etag
|
||||||
|
if force && item.Items[0].Status == "ETAG_CONFLICT" {
|
||||||
|
return d.MoveItemToTrashByID(ctx, drivewsid, item.Items[0].Etag, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = newRequestError(item.Items[0].Status, "unknown request status")
|
||||||
|
}
|
||||||
|
|
||||||
|
return item.Items[0], resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateNewFolderByItemID creates a new folder by item ID.
|
||||||
|
func (d *DriveService) CreateNewFolderByItemID(ctx context.Context, id, name string) (*DriveItem, *http.Response, error) {
|
||||||
|
doc, resp, err := d.GetDocByItemID(ctx, id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
return d.CreateNewFolderByDriveID(ctx, doc.DriveID(), name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateNewFolderByDriveID creates a new folder by its Drive ID.
|
||||||
|
func (d *DriveService) CreateNewFolderByDriveID(ctx context.Context, drivewsid, name string) (*DriveItem, *http.Response, error) {
|
||||||
|
values := map[string]any{
|
||||||
|
"destinationDrivewsId": drivewsid,
|
||||||
|
"folders": []map[string]any{{
|
||||||
|
"clientId": "FOLDER::UNKNOWN_ZONE::TempId-" + uuid.New().String(),
|
||||||
|
"name": name,
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := IntoReader(values)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/createFolders",
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.endpoint,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
var fResp *CreateFoldersResponse
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &fResp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
status := fResp.Folders[0].Status
|
||||||
|
if status != statusOk {
|
||||||
|
err = newRequestError(status, "unknown request status")
|
||||||
|
}
|
||||||
|
|
||||||
|
return fResp.Folders[0], resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenameItemByItemID renames a DriveItem by its item ID.
|
||||||
|
func (d *DriveService) RenameItemByItemID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
|
||||||
|
doc, resp, err := d.GetDocByItemID(ctx, id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
return d.RenameItemByDriveID(ctx, doc.DriveID(), doc.Etag, name, force)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenameItemByDriveID renames a DriveItem by its drive ID.
|
||||||
|
func (d *DriveService) RenameItemByDriveID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
|
||||||
|
values := map[string]any{
|
||||||
|
"items": []map[string]any{{
|
||||||
|
"drivewsid": id,
|
||||||
|
"name": name,
|
||||||
|
"etag": etag,
|
||||||
|
// "extension": split[1],
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := IntoReader(values)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/renameItems",
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.endpoint,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
var items *DriveItem
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &items)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
status := items.Items[0].Status
|
||||||
|
if status != statusOk {
|
||||||
|
// rerun with latest etag
|
||||||
|
if force && status == "ETAG_CONFLICT" {
|
||||||
|
return d.RenameItemByDriveID(ctx, id, items.Items[0].Etag, name, false)
|
||||||
|
}
|
||||||
|
err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
|
||||||
|
}
|
||||||
|
|
||||||
|
return items.Items[0], resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveItemByItemID moves an item by its item ID to a destination item ID.
|
||||||
|
func (d *DriveService) MoveItemByItemID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
|
||||||
|
docSrc, resp, err := d.GetDocByItemID(ctx, id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
docDst, resp, err := d.GetDocByItemID(ctx, dstID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
return d.MoveItemByDriveID(ctx, docSrc.DriveID(), docSrc.Etag, docDst.DriveID(), force)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveItemByDocID moves an item by its doc ID.
|
||||||
|
// func (d *DriveService) MoveItemByDocID(ctx context.Context, srcDocID, srcEtag, dstDocID string, force bool) (*DriveItem, *http.Response, error) {
|
||||||
|
// return d.MoveItemByDriveID(ctx, srcDocID, srcEtag, docDst.DriveID(), force)
|
||||||
|
// }
|
||||||
|
|
||||||
|
// MoveItemByDriveID moves an item by its drive ID.
|
||||||
|
func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
|
||||||
|
values := map[string]any{
|
||||||
|
"destinationDrivewsId": dstID,
|
||||||
|
"items": []map[string]any{{
|
||||||
|
"drivewsid": id,
|
||||||
|
"etag": etag,
|
||||||
|
"clientId": id,
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := IntoReader(values)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/moveItems",
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.endpoint,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
|
||||||
|
var items *DriveItem
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &items)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
status := items.Items[0].Status
|
||||||
|
if status != statusOk {
|
||||||
|
// rerun with latest etag
|
||||||
|
if force && status == "ETAG_CONFLICT" {
|
||||||
|
return d.MoveItemByDriveID(ctx, id, items.Items[0].Etag, dstID, false)
|
||||||
|
}
|
||||||
|
err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
|
||||||
|
}
|
||||||
|
|
||||||
|
return items.Items[0], resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyDocByItemID copies a document by its item ID.
|
||||||
|
func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
|
||||||
|
// Putting the name in info doesn't work, but the extension does, so assume this is a bug in the endpoint.
|
||||||
|
values := map[string]any{
|
||||||
|
"info_to_update": map[string]any{},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := IntoReader(values)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/v1/item/copy/" + itemID,
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.docsEndpoint,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
|
||||||
|
var info *DriveItemRaw
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &info)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
return info, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateUpload creates a URL for an upload.
|
||||||
|
func (d *DriveService) CreateUpload(ctx context.Context, size int64, name string) (*UploadResponse, *http.Response, error) {
|
||||||
|
// first we need to request an upload url
|
||||||
|
values := map[string]any{
|
||||||
|
"filename": name,
|
||||||
|
"type": "FILE",
|
||||||
|
"size": strconv.FormatInt(size, 10),
|
||||||
|
"content_type": GetContentTypeForFile(name),
|
||||||
|
}
|
||||||
|
body, err := IntoReader(values)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/ws/" + defaultZone + "/upload/web",
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.docsEndpoint,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
var responseInfo []*UploadResponse
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
return responseInfo[0], resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Upload uploads a file to the given URL.
|
||||||
|
func (d *DriveService) Upload(ctx context.Context, in io.Reader, size int64, name, uploadURL string) (*SingleFileResponse, *http.Response, error) {
|
||||||
|
// TODO: implement multipart upload
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: uploadURL,
|
||||||
|
Body: in,
|
||||||
|
ContentLength: &size,
|
||||||
|
ContentType: GetContentTypeForFile(name),
|
||||||
|
// MultipartContentName: "files",
|
||||||
|
MultipartFileName: name,
|
||||||
|
}
|
||||||
|
var singleFileResponse *SingleFileResponse
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &singleFileResponse)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
return singleFileResponse, resp, err
|
||||||
|
}
|
||||||
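// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the new file): the two-step upload flow the
// functions above suggest. The document ID handling and the final UpdateFile
// wiring are assumptions based on the field names here, not a verified flow.
//
//	up, _, err := d.CreateUpload(ctx, size, "notes.txt") // 1. ask for an upload URL
//	if err != nil {
//		return err
//	}
//	single, _, err := d.Upload(ctx, reader, size, "notes.txt", up.URL) // 2. POST the content
//	if err != nil {
//		return err
//	}
//	r := NewUpdateFileInfo() // 3. register the new revision
//	r.DocumentID = up.DocumentID
//	r.Data.Signature = single.SingleFile.Signature
//	r.Data.ReferenceSignature = single.SingleFile.ReferenceSignature
//	r.Data.WrappingKey = single.SingleFile.WrappingKey
//	r.Data.Receipt = single.SingleFile.Receipt
//	r.Data.Size = single.SingleFile.Size
//	_, _, err = d.UpdateFile(ctx, &r)
// ---------------------------------------------------------------------------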
|
|
||||||
|
// UpdateFile updates a file in the DriveService.
|
||||||
|
//
|
||||||
|
// ctx: the context.Context object for the request.
|
||||||
|
// r: a pointer to the UpdateFileInfo struct containing the information for the file update.
|
||||||
|
// Returns a pointer to the DriveItem struct representing the updated file, the http.Response object, and an error if any.
|
||||||
|
func (d *DriveService) UpdateFile(ctx context.Context, r *UpdateFileInfo) (*DriveItem, *http.Response, error) {
|
||||||
|
body, err := IntoReader(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/ws/" + defaultZone + "/update/documents",
|
||||||
|
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: d.docsEndpoint,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
var responseInfo *DocumentUpdateResponse
|
||||||
|
resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
doc := responseInfo.Results[0].Document
|
||||||
|
item := DriveItem{
|
||||||
|
Drivewsid: "FILE::com.apple.CloudDocs::" + doc.DocumentID,
|
||||||
|
Docwsid: doc.DocumentID,
|
||||||
|
Itemid: doc.ItemID,
|
||||||
|
Etag: doc.Etag,
|
||||||
|
ParentID: doc.ParentID,
|
||||||
|
DateModified: time.Unix(r.Mtime, 0),
|
||||||
|
DateCreated: time.Unix(r.Mtime, 0),
|
||||||
|
Type: doc.Type,
|
||||||
|
Name: doc.Name,
|
||||||
|
Size: doc.Size,
|
||||||
|
}
|
||||||
|
|
||||||
|
return &item, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateFileInfo represents the information for an update to a file in the DriveService.
|
||||||
|
type UpdateFileInfo struct {
|
||||||
|
AllowConflict bool `json:"allow_conflict"`
|
||||||
|
Btime int64 `json:"btime"`
|
||||||
|
Command string `json:"command"`
|
||||||
|
CreateShortGUID bool `json:"create_short_guid"`
|
||||||
|
Data struct {
|
||||||
|
Receipt string `json:"receipt,omitempty"`
|
||||||
|
ReferenceSignature string `json:"reference_signature,omitempty"`
|
||||||
|
Signature string `json:"signature,omitempty"`
|
||||||
|
Size int64 `json:"size,omitempty"`
|
||||||
|
WrappingKey string `json:"wrapping_key,omitempty"`
|
||||||
|
} `json:"data,omitempty"`
|
||||||
|
DocumentID string `json:"document_id"`
|
||||||
|
FileFlags FileFlags `json:"file_flags"`
|
||||||
|
Mtime int64 `json:"mtime"`
|
||||||
|
Path struct {
|
||||||
|
Path string `json:"path"`
|
||||||
|
StartingDocumentID string `json:"starting_document_id"`
|
||||||
|
} `json:"path"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileFlags defines the file flags for a document.
|
||||||
|
type FileFlags struct {
|
||||||
|
IsExecutable bool `json:"is_executable"`
|
||||||
|
IsHidden bool `json:"is_hidden"`
|
||||||
|
IsWritable bool `json:"is_writable"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewUpdateFileInfo creates a new UpdateFileInfo object with default values.
|
||||||
|
//
|
||||||
|
// Returns an UpdateFileInfo object.
|
||||||
|
func NewUpdateFileInfo() UpdateFileInfo {
|
||||||
|
return UpdateFileInfo{
|
||||||
|
Command: "add_file",
|
||||||
|
CreateShortGUID: true,
|
||||||
|
AllowConflict: true,
|
||||||
|
FileFlags: FileFlags{
|
||||||
|
IsExecutable: true,
|
||||||
|
IsHidden: false,
|
||||||
|
IsWritable: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DriveItemRaw is a raw drive item.
|
||||||
|
// Not sure what to call this, but there seems to be a "unified" and a non-"unified" drive item response. This is the non-unified one.
|
||||||
|
type DriveItemRaw struct {
|
||||||
|
ItemID string `json:"item_id"`
|
||||||
|
ItemInfo *DriveItemRawInfo `json:"item_info"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SplitName splits the name of a DriveItemRaw into its name and extension.
|
||||||
|
//
|
||||||
|
// It returns the name and extension as separate strings. If the name ends with a dot,
|
||||||
|
// it means there is no extension, so an empty string is returned for the extension.
|
||||||
|
// If the name does not contain a dot, there is no extension either, so the name and an empty extension are returned.
|
||||||
|
func (d *DriveItemRaw) SplitName() (string, string) {
|
||||||
|
name := d.ItemInfo.Name
|
||||||
|
// ends with a dot, no extension
|
||||||
|
if strings.HasSuffix(name, ".") {
|
||||||
|
return name, ""
|
||||||
|
}
|
||||||
|
lastInd := strings.LastIndex(name, ".")
|
||||||
|
|
||||||
|
if lastInd == -1 {
|
||||||
|
return name, ""
|
||||||
|
}
|
||||||
|
return name[:lastInd], name[lastInd+1:]
|
||||||
|
}
|
||||||
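// For example (illustrative values): "archive.tar.gz" splits into
// ("archive.tar", "gz"), "README" into ("README", ""), and "notes." into
// ("notes.", "").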
|
|
||||||
|
// ModTime returns the modification time of the DriveItemRaw.
|
||||||
|
//
|
||||||
|
// It parses the ModifiedAt field of the ItemInfo struct and converts it to a time.Time value.
|
||||||
|
// If the parsing fails, it returns the zero value of time.Time.
|
||||||
|
// The returned time.Time value represents the modification time of the DriveItemRaw.
|
||||||
|
func (d *DriveItemRaw) ModTime() time.Time {
|
||||||
|
i, err := strconv.ParseInt(d.ItemInfo.ModifiedAt, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
return time.UnixMilli(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedTime returns the creation time of the DriveItemRaw.
|
||||||
|
//
|
||||||
|
// It parses the CreatedAt field of the ItemInfo struct and converts it to a time.Time value.
|
||||||
|
// If the parsing fails, it returns the zero value of time.Time.
|
||||||
|
// The returned time.Time value represents the creation time of the DriveItemRaw.
|
||||||
|
func (d *DriveItemRaw) CreatedTime() time.Time {
|
||||||
|
i, err := strconv.ParseInt(d.ItemInfo.CreatedAt, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
return time.UnixMilli(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DriveItemRawInfo is the raw information about a drive item.
|
||||||
|
type DriveItemRawInfo struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
// Extension is broken on these endpoints, so don't use it.
|
||||||
|
Extension string `json:"extension"`
|
||||||
|
Size int64 `json:"size,string"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Version string `json:"version"`
|
||||||
|
ModifiedAt string `json:"modified_at"`
|
||||||
|
CreatedAt string `json:"created_at"`
|
||||||
|
Urls struct {
|
||||||
|
URLDownload string `json:"url_download"`
|
||||||
|
} `json:"urls"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntoDriveItem converts a DriveItemRaw into a DriveItem.
|
||||||
|
//
|
||||||
|
// It takes no parameters.
|
||||||
|
// It returns a pointer to a DriveItem.
|
||||||
|
func (d *DriveItemRaw) IntoDriveItem() *DriveItem {
|
||||||
|
name, extension := d.SplitName()
|
||||||
|
return &DriveItem{
|
||||||
|
Itemid: d.ItemID,
|
||||||
|
Name: name,
|
||||||
|
Extension: extension,
|
||||||
|
Type: d.ItemInfo.Type,
|
||||||
|
Etag: d.ItemInfo.Version,
|
||||||
|
DateModified: d.ModTime(),
|
||||||
|
DateCreated: d.CreatedTime(),
|
||||||
|
Size: d.ItemInfo.Size,
|
||||||
|
Urls: d.ItemInfo.Urls,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DocumentUpdateResponse is the response of a document update request.
|
||||||
|
type DocumentUpdateResponse struct {
|
||||||
|
Status struct {
|
||||||
|
StatusCode int `json:"status_code"`
|
||||||
|
ErrorMessage string `json:"error_message"`
|
||||||
|
} `json:"status"`
|
||||||
|
Results []struct {
|
||||||
|
Status struct {
|
||||||
|
StatusCode int `json:"status_code"`
|
||||||
|
ErrorMessage string `json:"error_message"`
|
||||||
|
} `json:"status"`
|
||||||
|
OperationID interface{} `json:"operation_id"`
|
||||||
|
Document *Document `json:"document"`
|
||||||
|
} `json:"results"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Document represents a document on iCloud.
|
||||||
|
type Document struct {
|
||||||
|
Status struct {
|
||||||
|
StatusCode int `json:"status_code"`
|
||||||
|
ErrorMessage string `json:"error_message"`
|
||||||
|
} `json:"status"`
|
||||||
|
DocumentID string `json:"document_id"`
|
||||||
|
ItemID string `json:"item_id"`
|
||||||
|
Urls struct {
|
||||||
|
URLDownload string `json:"url_download"`
|
||||||
|
} `json:"urls"`
|
||||||
|
Etag string `json:"etag"`
|
||||||
|
ParentID string `json:"parent_id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Deleted bool `json:"deleted"`
|
||||||
|
Mtime int64 `json:"mtime"`
|
||||||
|
LastEditorName string `json:"last_editor_name"`
|
||||||
|
Data DocumentData `json:"data"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
Btime int64 `json:"btime"`
|
||||||
|
Zone string `json:"zone"`
|
||||||
|
FileFlags struct {
|
||||||
|
IsExecutable bool `json:"is_executable"`
|
||||||
|
IsWritable bool `json:"is_writable"`
|
||||||
|
IsHidden bool `json:"is_hidden"`
|
||||||
|
} `json:"file_flags"`
|
||||||
|
LastOpenedTime int64 `json:"lastOpenedTime"`
|
||||||
|
RestorePath interface{} `json:"restorePath"`
|
||||||
|
HasChainedParent bool `json:"hasChainedParent"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DriveID returns the drive ID of the Document.
|
||||||
|
func (d *Document) DriveID() string {
|
||||||
|
if d.Zone == "" {
|
||||||
|
d.Zone = defaultZone
|
||||||
|
}
|
||||||
|
return d.Type + "::" + d.Zone + "::" + d.DocumentID
|
||||||
|
}
|
||||||
|
|
||||||
|
// DocumentData represents the data of a document.
|
||||||
|
type DocumentData struct {
|
||||||
|
Signature string `json:"signature"`
|
||||||
|
Owner string `json:"owner"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
ReferenceSignature string `json:"reference_signature"`
|
||||||
|
WrappingKey string `json:"wrapping_key"`
|
||||||
|
PcsInfo string `json:"pcsInfo"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SingleFileResponse is the response of a single file request.
|
||||||
|
type SingleFileResponse struct {
|
||||||
|
SingleFile *SingleFileInfo `json:"singleFile"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SingleFileInfo represents the information of a single file.
|
||||||
|
type SingleFileInfo struct {
|
||||||
|
ReferenceSignature string `json:"referenceChecksum"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
Signature string `json:"fileChecksum"`
|
||||||
|
WrappingKey string `json:"wrappingKey"`
|
||||||
|
Receipt string `json:"receipt"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadResponse is the response of an upload request.
|
||||||
|
type UploadResponse struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
DocumentID string `json:"document_id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileRequestToken represents the token of a file request.
|
||||||
|
type FileRequestToken struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
Token string `json:"token"`
|
||||||
|
Signature string `json:"signature"`
|
||||||
|
WrappingKey string `json:"wrapping_key"`
|
||||||
|
ReferenceSignature string `json:"reference_signature"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileRequest represents the request of a file.
|
||||||
|
type FileRequest struct {
|
||||||
|
DocumentID string `json:"document_id"`
|
||||||
|
ItemID string `json:"item_id"`
|
||||||
|
OwnerDsid int64 `json:"owner_dsid"`
|
||||||
|
DataToken *FileRequestToken `json:"data_token,omitempty"`
|
||||||
|
PackageToken *FileRequestToken `json:"package_token,omitempty"`
|
||||||
|
DoubleEtag string `json:"double_etag"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateFoldersResponse is the response of a create folders request.
|
||||||
|
type CreateFoldersResponse struct {
|
||||||
|
Folders []*DriveItem `json:"folders"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DriveItem represents an item on iCloud.
|
||||||
|
type DriveItem struct {
|
||||||
|
DateCreated time.Time `json:"dateCreated"`
|
||||||
|
Drivewsid string `json:"drivewsid"`
|
||||||
|
Docwsid string `json:"docwsid"`
|
||||||
|
Itemid string `json:"item_id"`
|
||||||
|
Zone string `json:"zone"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
ParentID string `json:"parentId"`
|
||||||
|
Hierarchy []DriveItem `json:"hierarchy"`
|
||||||
|
Etag string `json:"etag"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
AssetQuota int64 `json:"assetQuota"`
|
||||||
|
FileCount int64 `json:"fileCount"`
|
||||||
|
ShareCount int64 `json:"shareCount"`
|
||||||
|
ShareAliasCount int64 `json:"shareAliasCount"`
|
||||||
|
DirectChildrenCount int64 `json:"directChildrenCount"`
|
||||||
|
Items []*DriveItem `json:"items"`
|
||||||
|
NumberOfItems int64 `json:"numberOfItems"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
Extension string `json:"extension,omitempty"`
|
||||||
|
DateModified time.Time `json:"dateModified,omitempty"`
|
||||||
|
DateChanged time.Time `json:"dateChanged,omitempty"`
|
||||||
|
Size int64 `json:"size,omitempty"`
|
||||||
|
LastOpenTime time.Time `json:"lastOpenTime,omitempty"`
|
||||||
|
Urls struct {
|
||||||
|
URLDownload string `json:"url_download"`
|
||||||
|
} `json:"urls"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsFolder returns true if the item is a folder.
|
||||||
|
func (d *DriveItem) IsFolder() bool {
|
||||||
|
return d.Type == "FOLDER" || d.Type == "APP_CONTAINER" || d.Type == "APP_LIBRARY"
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadURL returns the download URL of the item.
|
||||||
|
func (d *DriveItem) DownloadURL() string {
|
||||||
|
return d.Urls.URLDownload
|
||||||
|
}
|
||||||
|
|
||||||
|
// FullName returns the full name of the item.
|
||||||
|
// name + extension
|
||||||
|
func (d *DriveItem) FullName() string {
|
||||||
|
if d.Extension != "" {
|
||||||
|
return d.Name + "." + d.Extension
|
||||||
|
}
|
||||||
|
return d.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDocIDFromDriveID returns the DocumentID from the drive ID.
|
||||||
|
func GetDocIDFromDriveID(id string) string {
|
||||||
|
split := strings.Split(id, "::")
|
||||||
|
return split[len(split)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeconstructDriveID returns the document type, zone, and document ID from the drive ID.
|
||||||
|
func DeconstructDriveID(id string) (docType, zone, docid string) {
|
||||||
|
split := strings.Split(id, "::")
|
||||||
|
if len(split) < 3 {
|
||||||
|
return "", "", id
|
||||||
|
}
|
||||||
|
return split[0], split[1], split[2]
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConstructDriveID constructs a drive ID from the given components.
|
||||||
|
func ConstructDriveID(id string, zone string, t string) string {
|
||||||
|
return strings.Join([]string{t, zone, id}, "::")
|
||||||
|
}
|
||||||
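// For example (illustrative IDs): the root folder above uses the drive ID
// "FOLDER::com.apple.CloudDocs::root", which DeconstructDriveID splits into
// docType "FOLDER", zone "com.apple.CloudDocs" and docid "root";
// ConstructDriveID("root", "com.apple.CloudDocs", "FOLDER") rebuilds it.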
|
|
||||||
|
// GetContentTypeForFile detects content type for given file name.
|
||||||
|
func GetContentTypeForFile(name string) string {
|
||||||
|
// detect MIME type by looking at the filename only
|
||||||
|
mimeType := mime.TypeByExtension(filepath.Ext(name))
|
||||||
|
if mimeType == "" {
|
||||||
|
// api requires a mime type passed in
|
||||||
|
mimeType = "text/plain"
|
||||||
|
}
|
||||||
|
return strings.Split(mimeType, ";")[0]
|
||||||
|
}
|
||||||
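// For example (illustrative names): GetContentTypeForFile("photo.jpg") returns
// "image/jpeg" via mime.TypeByExtension, while a name with no known extension,
// such as "Makefile", falls back to "text/plain" because the API requires some
// content type to be sent.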
backend/iclouddrive/api/session.go (new file, 412 lines)
@@ -0,0 +1,412 @@
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/oracle/oci-go-sdk/v65/common"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
"github.com/rclone/rclone/lib/rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Session represents an iCloud session
|
||||||
|
type Session struct {
|
||||||
|
SessionToken string `json:"session_token"`
|
||||||
|
Scnt string `json:"scnt"`
|
||||||
|
SessionID string `json:"session_id"`
|
||||||
|
AccountCountry string `json:"account_country"`
|
||||||
|
TrustToken string `json:"trust_token"`
|
||||||
|
ClientID string `json:"client_id"`
|
||||||
|
Cookies []*http.Cookie `json:"cookies"`
|
||||||
|
AccountInfo AccountInfo `json:"account_info"`
|
||||||
|
|
||||||
|
srv *rest.Client `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the session as a string
|
||||||
|
// func (s *Session) String() string {
|
||||||
|
// jsession, _ := json.Marshal(s)
|
||||||
|
// return string(jsession)
|
||||||
|
// }
|
||||||
|
|
||||||
|
// Request makes a request
|
||||||
|
func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) {
|
||||||
|
resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if val := resp.Header.Get("X-Apple-ID-Account-Country"); val != "" {
|
||||||
|
s.AccountCountry = val
|
||||||
|
}
|
||||||
|
if val := resp.Header.Get("X-Apple-ID-Session-Id"); val != "" {
|
||||||
|
s.SessionID = val
|
||||||
|
}
|
||||||
|
if val := resp.Header.Get("X-Apple-Session-Token"); val != "" {
|
||||||
|
s.SessionToken = val
|
||||||
|
}
|
||||||
|
if val := resp.Header.Get("X-Apple-TwoSV-Trust-Token"); val != "" {
|
||||||
|
s.TrustToken = val
|
||||||
|
}
|
||||||
|
if val := resp.Header.Get("scnt"); val != "" {
|
||||||
|
s.Scnt = val
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Requires2FA returns true if the session requires 2FA
|
||||||
|
func (s *Session) Requires2FA() bool {
|
||||||
|
return s.AccountInfo.DsInfo.HsaVersion == 2 && s.AccountInfo.HsaChallengeRequired
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignIn signs in the session
|
||||||
|
func (s *Session) SignIn(ctx context.Context, appleID, password string) error {
|
||||||
|
trustTokens := []string{}
|
||||||
|
if s.TrustToken != "" {
|
||||||
|
trustTokens = []string{s.TrustToken}
|
||||||
|
}
|
||||||
|
values := map[string]any{
|
||||||
|
"accountName": appleID,
|
||||||
|
"password": password,
|
||||||
|
"rememberMe": true,
|
||||||
|
"trustTokens": trustTokens,
|
||||||
|
}
|
||||||
|
body, err := IntoReader(values)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/signin",
|
||||||
|
Parameters: url.Values{},
|
||||||
|
ExtraHeaders: s.GetAuthHeaders(map[string]string{}),
|
||||||
|
RootURL: authEndpoint,
|
||||||
|
IgnoreStatus: true, // need to handle 409 for hsa2
|
||||||
|
NoResponse: true,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
opts.Parameters.Set("isRememberMeEnabled", "true")
|
||||||
|
_, err = s.Request(ctx, opts, nil, nil)
|
||||||
|
|
||||||
|
return err
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthWithToken authenticates the session
|
||||||
|
func (s *Session) AuthWithToken(ctx context.Context) error {
|
||||||
|
values := map[string]any{
|
||||||
|
"accountCountryCode": s.AccountCountry,
|
||||||
|
"dsWebAuthToken": s.SessionToken,
|
||||||
|
"extended_login": true,
|
||||||
|
"trustToken": s.TrustToken,
|
||||||
|
}
|
||||||
|
body, err := IntoReader(values)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/accountLogin",
|
||||||
|
ExtraHeaders: GetCommonHeaders(map[string]string{}),
|
||||||
|
RootURL: setupEndpoint,
|
||||||
|
Body: body,
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := s.Request(ctx, opts, nil, &s.AccountInfo)
|
||||||
|
if err == nil {
|
||||||
|
s.Cookies = resp.Cookies()
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate2FACode validates the 2FA code
|
||||||
|
func (s *Session) Validate2FACode(ctx context.Context, code string) error {
|
||||||
|
values := map[string]interface{}{"securityCode": map[string]string{"code": code}}
|
||||||
|
body, err := IntoReader(values)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
headers := s.GetAuthHeaders(map[string]string{})
|
||||||
|
headers["scnt"] = s.Scnt
|
||||||
|
headers["X-Apple-ID-Session-Id"] = s.SessionID
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/verify/trusteddevice/securitycode",
|
||||||
|
ExtraHeaders: headers,
|
||||||
|
RootURL: authEndpoint,
|
||||||
|
Body: body,
|
||||||
|
NoResponse: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = s.Request(ctx, opts, nil, nil)
|
||||||
|
if err == nil {
|
||||||
|
if err := s.TrustSession(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("validate2FACode failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrustSession trusts the session
|
||||||
|
func (s *Session) TrustSession(ctx context.Context) error {
|
||||||
|
headers := s.GetAuthHeaders(map[string]string{})
|
||||||
|
headers["scnt"] = s.Scnt
|
||||||
|
headers["X-Apple-ID-Session-Id"] = s.SessionID
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
Path: "/2sv/trust",
|
||||||
|
ExtraHeaders: headers,
|
||||||
|
RootURL: authEndpoint,
|
||||||
|
NoResponse: true,
|
||||||
|
ContentLength: common.Int64(0),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := s.Request(ctx, opts, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("trustSession failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.AuthWithToken(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateSession validates the session
|
||||||
|
func (s *Session) ValidateSession(ctx context.Context) error {
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/validate",
|
||||||
|
ExtraHeaders: s.GetHeaders(map[string]string{}),
|
||||||
|
RootURL: setupEndpoint,
|
||||||
|
ContentLength: common.Int64(0),
|
||||||
|
}
|
||||||
|
_, err := s.Request(ctx, opts, nil, &s.AccountInfo)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("validateSession failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAuthHeaders returns the authentication headers for the session.
|
||||||
|
//
|
||||||
|
// It takes an `overwrite` map[string]string parameter which allows
|
||||||
|
// overwriting the default headers. It returns a map[string]string.
|
||||||
|
func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string {
|
||||||
|
headers := map[string]string{
|
||||||
|
"Accept": "application/json",
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
"X-Apple-OAuth-Client-Id": s.ClientID,
|
||||||
|
"X-Apple-OAuth-Client-Type": "firstPartyAuth",
|
||||||
|
"X-Apple-OAuth-Redirect-URI": "https://www.icloud.com",
|
||||||
|
"X-Apple-OAuth-Require-Grant-Code": "true",
|
||||||
|
"X-Apple-OAuth-Response-Mode": "web_message",
|
||||||
|
"X-Apple-OAuth-Response-Type": "code",
|
||||||
|
"X-Apple-OAuth-State": s.ClientID,
|
||||||
|
"X-Apple-Widget-Key": s.ClientID,
|
||||||
|
"Origin": homeEndpoint,
|
||||||
|
"Referer": fmt.Sprintf("%s/", homeEndpoint),
|
||||||
|
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
|
||||||
|
}
|
||||||
|
for k, v := range overwrite {
|
||||||
|
headers[k] = v
|
||||||
|
}
|
||||||
|
return headers
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHeaders Gets the authentication headers required for a request
|
||||||
|
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
|
||||||
|
headers := GetCommonHeaders(map[string]string{})
|
||||||
|
headers["Cookie"] = s.GetCookieString()
|
||||||
|
for k, v := range overwrite {
|
||||||
|
headers[k] = v
|
||||||
|
}
|
||||||
|
return headers
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCookieString returns the cookie header string for the session.
|
||||||
|
func (s *Session) GetCookieString() string {
|
||||||
|
cookieHeader := ""
|
||||||
|
// we only care about name and value.
|
||||||
|
for _, cookie := range s.Cookies {
|
||||||
|
cookieHeader = cookieHeader + cookie.Name + "=" + cookie.Value + ";"
|
||||||
|
}
|
||||||
|
return cookieHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCommonHeaders generates common HTTP headers with optional overwrite.
|
||||||
|
func GetCommonHeaders(overwrite map[string]string) map[string]string {
|
||||||
|
headers := map[string]string{
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
"Origin": baseEndpoint,
|
||||||
|
"Referer": fmt.Sprintf("%s/", baseEndpoint),
|
||||||
|
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
|
||||||
|
}
|
||||||
|
for k, v := range overwrite {
|
||||||
|
headers[k] = v
|
||||||
|
}
|
||||||
|
return headers
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeCookies merges two slices of http.Cookies, ensuring no duplicates are added.
|
||||||
|
func MergeCookies(left []*http.Cookie, right []*http.Cookie) ([]*http.Cookie, error) {
|
||||||
|
var hashes []string
|
||||||
|
for _, cookie := range right {
|
||||||
|
hashes = append(hashes, cookie.Raw)
|
||||||
|
}
|
||||||
|
for _, cookie := range left {
|
||||||
|
if !slices.Contains(hashes, cookie.Raw) {
|
||||||
|
right = append(right, cookie)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return right, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCookiesForDomain filters the provided cookies based on the domain of the given URL.
|
||||||
|
func GetCookiesForDomain(url *url.URL, cookies []*http.Cookie) ([]*http.Cookie, error) {
|
||||||
|
var domainCookies []*http.Cookie
|
||||||
|
for _, cookie := range cookies {
|
||||||
|
if strings.HasSuffix(url.Host, cookie.Domain) {
|
||||||
|
domainCookies = append(domainCookies, cookie)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return domainCookies, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSession creates a new Session instance with default values.
|
||||||
|
func NewSession() *Session {
|
||||||
|
session := &Session{}
|
||||||
|
session.srv = rest.NewClient(fshttp.NewClient(context.Background())).SetRoot(baseEndpoint)
|
||||||
|
//session.ClientID = "auth-" + uuid.New().String()
|
||||||
|
return session
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountInfo represents an account info
type AccountInfo struct {
	DsInfo                       *ValidateDataDsInfo    `json:"dsInfo"`
	HasMinimumDeviceForPhotosWeb bool                   `json:"hasMinimumDeviceForPhotosWeb"`
	ICDPEnabled                  bool                   `json:"iCDPEnabled"`
	Webservices                  map[string]*webService `json:"webservices"`
	PcsEnabled                   bool                   `json:"pcsEnabled"`
	TermsUpdateNeeded            bool                   `json:"termsUpdateNeeded"`
	ConfigBag                    struct {
		Urls struct {
			AccountCreateUI     string `json:"accountCreateUI"`
			AccountLoginUI      string `json:"accountLoginUI"`
			AccountLogin        string `json:"accountLogin"`
			AccountRepairUI     string `json:"accountRepairUI"`
			DownloadICloudTerms string `json:"downloadICloudTerms"`
			RepairDone          string `json:"repairDone"`
			AccountAuthorizeUI  string `json:"accountAuthorizeUI"`
			VettingURLForEmail  string `json:"vettingUrlForEmail"`
			AccountCreate       string `json:"accountCreate"`
			GetICloudTerms      string `json:"getICloudTerms"`
			VettingURLForPhone  string `json:"vettingUrlForPhone"`
		} `json:"urls"`
		AccountCreateEnabled bool `json:"accountCreateEnabled"`
	} `json:"configBag"`
	HsaTrustedBrowser            bool     `json:"hsaTrustedBrowser"`
	AppsOrder                    []string `json:"appsOrder"`
	Version                      int      `json:"version"`
	IsExtendedLogin              bool     `json:"isExtendedLogin"`
	PcsServiceIdentitiesIncluded bool     `json:"pcsServiceIdentitiesIncluded"`
	IsRepairNeeded               bool     `json:"isRepairNeeded"`
	HsaChallengeRequired         bool     `json:"hsaChallengeRequired"`
	RequestInfo                  struct {
		Country  string `json:"country"`
		TimeZone string `json:"timeZone"`
		Region   string `json:"region"`
	} `json:"requestInfo"`
	PcsDeleted bool `json:"pcsDeleted"`
	ICloudInfo struct {
		SafariBookmarksHasMigratedToCloudKit bool `json:"SafariBookmarksHasMigratedToCloudKit"`
	} `json:"iCloudInfo"`
	Apps map[string]*ValidateDataApp `json:"apps"`
}

// ValidateDataDsInfo represents validation info
type ValidateDataDsInfo struct {
	HsaVersion                         int           `json:"hsaVersion"`
	LastName                           string        `json:"lastName"`
	ICDPEnabled                        bool          `json:"iCDPEnabled"`
	TantorMigrated                     bool          `json:"tantorMigrated"`
	Dsid                               string        `json:"dsid"`
	HsaEnabled                         bool          `json:"hsaEnabled"`
	IsHideMyEmailSubscriptionActive    bool          `json:"isHideMyEmailSubscriptionActive"`
	IroncadeMigrated                   bool          `json:"ironcadeMigrated"`
	Locale                             string        `json:"locale"`
	BrZoneConsolidated                 bool          `json:"brZoneConsolidated"`
	ICDRSCapableDeviceList             string        `json:"ICDRSCapableDeviceList"`
	IsManagedAppleID                   bool          `json:"isManagedAppleID"`
	IsCustomDomainsFeatureAvailable    bool          `json:"isCustomDomainsFeatureAvailable"`
	IsHideMyEmailFeatureAvailable      bool          `json:"isHideMyEmailFeatureAvailable"`
	ContinueOnDeviceEligibleDeviceInfo []string      `json:"ContinueOnDeviceEligibleDeviceInfo"`
	Gilligvited                        bool          `json:"gilligvited"`
	AppleIDAliases                     []interface{} `json:"appleIdAliases"`
	UbiquityEOLEnabled                 bool          `json:"ubiquityEOLEnabled"`
	IsPaidDeveloper                    bool          `json:"isPaidDeveloper"`
	CountryCode                        string        `json:"countryCode"`
	NotificationID                     string        `json:"notificationId"`
	PrimaryEmailVerified               bool          `json:"primaryEmailVerified"`
	ADsID                              string        `json:"aDsID"`
	Locked                             bool          `json:"locked"`
	ICDRSCapableDeviceCount            int           `json:"ICDRSCapableDeviceCount"`
	HasICloudQualifyingDevice          bool          `json:"hasICloudQualifyingDevice"`
	PrimaryEmail                       string        `json:"primaryEmail"`
	AppleIDEntries                     []struct {
		IsPrimary bool   `json:"isPrimary"`
		Type      string `json:"type"`
		Value     string `json:"value"`
	} `json:"appleIdEntries"`
	GilliganEnabled    bool   `json:"gilligan-enabled"`
	IsWebAccessAllowed bool   `json:"isWebAccessAllowed"`
	FullName           string `json:"fullName"`
	MailFlags          struct {
		IsThreadingAvailable           bool `json:"isThreadingAvailable"`
		IsSearchV2Provisioned          bool `json:"isSearchV2Provisioned"`
		SCKMail                        bool `json:"sCKMail"`
		IsMppSupportedInCurrentCountry bool `json:"isMppSupportedInCurrentCountry"`
	} `json:"mailFlags"`
	LanguageCode         string `json:"languageCode"`
	AppleID              string `json:"appleId"`
	HasUnreleasedOS      bool   `json:"hasUnreleasedOS"`
	AnalyticsOptInStatus bool   `json:"analyticsOptInStatus"`
	FirstName            string `json:"firstName"`
	ICloudAppleIDAlias   string `json:"iCloudAppleIdAlias"`
	NotesMigrated        bool   `json:"notesMigrated"`
	BeneficiaryInfo      struct {
		IsBeneficiary bool `json:"isBeneficiary"`
	} `json:"beneficiaryInfo"`
	HasPaymentInfo bool   `json:"hasPaymentInfo"`
	PcsDelet       bool   `json:"pcsDelet"`
	AppleIDAlias   string `json:"appleIdAlias"`
	BrMigrated     bool   `json:"brMigrated"`
	StatusCode     int    `json:"statusCode"`
	FamilyEligible bool   `json:"familyEligible"`
}

// ValidateDataApp represents an app
type ValidateDataApp struct {
	CanLaunchWithOneFactor bool `json:"canLaunchWithOneFactor"`
	IsQualifiedForBeta     bool `json:"isQualifiedForBeta"`
}

// webService represents a web service
type webService struct {
	PcsRequired bool   `json:"pcsRequired"`
	URL         string `json:"url"`
	UploadURL   string `json:"uploadUrl"`
	Status      string `json:"status"`
}
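A rough sketch of the sign-in flow these methods implement, tying SignIn, AuthWithToken, Requires2FA and Validate2FACode together. The Apple ID, password, 2FA code and client ID below are placeholders; in the real backend the client ID, trust token and cookies are persisted in the rclone config between runs:

package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/backend/iclouddrive/api"
)

func main() {
	ctx := context.Background()
	s := api.NewSession()
	s.ClientID = "auth-00000000-0000-0000-0000-000000000000" // placeholder client ID

	// Primary authentication against the auth endpoint.
	if err := s.SignIn(ctx, "user@example.com", "password"); err != nil {
		panic(err)
	}
	// Exchange the session token for iCloud web service cookies and account info.
	if err := s.AuthWithToken(ctx); err != nil {
		panic(err)
	}
	// If the account uses two-factor auth, validate the code from a trusted device;
	// Validate2FACode then trusts the session and re-authenticates.
	if s.Requires2FA() {
		if err := s.Validate2FACode(ctx, "123456"); err != nil {
			panic(err)
		}
	}
	fmt.Println("web services available:", len(s.AccountInfo.Webservices))
}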
backend/iclouddrive/iclouddrive.go (new file, 1174 lines): file diff suppressed because it is too large
backend/iclouddrive/iclouddrive_test.go (new file, 18 lines) @@ -0,0 +1,18 @@

//go:build !plan9 && !solaris

package iclouddrive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/iclouddrive"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestICloudDrive:",
		NilObject:  (*iclouddrive.Object)(nil),
	})
}
backend/iclouddrive/iclouddrive_unsupported.go (new file, 7 lines) @@ -0,0 +1,7 @@

// Build for iclouddrive for unsupported platforms to stop go complaining
// about "no buildable Go source files"

//go:build plan9 || solaris

// Package iclouddrive implements the iCloud Drive backend
package iclouddrive
backend/imagekit/client/client.go (new file, 66 lines) @@ -0,0 +1,66 @@

// Package client provides a client for interacting with the ImageKit API.
package client

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// ImageKit main struct
type ImageKit struct {
	Prefix        string
	UploadPrefix  string
	Timeout       int64
	UploadTimeout int64
	PrivateKey    string
	PublicKey     string
	URLEndpoint   string
	HTTPClient    *rest.Client
}

// NewParams is a struct to define parameters to imagekit
type NewParams struct {
	PrivateKey  string
	PublicKey   string
	URLEndpoint string
}

// New returns an ImageKit object configured from the given parameters
func New(ctx context.Context, params NewParams) (*ImageKit, error) {
	privateKey := params.PrivateKey
	publicKey := params.PublicKey
	endpointURL := params.URLEndpoint

	switch {
	case privateKey == "":
		return nil, fmt.Errorf("ImageKit.io private key is required")
	case publicKey == "":
		return nil, fmt.Errorf("ImageKit.io public key is required")
	case endpointURL == "":
		return nil, fmt.Errorf("ImageKit.io URL endpoint is required")
	}

	cliCtx, cliCfg := fs.AddConfig(ctx)

	cliCfg.UserAgent = "rclone/imagekit"
	client := rest.NewClient(fshttp.NewClient(cliCtx))

	client.SetUserPass(privateKey, "")
	client.SetHeader("Accept", "application/json")

	return &ImageKit{
		Prefix:        "https://api.imagekit.io/v2",
		UploadPrefix:  "https://upload.imagekit.io/api/v2",
		Timeout:       60,
		UploadTimeout: 3600,
		PrivateKey:    params.PrivateKey,
		PublicKey:     params.PublicKey,
		URLEndpoint:   params.URLEndpoint,
		HTTPClient:    client,
	}, nil
}
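A minimal sketch of constructing the client above; the key values and the URL endpoint are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/backend/imagekit/client"
)

func main() {
	ctx := context.Background()
	// All three parameters are required; New returns an error if any is empty.
	ik, err := client.New(ctx, client.NewParams{
		PrivateKey:  "private_xxxxxxxx",                        // placeholder
		PublicKey:   "public_xxxxxxxx",                         // placeholder
		URLEndpoint: "https://ik.imagekit.io/your_imagekit_id", // placeholder
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("API prefix:", ik.Prefix)
}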
backend/imagekit/client/media.go (new file, 252 lines) @@ -0,0 +1,252 @@

package client

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"time"

	"github.com/rclone/rclone/lib/rest"
	"gopkg.in/validator.v2"
)

// FilesOrFolderParam struct is a parameter type to ListFiles() function to search / list media library files.
type FilesOrFolderParam struct {
	Path        string `json:"path,omitempty"`
	Limit       int    `json:"limit,omitempty"`
	Skip        int    `json:"skip,omitempty"`
	SearchQuery string `json:"searchQuery,omitempty"`
}

// AITag represents an AI tag for a media library file.
type AITag struct {
	Name       string  `json:"name"`
	Confidence float32 `json:"confidence"`
	Source     string  `json:"source"`
}

// File represents media library File details.
type File struct {
	FileID            string            `json:"fileId"`
	Name              string            `json:"name"`
	FilePath          string            `json:"filePath"`
	Type              string            `json:"type"`
	VersionInfo       map[string]string `json:"versionInfo"`
	IsPrivateFile     *bool             `json:"isPrivateFile"`
	CustomCoordinates *string           `json:"customCoordinates"`
	URL               string            `json:"url"`
	Thumbnail         string            `json:"thumbnail"`
	FileType          string            `json:"fileType"`
	Mime              string            `json:"mime"`
	Height            int               `json:"height"`
	Width             int               `json:"Width"`
	Size              uint64            `json:"size"`
	HasAlpha          bool              `json:"hasAlpha"`
	CustomMetadata    map[string]any    `json:"customMetadata,omitempty"`
	EmbeddedMetadata  map[string]any    `json:"embeddedMetadata"`
	CreatedAt         time.Time         `json:"createdAt"`
	UpdatedAt         time.Time         `json:"updatedAt"`
	Tags              []string          `json:"tags"`
	AITags            []AITag           `json:"AITags"`
}

// Folder represents media library Folder details.
type Folder struct {
	*File
	FolderPath string `json:"folderPath"`
}

// CreateFolderParam represents parameter to create folder api
type CreateFolderParam struct {
	FolderName       string `validate:"nonzero" json:"folderName"`
	ParentFolderPath string `validate:"nonzero" json:"parentFolderPath"`
}

// DeleteFolderParam represents parameter to delete folder api
type DeleteFolderParam struct {
	FolderPath string `validate:"nonzero" json:"folderPath"`
}

// MoveFolderParam represents parameter to move folder api
type MoveFolderParam struct {
	SourceFolderPath string `validate:"nonzero" json:"sourceFolderPath"`
	DestinationPath  string `validate:"nonzero" json:"destinationPath"`
}

// JobIDResponse represents response struct with JobID for folder operations
type JobIDResponse struct {
	JobID string `json:"jobId"`
}

// JobStatus represents response Data to job status api
type JobStatus struct {
	JobID  string `json:"jobId"`
	Type   string `json:"type"`
	Status string `json:"status"`
}

// File retrieves media library file details by file ID.
func (ik *ImageKit) File(ctx context.Context, fileID string) (*http.Response, *File, error) {
	data := &File{}
	response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
		Method:       "GET",
		Path:         fmt.Sprintf("/files/%s/details", fileID),
		RootURL:      ik.Prefix,
		IgnoreStatus: true,
	}, nil, data)

	return response, data, err
}

// Files retrieves media library files. Filter options can be supplied as FilesOrFolderParam.
func (ik *ImageKit) Files(ctx context.Context, params FilesOrFolderParam, includeVersion bool) (*http.Response, *[]File, error) {
	var SearchQuery = `type = "file"`

	if includeVersion {
		SearchQuery = `type IN ["file", "file-version"]`
	}
	if params.SearchQuery != "" {
		SearchQuery = params.SearchQuery
	}

	parameters := url.Values{}

	parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
	parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
	parameters.Set("path", params.Path)
	parameters.Set("searchQuery", SearchQuery)

	data := &[]File{}

	response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
		Method:     "GET",
		Path:       "/files",
		RootURL:    ik.Prefix,
		Parameters: parameters,
	}, nil, data)

	return response, data, err
}

// DeleteFile removes file by FileID from media library
func (ik *ImageKit) DeleteFile(ctx context.Context, fileID string) (*http.Response, error) {
	var err error

	if fileID == "" {
		return nil, errors.New("fileID can not be empty")
	}

	response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
		Method:     "DELETE",
		Path:       fmt.Sprintf("/files/%s", fileID),
		RootURL:    ik.Prefix,
		NoResponse: true,
	}, nil, nil)

	return response, err
}

// Folders retrieves media library folders. Filter options can be supplied as FilesOrFolderParam.
func (ik *ImageKit) Folders(ctx context.Context, params FilesOrFolderParam) (*http.Response, *[]Folder, error) {
	var SearchQuery = `type = "folder"`

	if params.SearchQuery != "" {
		SearchQuery = params.SearchQuery
	}

	parameters := url.Values{}

	parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
	parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
	parameters.Set("path", params.Path)
	parameters.Set("searchQuery", SearchQuery)

	data := &[]Folder{}

	resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
		Method:     "GET",
		Path:       "/files",
		RootURL:    ik.Prefix,
		Parameters: parameters,
	}, nil, data)

	return resp, data, err
}

// CreateFolder creates a new folder in media library
func (ik *ImageKit) CreateFolder(ctx context.Context, param CreateFolderParam) (*http.Response, error) {
	var err error

	if err = validator.Validate(&param); err != nil {
		return nil, err
	}

	response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
		Method:     "POST",
		Path:       "/folder",
		RootURL:    ik.Prefix,
		NoResponse: true,
	}, param, nil)

	return response, err
}

// DeleteFolder removes the folder from media library
func (ik *ImageKit) DeleteFolder(ctx context.Context, param DeleteFolderParam) (*http.Response, error) {
	var err error

	if err = validator.Validate(&param); err != nil {
		return nil, err
	}

	response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
		Method:     "DELETE",
		Path:       "/folder",
		RootURL:    ik.Prefix,
		NoResponse: true,
	}, param, nil)

	return response, err
}

// MoveFolder moves given folder to new path in media library
func (ik *ImageKit) MoveFolder(ctx context.Context, param MoveFolderParam) (*http.Response, *JobIDResponse, error) {
	var err error
	var response = &JobIDResponse{}

	if err = validator.Validate(&param); err != nil {
		return nil, nil, err
	}

	resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
		Method:  "PUT",
		Path:    "bulkJobs/moveFolder",
		RootURL: ik.Prefix,
	}, param, response)

	return resp, response, err
}

// BulkJobStatus retrieves the status of a bulk job by job ID.
func (ik *ImageKit) BulkJobStatus(ctx context.Context, jobID string) (*http.Response, *JobStatus, error) {
	var err error
	var response = &JobStatus{}

	if jobID == "" {
		return nil, nil, errors.New("jobId can not be blank")
	}

	resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
		Method:  "GET",
		Path:    "bulkJobs/" + jobID,
		RootURL: ik.Prefix,
	}, nil, response)

	return resp, response, err
}
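A small sketch of listing files with the client above; the folder path and limit are arbitrary example values, and the helper function is hypothetical:

package example

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/backend/imagekit/client"
)

func listFiles(ctx context.Context, ik *client.ImageKit) error {
	// List up to 100 files directly under /photos, without file versions.
	_, files, err := ik.Files(ctx, client.FilesOrFolderParam{
		Path:  "/photos", // example folder
		Skip:  0,
		Limit: 100,
	}, false)
	if err != nil {
		return err
	}
	for _, f := range *files {
		fmt.Println(f.FileID, f.FilePath, f.Size)
	}
	return nil
}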
backend/imagekit/client/upload.go (new file, 96 lines) @@ -0,0 +1,96 @@

package client

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"

	"github.com/rclone/rclone/lib/rest"
)

// UploadParam defines upload parameters
type UploadParam struct {
	FileName      string `json:"fileName"`
	Folder        string `json:"folder,omitempty"` // default value: /
	Tags          string `json:"tags,omitempty"`
	IsPrivateFile *bool  `json:"isPrivateFile,omitempty"` // default: false
}

// UploadResult defines the response structure for the upload API
type UploadResult struct {
	FileID       string            `json:"fileId"`
	Name         string            `json:"name"`
	URL          string            `json:"url"`
	ThumbnailURL string            `json:"thumbnailUrl"`
	Height       int               `json:"height"`
	Width        int               `json:"Width"`
	Size         uint64            `json:"size"`
	FilePath     string            `json:"filePath"`
	AITags       []map[string]any  `json:"AITags"`
	VersionInfo  map[string]string `json:"versionInfo"`
}

// Upload uploads an asset to an ImageKit account.
//
// The asset can be:
//   - the actual data (io.Reader)
//   - the Data URI (Base64 encoded), max ~60 MB (62,910,000 chars)
//   - the remote FTP, HTTP or HTTPS URL address of an existing file
//
// https://docs.imagekit.io/api-reference/upload-file-api/server-side-file-upload
func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadParam) (*http.Response, *UploadResult, error) {
	var err error

	if param.FileName == "" {
		return nil, nil, errors.New("Upload: Filename is required")
	}

	// Initialize URL values
	formParams := url.Values{}

	formParams.Add("useUniqueFileName", fmt.Sprint(false))

	// Add individual fields to URL values
	if param.FileName != "" {
		formParams.Add("fileName", param.FileName)
	}

	if param.Tags != "" {
		formParams.Add("tags", param.Tags)
	}

	if param.Folder != "" {
		formParams.Add("folder", param.Folder)
	}

	if param.IsPrivateFile != nil {
		formParams.Add("isPrivateFile", fmt.Sprintf("%v", *param.IsPrivateFile))
	}

	response := &UploadResult{}

	formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)
	}

	opts := rest.Opts{
		Method:      "POST",
		Path:        "/files/upload",
		RootURL:     ik.UploadPrefix,
		Body:        formReader,
		ContentType: contentType,
	}

	resp, err := ik.HTTPClient.CallJSON(ctx, &opts, nil, response)

	return resp, response, err
}
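A sketch of uploading a local file through the method above; the local file path, the destination folder and the helper function are example values:

package example

import (
	"context"
	"fmt"
	"os"

	"github.com/rclone/rclone/backend/imagekit/client"
)

func uploadPhoto(ctx context.Context, ik *client.ImageKit) error {
	f, err := os.Open("photo.jpg") // example local file
	if err != nil {
		return err
	}
	defer func() { _ = f.Close() }()

	// The file name is required; the folder defaults to "/" when omitted.
	_, result, err := ik.Upload(ctx, f, client.UploadParam{
		FileName: "photo.jpg",
		Folder:   "/photos",
	})
	if err != nil {
		return err
	}
	fmt.Println("uploaded:", result.FileID, result.URL)
	return nil
}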
backend/imagekit/client/url.go (new file, 72 lines) @@ -0,0 +1,72 @@

package client

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	neturl "net/url"
	"strconv"
	"strings"
	"time"
)

// URLParam represents parameters for generating a URL
type URLParam struct {
	Path            string
	Src             string
	URLEndpoint     string
	Signed          bool
	ExpireSeconds   int64
	QueryParameters map[string]string
}

// URL generates a URL from URLParam
func (ik *ImageKit) URL(params URLParam) (string, error) {
	var resultURL string
	var url *neturl.URL
	var err error
	var endpoint = params.URLEndpoint

	if endpoint == "" {
		endpoint = ik.URLEndpoint
	}

	endpoint = strings.TrimRight(endpoint, "/") + "/"

	if params.QueryParameters == nil {
		params.QueryParameters = make(map[string]string)
	}

	if url, err = neturl.Parse(params.Src); err != nil {
		return "", err
	}

	query := url.Query()

	for k, v := range params.QueryParameters {
		query.Set(k, v)
	}
	url.RawQuery = query.Encode()
	resultURL = url.String()

	if params.Signed {
		now := time.Now().Unix()

		var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
		var path = strings.Replace(resultURL, endpoint, "", 1)

		path += expires
		mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
		mac.Write([]byte(path))
		signature := hex.EncodeToString(mac.Sum(nil))

		if strings.Contains(resultURL, "?") {
			resultURL = resultURL + "&" + fmt.Sprintf("ik-t=%s&ik-s=%s", expires, signature)
		} else {
			resultURL = resultURL + "?" + fmt.Sprintf("ik-t=%s&ik-s=%s", expires, signature)
		}
	}

	return resultURL, nil
}
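A sketch of generating a signed, time-limited URL with the method above. The signature is an HMAC-SHA1 of the endpoint-relative URL plus the expiry timestamp, keyed with the private key, and is appended as the ik-t and ik-s query parameters; the source URL and the helper function below are example values:

package example

import (
	"github.com/rclone/rclone/backend/imagekit/client"
)

// signedLink returns a URL for the given asset that expires 600 seconds from now.
func signedLink(ik *client.ImageKit) (string, error) {
	return ik.URL(client.URLParam{
		Src:           "https://ik.imagekit.io/your_imagekit_id/photos/photo.jpg", // example source URL
		Signed:        true,
		ExpireSeconds: 600,
	})
}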
Some files were not shown because too many files have changed in this diff.