Mirror of https://github.com/rclone/rclone.git (synced 2026-01-20 09:23:21 +00:00)

Compare commits: pr-7789-on...fix-8379-h (673 commits)
Commits (SHA1):

d7f23b6b97
539e96cc1f
5086aad0b2
c1b414e2cf
2ff8aa1c20
6d2a72367a
9df751d4ec
e175c863aa
64cd8ae0f0
46b498b86a
b76cd74087
3b49fd24d4
c0515a51a5
dc9c87279b
057fdb3a9d
3daf62cf3d
0ef495fa76
722c567504
0ebe1c0f81
2dc06b2548
b52aabd8fe
6494ac037f
5c3a1bbf30
c837664653
77429b154e
39b8f17ebb
81ecfb0f64
656e789c5b
fe19184084
b4990cd858
8e955c6b13
3a5ddfcd3c
ac3f7a87c3
4e9b63e141
7fd7fe3c82
9dff45563d
83cf8fb821
32e79a5c5c
fc44a8114e
657172ef77
71eb4199c3
ac3c21368d
db71b2bd5f
8cfe42d09f
e673a28a72
59889ce46b
62e8a01e7e
87eaf37629
7c7606a6cf
dbb21165d4
375953cba3
af5385b344
347be176af
bf5a4774c6
0275d3edf2
be53ae98f8
0d9fe51632
03bd795221
5a4026ccb4
b1d4de69c2
5316acd046
2c72842c10
4a81f12c26
aabda1cda2
572fe20f8e
2fd4c45b34
ec5489e23f
6898375a2d
d413443a6a
5039747f26
11ba4ac539
b4ed7fb7d7
719473565e
bd7278d7e9
45ba81c726
530658e0cc
b742705d0c
cd3b08d8cf
009660a489
4b6c7c6d84
a7db375f5d
101dcfe157
aec87b74d3
91c8f92ccb
965bf19065
15ef3b90fa
f6efaf2a63
0e7c495395
ff0ded8f11
110bf468a4
d4e86f4d8b
6091a0362b
33d2747829
c9e5f45d73
2f66537514
a491312c7d
45b7690867
30ef1ddb23
424d8e3123
04dfa6d923
fdff1a54ee
42240f4b5d
7692ef289f
bfb7b88371
5f70918e2c
abf11271fe
a36e89bb61
35614acf59
7e4b8e33f5
5151a663f0
b85a1b684b
8fa8f146fa
6cad0a013e
aa743cbc60
a389a2979b
d6f0d1d349
4ed6960d95
731af0c0ab
5499fd3b59
e0e697ca11
05f000b076
a34c839514
6a217c7dc1
e1748a3183
bc08e05a00
9218b69afe
0ce2e12d9f
7224b76801
d2398ccb59
0988fd9e9f
51cde23e82
caac95ff54
19f4580aca
27f448d14d
500698c5be
91af6da068
b8835fe7b4
48d9e88e8f
4e7ee9310e
d629102fa6
db1ed69693
06657c49a0
f1d2f2b2c8
a5abe4b8b3
c0339327be
353bc3130e
126f00882b
44c3f5e1e8
c47c94e485
1f328fbcfd
7f1240516e
f9946b37f9
96fe25cf0a
a176d4cbda
e704e33045
2f3e90f671
65012beea4
704217b698
6ade1055d5
6a983d601c
eaafae95fa
5ca1436c24
c46e93cc42
66943d3d79
a78bc093de
2446c4928d
e11e679e90
ba8e538173
40111ba5e1
ab58ae5b03
ca8860177e
d65d1a44b3
c1763a3f95
964fcd5f59
c6281a1217
ff3f8f0b33
2d844a26c3
1b68492c85
acd5a893e2
0214a59a8c
6079cab090
bf57087a6e
d8bc542ffc
01ccf204f4
84b64dcdf9
8cc1020a58
1e2b354456
f639cd9c78
e50f995d87
abe884e744
173b2ac956
1317fdb9b8
1072173d58
df19c6f7bf
ee72554fb9
abb4f77568
ca2b27422f
740f6b318c
f307d929a8
ceea6753ee
2bafbf3c04
3e14ba54b8
2f7a30cf61
0ad925278d
e3053350f3
b9207e5727
40159e7a16
16baa24964
72f06bcc4b
c527dd8c9c
29fd894189
175aa07cdd
75257fc9cd
53ff3b3b32
8b4b59412d
264c9fb2c0
1b10cd3732
d97492cbc3
82a510e793
9f2c590e13
11a90917ec
8ca7b2af07
a19ddffe92
3e2c0f8c04
589458d1fe
69897b97fb
4db09331c6
fcd3b88332
1ca3f12672
e7a0fd0f70
c23c59544d
9dec3de990
5caa695c79
8400809900
e49516d5f4
9614fc60f2
51db76fd47
17e7ccfad5
8a6fc8535d
c053429b9c
18989fbf85
a7451c6a77
5147d1101c
11ad2a1316
3c7ad8d961
a3e8fb584a
9b4b3033da
94997d25d2
19458e8459
7d32da441e
22e13eea47
de9b593f02
b2b4f8196c
84cebb6872
cb9f4f8461
498d9cfa85
109e4ed0ed
353270263a
f8d782c02d
3dec664a19
a849fd59f0
462a1cf491
0b7b3cacdc
976103d50b
192524c004
28667f58bf
c669f4e218
1a9e6a527d
8c48cadd9c
76e1ba8c46
232e4cd18f
88141928f2
a2a0388036
48543d38e8
eceb390152
f4deffdc96
c172742cef
7daed30754
b1b4c7f27b
ed84553dc1
c94edbb76b
2dcb327bc0
874d66658e
3af757e26d
fef1b61585
3fca7a60a5
6b3f41fa0c
3d0ee47aa2
da70088b11
1bc9b94cf2
15a026d3be
ad122c6f6f
b155231cdd
49f69196c2
3f7651291b
796013dd06
e0da406ca7
9a02c04028
918185273f
3f2074901a
648afc7df4
16e0245a8e
59acb9dfa9
bfec159504
842396c8a0
5f9a201b45
22583d0a5f
f1466a429c
e3b09211b8
156feff9f2
b29a22095f
861c01caf5
f1a84d171e
bcdfad3c83
88b0757288
33d6c3f92f
752809309d
4a54cc134f
dfc2c98bbf
604d6bcb9c
d15704ef9f
26bc9826e5
2a28b0eaf0
2d1c2b1f76
ffb2e2a6de
c9c283533c
71799d7efd
8f4fdf6cc8
91b11f9eac
b49927fbd0
1a8b7662e7
6ba3e24853
802a938bd1
9deb3e8adf
296281a6eb
711478554e
906aef91fa
6b58cd0870
af9f8ced80
c63f1865f3
1bb89bc818
a365503750
3bb6d0a42b
f65755b3a3
33c5f35935
4367b999c9
b57e6213aa
cd90ba4337
0e5eb7a9bb
956c2963fd
146562975b
4c1cb0622e
258092f9c6
be448c9e13
4e708e59f2
c8366dfef3
1e14523b82
da25305ba0
e439121ab2
37c12732f9
4c488e7517
7261f47bd2
1db8b20fbc
a87d8967fc
4804f1f1e9
d1c84f9115
e0b08883cb
a0af72c27a
28d6985764
f2ce9a9557
95151eac82
bd9bf4eb1c
705c72d293
330c6702eb
4d787ae87f
86e9a56d73
64e8013c1b
33bff6fe71
e82b5b11af
4454ed9d3b
bad8207378
c6d3714e73
59501fcdb6
afd199d756
00e073df1e
2e007f89c7
edd9347694
1fad49ee35
182b2a6417
da9faf1ffe
303358eeda
62233b4993
498abcc062
482bfae8fa
ae9960a4ed
089c168fb9
6f515ded8f
91c6faff71
874616a73e
458d93ea7e
513653910c
bd5199910b
f6d836eefd
87ec26001f
3e12612aae
aee2480fc4
3ffa47ea16
70e8ad456f
55b9b3e33a
ce7dfa075c
a697d27455
cae22a7562
877321c2fb
574378e871
50d42babd8
13ea77dd71
62b76b631c
96f92b7364
7c02a63884
67d4394a37
c1a98768bc
bac9abebfb
27b281ef69
10270a4354
d08b49d723
cb2d2d72a0
e686e34f89
5f66350331
e1d935b854
61b27cda80
83613634f9
1c80cbd13a
9d5315a944
8d1d096c11
4b922d86d7
3b3625037c
bfa3278f30
e334366345
642d4082ac
024ff6ed15
d6b0743cf4
e4749cf0d0
8d2907d8f5
1720d3e11c
c6352231e4
731947f3ca
16d642825d
50aebcf403
c8555d1b16
3ec0ff5d8f
746516511d
8aef1de695
cb611b8330
66ae050a8b
fd9049c83d
a1f52bcf50
0470450583
1901bae4eb
9866d1c636
c5c7bcdd45
d5c7b55ba5
feafbfca52
abe01179ae
612c717ea0
f26d2c6ba8
dcecb0ede4
47588a7fd0
ba381f8721
8f0ddcca4e
404ef80025
13fa583368
e111ffba9e
30ba7542ff
31fabb3402
b3edc9d360
04f35fc3ac
8e5dd79e4d
b809e71d6f
d149d1ec3e
3b51ad24b2
485aa90d13
8958d06456
ca24447090
d008381e59
14629c66f9
4824837eed
5287a9b5fa
f2ce1767f0
7f048ac901
b0d0e0b267
f5eef420a4
9de485f949
d4b29fef92
471531eb6a
afd2663057
97d6a00483
5ddedae431
e1b7bf7701
2a615f4681
e041796bfe
1b9217bc78
846c1aeed0
56caab2033
495a5759d3
d9bd6f35f2
532a0818f7
91558ce6aa
8fbb259091
4d2bc190cc
c2bf300dd8
c954c397d9
25c6379688
ce1859cd82
cf25ae69ad
dce8317042
eff2497633
28ba4b832d
58da1a165c
eec95a164d
44cd2e07ca
a28287e96d
fc1d8dafd5
2c57fe9826
7c51b10d15
3280b6b83c
1a77a2f92b
c156716d01
0d9d0eef4c
2e653f8128
e79273f9c9
8e10fe71f7
c6ab37a59f
671a15f65f
8d72698d5a
6e853c82d8
27267547b9
cdcf0e5cb8
6507770014
bd5799c079
c834eb7dcb
754e53dbcc
5511fa441a
4ed4483bbc
0e85ba5080
e5095a7d7b
300851e8bf
cbccad9491
9f1a7cfa67
d84a4c9ac1
1c9da8c96a
af9c5fef93
7060777d1d
0197e7f4e5
c1c9e209f3
fd182af866
4ea629446f
93e8a976ef
8470bdf810
1aa3a37a28
ae887ad042
d279fea44a
282e34f2d5
021f25a748
18e9d039ad
cbcfb90d9a
caba22a585
3fef8016b5
edf6537c61
00f0e9df9d
e6ab644350
61c18e3b60
d068e0b1a9
a341065b8d
0c29a1fe31
1a40300b5f
44be27729a
b7624287ac
6db9f7180f
b601961e54
51aca9cf9d
7c0645dda9
aed77a8fb2
4250dd98f3
c13118246c
a56cd52025
3ae4534ce6
9c287c72d6
862d5d6086
003f4531fe
a52e887ddd
b7681e72bf
ce5024bf33
d2af114139
c8d6b02dd6
55cac4c34d
7ce60a47e8
27496fb26d
39f8d039fe
57f5ad188b
76798d5bb1
5921bb0efd
ce0d8a70a3
c23a40cb2a
86a1951a56
b778ec0142
dac7f76b14
446d6b28b8
7e04ff9528
4568feb5f9
b9a2d3b6b9
775e567a7b
59fc7ac193
fea61cac9e
3f3e4b055e
2257c03391
8f1c309c81
8e2f596fd0
de742ffc67
181ed55662
a5700a4a53
faa58315c5
7b89735ae7
91192c2c5e
96e39ea486
488ed28635
b059c96322
6d22168a8c
e34e2df600
6607102034
c6c327e4e7
6a0a54ab97
629e895da8
cc634213a5
e9e9feb21e
f26fc8f07c
96703bb31e
96d3adc771
f82822baca
af33a4f822
a675cc6677
ad605ee356
4ab235c06c
9a2b85d71c
29b58dd4c5
36ad4eb145
61ab519791
678941afc1
b153254b3a
17cd7a9496
0735f44f91
04c69959b8
25cc8c927a
6356b51b33
1890608f55
cd76fd9219
5b8cdaff39
f2f559230c
e0b38cc9ac
68dc79eddd
76cea0c704
41d5d8b88a
aa2746d0de
b2f6aac754
a0dacf4930
c5ff5afc21
bd8523f208
0bfd70c405
47735d8fe1
617534112b
271ec43189
10eb4742dd
2a2ec06ec1
7237b142fa
.github/workflows/build.yml (vendored, 68 lines changed)
@@ -17,22 +17,21 @@ on:
       manual:
         description: Manual run (bypass default conditions)
         type: boolean
         required: true
         default: true

 jobs:
   build:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.20', 'go1.21']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.22', 'go1.23']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -43,14 +42,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -59,14 +58,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,20 +75,20 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.24.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.20
+          - job_name: go1.22
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.22'
             quicktest: true
             racequicktest: true

-          - job_name: go1.21
+          - job_name: go1.23
             os: ubuntu-latest
-            go: '1.21'
+            go: '1.23'
             quicktest: true
             racequicktest: true

@@ -124,7 +123,8 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex
+          sudo apt-get update
+          sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
         if: matrix.os == 'ubuntu-latest'

       - name: Install Libraries on macOS
@@ -137,7 +137,7 @@ jobs:
           brew untap --force homebrew/cask
           brew update
           brew install --cask macfuse
-          brew install git-annex
+          brew install git-annex git-annex-remote-rclone
         if: matrix.os == 'macos-latest'

       - name: Install Libraries on Windows
@@ -217,30 +217,48 @@ jobs:
         if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

   lint:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest

     steps:
+      - name: Get runner parameters
+        id: get-runner-parameters
+        shell: bash
+        run: |
+          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
+          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
+
       - name: Checkout
         uses: actions/checkout@v4

       - name: Install Go
+        id: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.22.0-rc.1'
+          go-version: '>=1.23.0-rc.1'
           check-latest: true
+          cache: false
+
+      - name: Cache
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/go/pkg/mod
+            ~/.cache/go-build
+            ~/.cache/golangci-lint
+          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
+          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

       - name: Code quality test (Linux)
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
         with:
           version: latest
-          skip-cache: false # Caching enabled (which is default) on this first lint step only, it handles complete cache of build, go modules and golangci-lint analysis which was necessary to get all lint steps to properly take advantage of it
+          skip-cache: true

       - name: Code quality test (Windows)
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
         env:
           GOOS: "windows"
         with:
@@ -248,7 +266,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (macOS)
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
         env:
           GOOS: "darwin"
         with:
@@ -256,7 +274,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (FreeBSD)
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
         env:
           GOOS: "freebsd"
         with:
@@ -264,7 +282,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (OpenBSD)
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
         env:
           GOOS: "openbsd"
         with:
@@ -278,7 +296,7 @@ jobs:
         run: govulncheck ./...

   android:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
@@ -293,7 +311,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.22.0-rc.1'
+          go-version: '>=1.24.0-rc.1'

       - name: Set global environment variables
         shell: bash
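The rewritten `if:` conditions read the `workflow_dispatch` input directly as `inputs.manual` rather than via `github.event.inputs`, which presents every input as a string. To exercise the manual path, the workflow can be dispatched from the GitHub CLI; a minimal sketch, assuming `gh` is authenticated against the repository:

```sh
# Queue a manual run of build.yml with the boolean input set,
# bypassing the repository/PR conditions in the if: expressions.
gh workflow run build.yml -f manual=true
# Follow the run that was just queued.
gh run watch
```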
Deleted workflow file (name: Docker beta build)

@@ -1,77 +0,0 @@
name: Docker beta build

on:
  push:
    branches:
      - master
jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
          # GitHub Actions at the start of a workflow run to identify the job.
          # This is used to authenticate against GitHub Container Registry.
          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
          # for more detailed information.
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
      - name: Build and publish image
        uses: docker/build-push-action@v5
        with:
          file: Dockerfile
          context: .
          push: true # push the image to ghcr
          tags: |
            ghcr.io/rclone/rclone:beta
            rclone/rclone:beta
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          cache-from: type=gha, scope=${{ github.workflow }}
          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
          provenance: false
          # Eventually cache will need to be cleared if builds more frequent than once a week
          # https://github.com/docker/build-push-action/issues/252
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
.github/workflows/build_publish_docker_image.yml (vendored, new file, 294 lines)
@@ -0,0 +1,294 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-

name: Build & Push Docker Images

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build-image:
    if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        include:
          - platform: linux/amd64
            runs-on: ubuntu-24.04
          - platform: linux/386
            runs-on: ubuntu-24.04
          - platform: linux/arm64
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v7
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v6
            runs-on: ubuntu-24.04-arm

    name: Build Docker Image for ${{ matrix.platform }}
    runs-on: ${{ matrix.runs-on }}

    steps:
      - name: Free Space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .

      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Set PLATFORM Variable
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Set CACHE_NAME Variable
        shell: python
        run: |
          import os, re

          def slugify(input_string, max_length=63):
              slug = input_string.lower()
              slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
              slug = slug.strip()
              slug = re.sub(r'\s+', '-', slug)
              slug = re.sub(r'-+', '-', slug)
              slug = slug[:max_length]
              slug = re.sub(r'[-]+$', '', slug)
              return slug

          ref_name_slug = "cache"

          if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
              ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"CACHE_NAME={ref_name_slug}\n")

      - name: Get ImageOS
        # There's no way around this, because "ImageOS" is only available to
        # processes, but the setup-go action uses it in its key.
        id: imageos
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            return process.env.ImageOS

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
        with:
          images: |
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Setup QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Load Go Build Cache for Docker
        id: go-cache
        uses: actions/cache@v4
        with:
          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            go-build-cache

      - name: Inject Go Build Cache into Docker
        uses: reproducible-containers/buildkit-cache-dance@v3
        with:
          cache-map: |
            {
              "go-build-cache": "/root/.cache/go-build"
            }
          skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and Publish Image Digest
        id: build
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          provenance: false
          # don't specify 'tags' here (error "get can't push tagged ref by digest")
          # tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          annotations: ${{ steps.meta.outputs.annotations }}
          platforms: ${{ matrix.platform }}
          outputs: |
            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
          cache-from: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          cache-to: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd

      - name: Export Image Digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

      - name: Upload Image Digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM }}
          path: /tmp/digests/*
          retention-days: 1
          if-no-files-found: error

  merge-image:
    name: Merge & Push Final Docker Image
    runs-on: ubuntu-24.04
    needs:
      - build-image

    steps:
      - name: Download Image Digests
        uses: actions/download-artifact@v4
        with:
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
        with:
          images: |
            ${{ env.REPO_NAME }}
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Extract Tags
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
          tags_string = " ".join(tags)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"TAGS={tags_string}\n")

      - name: Extract Annotations
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
          annotations_string = " ".join(annotations)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"ANNOTATIONS={annotations_string}\n")

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Create & Push Manifest List
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create \
            ${{ env.TAGS }} \
            ${{ env.ANNOTATIONS }} \
            $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)

      - name: Inspect and Run Multi-Platform Image
        run: |
          docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
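The job split above hinges on pushing each per-architecture image by digest only (`push-by-digest=true`, no tags) and then assembling a single multi-arch manifest list in `merge-image`. Reduced to its essentials, the pattern looks roughly like the following sketch; the image name and digests are placeholders:

```sh
# Per-platform builds push untagged images and record their digests.
# A single manifest list then binds the digests under the real tags:
docker buildx imagetools create \
  --tag ghcr.io/OWNER/rclone:beta \
  ghcr.io/OWNER/rclone@sha256:AMD64_DIGEST \
  ghcr.io/OWNER/rclone@sha256:ARM64_DIGEST
```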
.github/workflows/build_publish_docker_plugin.yml (vendored, new file, 45 lines)
@@ -0,0 +1,45 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-

name: Release Build for Docker Plugin

on:
  release:
    types: [published]

jobs:

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
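The artifacts published here are Docker managed volume plugins rather than ordinary images, so users consume them with `docker plugin` instead of `docker run`. Installation likely looks something like the sketch below, with tag names mirroring the `PLUGIN_TAG` values built above; the rclone docs have the authoritative invocation:

```sh
# Install the amd64 build of the volume plugin under the alias "rclone".
docker plugin install rclone/docker-volume-rclone:amd64 \
  --alias rclone --grant-all-permissions args="-v"
docker plugin ls   # verify the plugin is enabled
```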
Deleted workflow file (name: Docker release build)

@@ -1,77 +0,0 @@
name: Docker release build

on:
  release:
    types: [published]

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Get actual patch version
        id: actual_patch_version
        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
      - name: Get actual minor version
        id: actual_minor_version
        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
      - name: Get actual major version
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Build and publish image
        uses: ilteoood/docker_buildx@1.1.0
        with:
          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
          imageName: rclone/rclone
          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          publish: true
          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
.github/workflows/notify.yml (vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
name: Notify users based on issue labels

on:
  issues:
    types: [labeled]

jobs:
  notify:
    runs-on: ubuntu-latest
    steps:
      - uses: jenschelkopf/issue-label-notification-action@1.3
        with:
          token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
          recipients: |
            Support Contract=@rclone/support
.gitignore (vendored, 5 lines changed)
@@ -3,7 +3,9 @@ _junk/
 rclone
 rclone.exe
 build
-docs/public
+/docs/public/
+/docs/.hugo_build.lock
+/docs/static/img/logos/
 rclone.iml
 .idea
 .history
@@ -16,6 +18,5 @@ fuzz-build.zip
 Thumbs.db
 __pycache__
 .DS_Store
-/docs/static/img/logos/
 resource_windows_*.syso
 .devcontainer
.golangci.yml (lint configuration)

@@ -13,6 +13,7 @@ linters:
     - stylecheck
     - unused
     - misspell
+    - gocritic
     #- prealloc
     #- maligned
   disable-all: true
@@ -98,3 +99,46 @@ linters-settings:
     # Only enable the checks performed by the staticcheck stand-alone tool,
     # as documented here: https://staticcheck.io/docs/configuration/options/#checks
     checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
+  gocritic:
+    # Enable all default checks with some exceptions and some additions (commented).
+    # Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
+    disable-all: true
+    enabled-checks:
+      #- appendAssign # Enabled by default
+      - argOrder
+      - assignOp
+      - badCall
+      - badCond
+      #- captLocal # Enabled by default
+      - caseOrder
+      - codegenComment
+      #- commentFormatting # Enabled by default
+      - defaultCaseOrder
+      - deprecatedComment
+      - dupArg
+      - dupBranchBody
+      - dupCase
+      - dupSubExpr
+      - elseif
+      #- exitAfterDefer # Enabled by default
+      - flagDeref
+      - flagName
+      #- ifElseChain # Enabled by default
+      - mapKey
+      - newDeref
+      - offBy1
+      - regexpMust
+      - ruleguard # Not enabled by default
+      #- singleCaseSwitch # Enabled by default
+      - sloppyLen
+      - sloppyTypeAssert
+      - switchTrue
+      - typeSwitchVar
+      - underef
+      - unlambda
+      - unslice
+      - valSwap
+      - wrapperFunc
+    settings:
+      ruleguard:
+        rules: "${configDir}/bin/rules.go"
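The newly enabled `gocritic` linter (including its `ruleguard` check, driven by the rules file referenced under `settings`) runs through the same configuration that the per-GOOS lint jobs in build.yml use. One of those CI steps can be reproduced locally along these lines, assuming `golangci-lint` is on `PATH`:

```sh
# Lint with the cmount build tag, as the CI matrix does.
golangci-lint run --build-tags cmount ./...
# Cross-lint the Windows build without a Windows machine.
GOOS=windows golangci-lint run --build-tags cmount ./...
```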
CONTRIBUTING.md

@@ -209,7 +209,7 @@ altogether with an HTML report and test retries then from the
 project root:

     go install github.com/rclone/rclone/fstest/test_all
-    test_all -backend drive
+    test_all -backends drive

 ### Full integration testing

@@ -490,7 +490,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
 - `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
   - make sure this has the `autogenerated options` comments in (see your reference backend docs)
   - update them in your backend with `bin/make_backend_docs.py remote`
-- `docs/content/overview.md` - overview docs
+- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
 - `docs/content/docs.md` - list of remotes in config section
 - `docs/content/_index.md` - front page of rclone.org
 - `docs/layouts/chrome/navbar.html` - add it to the website navigation
@@ -508,7 +508,7 @@ You'll need to modify the following files
 - `backend/s3/s3.go`
   - Add the provider to `providerOption` at the top of the file
   - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
-  - Exclude your provider from genric config questions (eg `region` and `endpoint).
+  - Exclude your provider from generic config questions (eg `region` and `endpoint).
   - Add the provider to the `setQuirks` function - see the documentation there.
 - `docs/content/s3.md`
   - Add the provider at the top of the page.
Dockerfile (44 lines changed)
@@ -1,19 +1,47 @@
 FROM golang:alpine AS builder

-COPY . /go/src/github.com/rclone/rclone/
 ARG CGO_ENABLED=0

 WORKDIR /go/src/github.com/rclone/rclone/

-RUN apk add --no-cache make bash gawk git
-RUN \
-  CGO_ENABLED=0 \
-  make
-RUN ./rclone version
+RUN echo "**** Set Go Environment Variables ****" && \
+    go env -w GOCACHE=/root/.cache/go-build
+
+RUN echo "**** Install Dependencies ****" && \
+    apk add --no-cache \
+        make \
+        bash \
+        gawk \
+        git
+
+COPY go.mod .
+COPY go.sum .
+
+RUN echo "**** Download Go Dependencies ****" && \
+    go mod download -x
+
+RUN echo "**** Verify Go Dependencies ****" && \
+    go mod verify
+
+COPY . .
+
+RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
+    echo "**** Build Binary ****" && \
+    make
+
+RUN echo "**** Print Version Binary ****" && \
+    ./rclone version

 # Begin final image
 FROM alpine:latest

-RUN apk --no-cache add ca-certificates fuse3 tzdata && \
-    echo "user_allow_other" >> /etc/fuse.conf
+RUN echo "**** Install Dependencies ****" && \
+    apk add --no-cache \
+        ca-certificates \
+        fuse3 \
+        tzdata && \
+    echo "Enable user_allow_other in fuse" && \
+    echo "user_allow_other" >> /etc/fuse.conf

 COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
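The new `RUN --mount=type=cache` instruction needs BuildKit; current Docker releases use it by default, but on older installs it must be enabled explicitly. A minimal local build under that assumption:

```sh
# BuildKit is required for the go-build cache mount in the Dockerfile.
DOCKER_BUILDKIT=1 docker build -t rclone:dev .
docker run --rm rclone:dev version
```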
MAINTAINERS.md

@@ -21,6 +21,8 @@ Current active maintainers of rclone are:
 | Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
 | Hideo Aoyama | @boukendesho | snap packaging |
 | nielash | @nielash | bisync |
+| Dan McArdle | @dmcardle | gitannex |
+| Sam Harrison | @childish-sambino | filescom |

 **This is a work in progress Draft**
MANUAL.html (generated, 29273 lines changed): diff suppressed because it is too large
MANUAL.txt (generated, 36526 lines changed): diff suppressed because it is too large
Makefile (19 lines changed)
@@ -43,6 +43,7 @@ ifdef GOTAGS
 BUILDTAGS=-tags "$(GOTAGS)"
 LINTTAGS=--build-tags "$(GOTAGS)"
 endif
+LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"

 .PHONY: rclone test_all vars version

@@ -50,7 +51,7 @@ rclone:
 ifeq ($(GO_OS),windows)
 	go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
 endif
-	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
+	go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
 ifeq ($(GO_OS),windows)
 	rm resource_windows_`go env GOARCH`.syso
 endif
@@ -59,7 +60,7 @@ endif
 	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

 test_all:
-	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
+	go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all

 vars:
 	@echo SHELL="'$(SHELL)'"
@@ -87,13 +88,13 @@ test: rclone test_all

 # Quick test
 quicktest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
+	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...

 racequicktest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
+	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...

 compiletest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
+	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...

 # Do source code quality checks
 check: rclone
@@ -143,10 +144,14 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

 commanddocs: rclone
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
+	-@rmdir -p '$$HOME/.config/rclone'
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
+	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

 backenddocs: rclone bin/make_backend_docs.py
+	-@rmdir -p '$$HOME/.config/rclone'
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
+	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

 rcdocs: rclone
 	bin/make_rc_docs.sh
@@ -238,7 +243,7 @@ fetch_binaries:
 	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/

 serve: website
-	cd docs && hugo server -v -w --disableFastRender
+	cd docs && hugo server --logLevel info -w --disableFastRender

 tag: retag doc
 	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
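The new `LDFLAGS` variable centralizes the version-stamping flags that were previously repeated per target, and passing it to `go test` keeps test builds consistent with the main binary. The mechanism is Go's `-X` linker flag setting `fs.Version`; a standalone sketch using a made-up version string:

```sh
# Equivalent of the Makefile's $(LDFLAGS), with an arbitrary tag:
go build -ldflags "-s -X github.com/rclone/rclone/fs.Version=v1.99.0-test" .
./rclone version   # should report v1.99.0-test
```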
README.md

@@ -55,14 +55,18 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
   * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
   * Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
+  * Files.com [:page_facing_up:](https://rclone.org/filescom/)
   * FTP [:page_facing_up:](https://rclone.org/ftp/)
+  * GoFile [:page_facing_up:](https://rclone.org/gofile/)
   * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
   * Google Drive [:page_facing_up:](https://rclone.org/drive/)
   * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
   * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
+  * Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
   * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
   * HTTP [:page_facing_up:](https://rclone.org/http/)
   * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
+  * iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
   * ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
   * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
   * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -73,6 +77,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
   * Linkbox [:page_facing_up:](https://rclone.org/linkbox)
   * Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
+  * Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
   * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
   * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
   * Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -88,10 +93,12 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
   * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
   * Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
+  * Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
   * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
   * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
   * Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
   * PikPak [:page_facing_up:](https://rclone.org/pikpak/)
+  * Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
   * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
   * put.io [:page_facing_up:](https://rclone.org/putio/)
   * Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
@@ -100,9 +107,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
   * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
   * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
+  * rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
   * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
   * Seafile [:page_facing_up:](https://rclone.org/seafile/)
   * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
+  * Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
   * SFTP [:page_facing_up:](https://rclone.org/sftp/)
   * SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
   * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
RELEASE.md (68 lines changed)
@@ -37,18 +37,51 @@ This file describes how to make the various kinds of releases

 ## Update dependencies

-Early in the next release cycle update the dependencies
+Early in the next release cycle update the dependencies.

 * Review any pinned packages in go.mod and remove if possible
-* make updatedirect
-* make GOTAGS=cmount
-* make compiletest
-* git commit -a -v
-* make update
-* make GOTAGS=cmount
-* make compiletest
+* `make updatedirect`
+* `make GOTAGS=cmount`
+* `make compiletest`
+* Fix anything which doesn't compile at this point and commit changes here
+* `git commit -a -v -m "build: update all dependencies"`
+
+If the `make updatedirect` upgrades the version of go in the `go.mod`
+
+    go 1.22.0
+
+then go to manual mode. `go1.22` here is the lowest supported version
+in the `go.mod`.
+
+If `make updatedirect` added a `toolchain` directive then remove it.
+We don't want to force a toolchain on our users. Linux packagers are
+often using a version of Go that is a few versions out of date.
+
+```
+go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
+go get -d $(cat /tmp/potential-upgrades)
+go mod tidy -go=1.22 -compat=1.22
+```
+
+If the `go mod tidy` fails use the output from it to remove the
+package which can't be upgraded from `/tmp/potential-upgrades` when
+done
+
+```
+git co go.mod go.sum
+```
+
+And try again.
+
+Optionally upgrade the direct and indirect dependencies. This is very
+likely to fail if the manual method was used above - in that case
+ignore it as it is too time consuming to fix.
+
+* `make update`
+* `make GOTAGS=cmount`
+* `make compiletest`
 * roll back any updates which didn't compile
-* git commit -a -v --amend
+* `git commit -a -v --amend`
 * **NB** watch out for this changing the default go version in `go.mod`

 Note that `make update` updates all direct and indirect dependencies
@@ -57,6 +90,19 @@ doing that so it may be necessary to roll back dependencies to the
 version specified by `make updatedirect` in order to get rclone to
 build.

+Once it compiles locally, push it on a test branch and commit fixes
+until the tests pass.
+
+### Major versions
+
+The above procedure will not upgrade major versions, so v2 to v3.
+However this tool can show which major versions might need to be
+upgraded:
+
+    go run github.com/icholy/gomajor@latest list -major
+
+Expect API breakage when updating major versions.
+
 ## Tidy beta

 At some point after the release run
@@ -139,6 +185,8 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a

 To make a full build then set the tags correctly and add `--push`

+Note that you can't only build one architecture - you need to build them all.
+
 ```
-docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
+docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
 ```
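When `make update` drags in a dependency that breaks the build, the usual recovery is to pin just that module back and re-tidy; a sketch with a placeholder module path and version:

```sh
# Roll a single offending dependency back to its last good version.
go get github.com/example/dep@v1.2.3
go mod tidy
make compiletest   # confirm everything still builds
```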
backend/alias internal tests

@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
 	configfile.Install()

 	// Configure the remote
-	config.FileSet(remoteName, "type", "alias")
-	config.FileSet(remoteName, "remote", root)
+	config.FileSetValue(remoteName, "type", "alias")
+	config.FileSetValue(remoteName, "remote", root)
 }

 func TestNewFS(t *testing.T) {
backend/all/all.go (backend registration)

@@ -10,6 +10,7 @@ import (
 	_ "github.com/rclone/rclone/backend/box"
 	_ "github.com/rclone/rclone/backend/cache"
 	_ "github.com/rclone/rclone/backend/chunker"
+	_ "github.com/rclone/rclone/backend/cloudinary"
 	_ "github.com/rclone/rclone/backend/combine"
 	_ "github.com/rclone/rclone/backend/compress"
 	_ "github.com/rclone/rclone/backend/crypt"
@@ -17,13 +18,16 @@ import (
 	_ "github.com/rclone/rclone/backend/dropbox"
 	_ "github.com/rclone/rclone/backend/fichier"
 	_ "github.com/rclone/rclone/backend/filefabric"
+	_ "github.com/rclone/rclone/backend/filescom"
 	_ "github.com/rclone/rclone/backend/ftp"
+	_ "github.com/rclone/rclone/backend/gofile"
 	_ "github.com/rclone/rclone/backend/googlecloudstorage"
 	_ "github.com/rclone/rclone/backend/googlephotos"
 	_ "github.com/rclone/rclone/backend/hasher"
 	_ "github.com/rclone/rclone/backend/hdfs"
 	_ "github.com/rclone/rclone/backend/hidrive"
 	_ "github.com/rclone/rclone/backend/http"
+	_ "github.com/rclone/rclone/backend/iclouddrive"
 	_ "github.com/rclone/rclone/backend/imagekit"
 	_ "github.com/rclone/rclone/backend/internetarchive"
 	_ "github.com/rclone/rclone/backend/jottacloud"
@@ -39,6 +43,7 @@ import (
 	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
 	_ "github.com/rclone/rclone/backend/pcloud"
 	_ "github.com/rclone/rclone/backend/pikpak"
+	_ "github.com/rclone/rclone/backend/pixeldrain"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
 	_ "github.com/rclone/rclone/backend/protondrive"
 	_ "github.com/rclone/rclone/backend/putio"
File diff suppressed because it is too large

@@ -3,16 +3,149 @@
package azureblob

import (
"context"
"encoding/base64"
"strings"
"testing"

"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestBlockIDCreator(t *testing.T) {
// Check creation and random number
bic, err := newBlockIDCreator()
require.NoError(t, err)
bic2, err := newBlockIDCreator()
require.NoError(t, err)
assert.NotEqual(t, bic.random, bic2.random)
assert.NotEqual(t, bic.random, [8]byte{})

// Set random to known value for tests
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
chunkNumber := uint64(0xFEDCBA9876543210)

// Check creation of ID
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
got := bic.newBlockID(chunkNumber)
assert.Equal(t, want, got)
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)

// Test checkID is working
assert.NoError(t, bic.checkID(chunkNumber, got))
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}

func (f *Fs) testFeatures(t *testing.T) {
// Check first feature flags are set on this remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}

type ReadSeekCloser struct {
*strings.Reader
}

func (r *ReadSeekCloser) Close() error {
return nil
}

// Stage a block at remote but don't commit it
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
var (
containerName, blobPath = f.split(remote)
containerClient = f.cntSVC(containerName)
blobClient = containerClient.NewBlockBlobClient(blobPath)
data = "uncommitted data"
blockID = "1"
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
)
r := &ReadSeekCloser{strings.NewReader(data)}
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
require.NoError(t, err)

// Verify the block is staged but not committed
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
require.NoError(t, err)
found := false
for _, block := range blockList.UncommittedBlocks {
if *block.Name == blockIDBase64 {
found = true
break
}
}
require.True(t, found, "Block ID not found in uncommitted blocks")
}

// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
var (
ctx = context.Background()
remote = "testBlob"
)

// Multipart copy the blob please
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
f.opt.UseCopyBlob = false
f.opt.CopyCutoff = f.opt.ChunkSize
defer func() {
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
}()

// Create a blob with uncommitted blocks
f.stageBlockWithoutCommit(ctx, t, remote)

// Now attempt to overwrite the block with a different sized block ID to provoke this error

// Check the object does not exist
_, err := f.NewObject(ctx, remote)
require.Equal(t, fs.ErrorObjectNotFound, err)

// Upload a multipart file over the block with uncommitted chunks of a different ID size
size := 4*int(f.opt.ChunkSize) - 1
contents := random.String(size)
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)

// Check size
assert.Equal(t, int64(size), o.Size())

// Create a new blob with uncommitted blocks
newRemote := "testBlob2"
f.stageBlockWithoutCommit(ctx, t, newRemote)

// Copy over that block
dst, err := f.Copy(ctx, o, newRemote)
require.NoError(t, err)

// Check basics
assert.Equal(t, int64(size), dst.Size())
assert.Equal(t, newRemote, dst.Remote())

// Check contents
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)

// Remove the object
require.NoError(t, dst.Remove(ctx))
}

func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
}

@@ -15,13 +15,17 @@ import (

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}

@@ -40,6 +44,7 @@ func TestIntegration2(t *testing.T) {
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
@@ -48,8 +53,13 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}

var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)

func TestValidateAccessTier(t *testing.T) {

@@ -237,6 +237,30 @@ msi_client_id, or msi_mi_res_id parameters.`,
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
Sensitive: true,
}, {
Name: "disable_instance_discovery",
Help: `Skip requesting Microsoft Entra instance metadata
This should be set true only by applications authenticating in
disconnected clouds, or private clouds such as Azure Stack.
It determines whether rclone requests Microsoft Entra instance
metadata from ` + "`https://login.microsoft.com/`" + ` before
authenticating.
Setting this to true will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
`,
Default: false,
Advanced: true,
}, {
Name: "use_az",
Help: `Use Azure CLI tool az for authentication
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.
Don't set env_auth at the same time.
`,
Default: false,
Advanced: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -319,10 +343,12 @@ type Options struct {
Username string `config:"username"`
Password string `config:"password"`
ServicePrincipalFile string `config:"service_principal_file"`
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
UseMSI bool `config:"use_msi"`
MSIObjectID string `config:"msi_object_id"`
MSIClientID string `config:"msi_client_id"`
MSIResourceID string `config:"msi_mi_res_id"`
UseAZ bool `config:"use_az"`
Endpoint string `config:"endpoint"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`
@@ -393,8 +419,10 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
policyClientOptions := policy.ClientOptions{
Transport: newTransporter(ctx),
}
backup := service.ShareTokenIntentBackup
clientOpt := service.ClientOptions{
ClientOptions: policyClientOptions,
FileRequestIntent: &backup,
}

// Here we auth by setting one of cred, sharedKeyCred or f.client
@@ -412,7 +440,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
}
// Read credentials from the environment
options := azidentity.DefaultAzureCredentialOptions{
ClientOptions: policyClientOptions,
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
}
cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil {
@@ -423,6 +452,13 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
if err != nil {
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
}
case opt.UseAZ:
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
}
case opt.SASURL != "":
client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
if err != nil {
@@ -897,7 +933,7 @@ func (o *Object) getMetadata(ctx context.Context) error {

// Hash returns the MD5 of an object returning a lowercase hex string
//
// May make a network request because the [fs.List] method does not
// return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
@@ -1035,12 +1071,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if _, createErr := fc.Create(ctx, size, nil); createErr != nil {
return fmt.Errorf("update: unable to create file: %w", createErr)
}
} else if size != o.Size() {
// Resize the file if needed
if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
}
}

@@ -42,9 +42,10 @@ type Bucket struct {

// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
FileNamePrefix string `json:"fileNamePrefix"`
}

// Timestamp is a UTC time when this file was uploaded. It is a base

@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
}

func TestTimestampEqual(t *testing.T) {
assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
}

@@ -30,6 +30,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@@ -102,7 +103,7 @@ below will cause b2 to return specific errors:
* "force_cap_exceeded"

These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist).`,
Default: "",
Hide: fs.OptionHideConfigurator,
Advanced: true,
@@ -244,7 +245,7 @@ See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// See: https://www.backblaze.com/docs/cloud-storage-files
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// FIXME: allow /, but not leading, trailing or double
Default: (encoder.Display |
@@ -1317,16 +1318,22 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
// Check current version of the file
if deleteHidden && object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
toBeDeleted <- object
}
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
toBeDeleted <- object
}
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
}
} else {
fs.Debugf(remote, "Deleting (id %q)", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "delete") {
toBeDeleted <- object
}
}
last = remote
tr.Done(ctx, nil)
@@ -1566,7 +1573,7 @@ func (o *Object) Size() int64 {
//
// Make sure it is lower case.
//
// Remove unverified prefix - see https://www.backblaze.com/docs/cloud-storage-upload-files-with-the-native-api
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) string {
const unverified = "unverified:"
@@ -1593,7 +1600,11 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
o.size = Size
// Use the UploadTimestamp if can't get file info
o.modTime = time.Time(UploadTimestamp)
err = o.parseTimeString(Info[timeKey])
if err != nil {
return err
}
return nil
}

// decodeMetaData sets the metadata in the object from an api.File
@@ -1695,6 +1706,16 @@ func timeString(modTime time.Time) string {
return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
}

// parseTimeStringHelper converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time
func parseTimeStringHelper(timeString string) (time.Time, error) {
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
return time.Time{}, err
}
return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
}

// parseTimeString converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable.
@@ -1702,12 +1723,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
if timeString == "" {
return nil
}
modTime, err := parseTimeStringHelper(timeString)
if err != nil {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return nil
}
o.modTime = modTime
return nil
}

||||
@@ -1861,6 +1882,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
|
||||
ContentType: resp.Header.Get("Content-Type"),
|
||||
Info: Info,
|
||||
}
|
||||
|
||||
// When reading files from B2 via cloudflare using
|
||||
// --b2-download-url cloudflare strips the Content-Length
|
||||
// headers (presumably so it can inject stuff) so use the old
|
||||
@@ -1958,7 +1980,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
if err == nil {
|
||||
fs.Debugf(o, "File is big enough for chunked streaming")
|
||||
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
|
||||
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...)
|
||||
if err != nil {
|
||||
o.fs.putRW(rw)
|
||||
return err
|
||||
@@ -1990,7 +2012,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return o.decodeMetaDataFileInfo(up.info)
|
||||
}
|
||||
|
||||
modTime := src.ModTime(ctx)
|
||||
modTime, err := o.getModTime(ctx, src, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
|
||||
if calculatedSha1 == "" {
|
||||
@@ -2095,6 +2120,36 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return o.decodeMetaDataFileInfo(&response)
|
||||
}
|
||||
|
||||
// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
|
||||
// When metadata support is added to b2, this method will need a more generic name
|
||||
func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
|
||||
modTime := src.ModTime(ctx)
|
||||
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
|
||||
if err != nil {
|
||||
return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
// merge metadata into request and user metadata
|
||||
for k, v := range meta {
|
||||
k = strings.ToLower(k)
|
||||
// For now, the only metadata we're concerned with is "mtime"
|
||||
switch k {
|
||||
case "mtime":
|
||||
// mtime in meta overrides source ModTime
|
||||
metaModTime, err := time.Parse(time.RFC3339Nano, v)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
|
||||
} else {
|
||||
modTime = metaModTime
|
||||
}
|
||||
default:
|
||||
// Do nothing for now
|
||||
}
|
||||
}
|
||||
return modTime, nil
|
||||
}
|
||||
|
||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
//
|
||||
// Pass in the remote and the src object
|
||||
@@ -2126,7 +2181,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
return info, up, err
}

@@ -2172,6 +2227,7 @@ This will dump something like this showing the lifecycle rules.
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
"fileNamePrefix": ""
}
]
@@ -2198,8 +2254,9 @@ overwrites will still cause versions to be made.

See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
},
}

@@ -2219,14 +2276,23 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
}
newRule.DaysFromUploadingToHiding = &days
}
if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
}
newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
}
bucketName, _ := f.split("")
if bucketName == "" {
return nil, errors.New("bucket required")
}

skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")

var bucket *api.Bucket
if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err

@@ -5,6 +5,7 @@ import (
"crypto/sha1"
"fmt"
"path"
"sort"
"strings"
"testing"
"time"
@@ -13,6 +14,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
@@ -23,7 +25,7 @@ import (
)

// Test b2 string encoding
// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding

var encodeTest = []struct {
fullyEncoded string
@@ -184,57 +186,120 @@ func TestParseTimeString(t *testing.T) {

}

// This is adapted from the s3 equivalent.
// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
var headers = make(map[string]string)
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
headers[k[len(headerPrefix):]] = v
}
}
return headers
}

func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
t.Run(what, func(t *testing.T) {
ctx := context.Background()

ss := fs.SizeSuffix(0)
err := ss.Set(size)
require.NoError(t, err)
original := random.String(int(ss))

contents := fstest.Gz(t, original)
mimeType := "text/html"

if chunkSize != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(chunkSize)
require.NoError(t, err)
_, err = f.SetUploadChunkSize(ss)
require.NoError(t, err)
}

if uploadCutoff != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(uploadCutoff)
require.NoError(t, err)
_, err = f.SetUploadCutoff(ss)
require.NoError(t, err)
}

item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
btime := time.Now()
metadata := fs.Metadata{
// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any
"mtime": "2009-05-06T04:05:06.499Z",
}

// Need to specify HTTP options with the header prefix since they are passed as-is
options := []fs.OpenOption{
&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
}

obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.getMetaData(ctx)
require.NoError(t, err)

// X-Bz-Info-a & X-Bz-Info-b
optMetadata := OpenOptionToMetaData(options)
for k, v := range optMetadata {
got := gotMetadata.Info[k]
assert.Equal(t, v, got, k)
}

assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")

// Modification time from the x-bz-info-src_last_modified_millis header
var mtime api.Timestamp
err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
if err != nil {
fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
}
assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")

// Upload time
gotBtime := time.Time(gotMetadata.UploadTimestamp)
dt := gotBtime.Sub(btime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))

t.Run("GzipEncoding", func(t *testing.T) {
// Test that the gzipped file we uploaded can be
// downloaded
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
gotContents := fstests.ReadObject(ctx, t, o, -1)
assert.Equal(t, wantContents, gotContents)
assert.Equal(t, wantSize, o.Size())
gotHash, err := o.Hash(ctx, hash.SHA1)
require.NoError(t, err)
assert.Equal(t, wantHash, gotHash)
}

t.Run("NoDecompress", func(t *testing.T) {
checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
})
})
})
}

func (f *Fs) InternalTestMetadata(t *testing.T) {
// 1 kB regular file
f.internalTestMetadata(t, "1kiB", "", "")

// 10 MiB large file
f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
}

func sha1Sum(t *testing.T, s string) string {
hash := sha1.Sum([]byte(s))
return fmt.Sprintf("%x", hash)
@@ -394,24 +459,161 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
})

t.Run("Cleanup", func(t *testing.T) {
t.Run("DryRun", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should be unchanged after dry run
before := listAllFiles(ctx, t, f, dirName)
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, true, false, 0))
after := listAllFiles(ctx, t, f, dirName)
assert.Equal(t, before, after)
})

t.Run("RealThing", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should reflect current state after cleanup
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
})
})

// Purge gets tested later
}

func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
ctx := context.Background()

// B2CleanupHidden tests cleaning up hidden files
t.Run("CleanupUnfinished", func(t *testing.T) {
dirName := "unfinished"
fileCount := 5
expectedFiles := []string{}
for i := 1; i < fileCount; i++ {
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
expectedFiles = append(expectedFiles, fileName)
obj := &Object{
fs: f,
remote: fileName,
}
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
require.NoError(t, err)
}
checkListing(ctx, t, f, dirName, expectedFiles)

t.Run("DryRun", func(t *testing.T) {
// Listing should not change after dry run
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, expectedFiles)
})

t.Run("RealThing", func(t *testing.T) {
// Listing should be empty after real cleanup
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, []string{})
})
})
}

func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
bucket, directory := f.split(dirName)
foundFiles := []string{}
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
foundFiles = append(foundFiles, object.Name)
}
return nil
}))
sort.Strings(foundFiles)
return foundFiles
}

func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
foundFiles := listAllFiles(ctx, t, f, dirName)
sort.Strings(expectedFiles)
assert.Equal(t, expectedFiles, foundFiles)
}

func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
ctx := context.Background()

opt := map[string]string{}

t.Run("InitState", func(t *testing.T) {
// There should be no lifecycle rules at the outset
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})

t.Run("DryRun", func(t *testing.T) {
// There should still be no lifecycle rules after each dry run operation
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true

opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))

delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))

opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})

t.Run("RealThing", func(t *testing.T) {
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)

delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)

opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
})
}

// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions)
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}

var _ fstests.InternalTester = (*Fs)(nil)

@@ -1,6 +1,6 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/docs/cloud-storage-large-files

package b2

@@ -91,7 +91,7 @@ type largeUpload struct {
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
size := src.Size()
parts := 0
chunkSize := defaultChunkSize
@@ -104,11 +104,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
parts++
}
}

bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(ctx, bucket)
if err != nil {
@@ -118,12 +113,27 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
BucketID: bucketID,
Name: f.opt.Enc.FromStandardPath(bucketPath),
}
optionsToSend := make([]fs.OpenOption, 0, len(options))
if newInfo == nil {
modTime, err := o.getModTime(ctx, src, options)
if err != nil {
return nil, err
}

request.ContentType = fs.MimeType(ctx, src)
request.Info = map[string]string{
timeKey: timeString(modTime),
}
// Custom upload headers - remove header prefix since they are sent in the body
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
request.Info[k[len(headerPrefix):]] = v
} else {
optionsToSend = append(optionsToSend, option)
}
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum || doCopy {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
@@ -134,6 +144,11 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
Options: optionsToSend,
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)

@@ -43,9 +43,9 @@ import (
"github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
)

const (
@@ -64,12 +64,10 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token",
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -256,8 +254,10 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
}

func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if block == nil {
return nil, errors.New("box: failed to PEM decode private key")
}
if len(rest) > 0 {
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
}
@@ -619,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
return shouldRetry(ctx, resp, err)
})
if err != nil {
// fmt.Printf("...Error %v\n", err)
return "", err
}
// fmt.Printf("...Id %q\n", *info.Id)
@@ -966,6 +986,26 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}

// check if dest already exists
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
if err != nil {
return nil, err
}
if item != nil { // dest already exists, need to copy to temp name and then move
tempSuffix := "-rclone-copy-" + random.String(8)
fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
if err != nil {
return nil, err
}
fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
err = f.deleteObject(ctx, item.ID)
if err != nil {
return nil, err
}
return f.Move(ctx, tempObj, remote)
}

// Copy the object
opts := rest.Opts{
Method: "POST",

backend/cache/cache.go (vendored)

@@ -409,18 +409,16 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
decPass, err := obscure.Reveal(opt.PlexPassword)
if err != nil {
decPass = opt.PlexPassword
}
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
m.Set("plex_token", token)
})
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
}
}

backend/cache/cache_internal_test.go (vendored)

@@ -10,7 +10,6 @@ import (
goflag "flag"
"fmt"
"io"
"math/rand"
"os"
"path"
@@ -33,7 +32,7 @@ import (
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/require"
)

@@ -93,7 +92,7 @@ func TestMain(m *testing.M) {
goflag.Parse()
var rc int

fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
runInstance = newRun()
rc = m.Run()
os.Exit(rc)
@@ -123,10 +122,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {

/* TODO: is this testing something?
func TestInternalVfsCache(t *testing.T) {
vfscommon.Opt.DirCacheTime = time.Second * 30
testSize := int64(524288000)

vfscommon.Opt.CacheMode = vfs.CacheModeWrites
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -338,7 +337,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {

func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
@@ -368,7 +367,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {

func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
@@ -408,7 +407,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
// update in the wrapped fs
originalSize, err := runInstance.size(t, rootFs, "data.bin")
require.NoError(t, err)
fs.Logf(nil, "original size: %v", originalSize)

o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
@@ -417,7 +416,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
if runInstance.rootIsCrypt {
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
require.NoError(t, err)
expectedSize++ // FIXME newline gets in, likely test data issue
} else {
data2 = []byte("test content")
}
@@ -425,7 +424,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size())
fs.Logf(nil, "updated size: %v", len(data2))

// get a new instance from the cache
if runInstance.wrappedIsExternal {
@@ -485,49 +484,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
err = runInstance.retryBlock(func() error {
li, err := runInstance.list(t, rootFs, "test")
if err != nil {
fs.Logf(nil, "err: %v", err)
return err
}
if len(li) != 2 {
fs.Logf(nil, "not expected listing /test: %v", li)
return fmt.Errorf("not expected listing /test: %v", li)
}

li, err = runInstance.list(t, rootFs, "test/one")
if err != nil {
fs.Logf(nil, "err: %v", err)
return err
}
if len(li) != 0 {
fs.Logf(nil, "not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}

li, err = runInstance.list(t, rootFs, "test/second")
if err != nil {
fs.Logf(nil, "err: %v", err)
return err
}
if len(li) != 1 {
fs.Logf(nil, "not expected listing /test/second: %v", li)
return fmt.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
fs.Logf(nil, "not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
fs.Logf(nil, "not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
fs.Logf(nil, "unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}

fs.Logf(nil, "complete listing: %v", li)
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -577,43 +576,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
err = runInstance.retryBlock(func() error {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
fs.Logf(nil, "not found /test")
return fmt.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
fs.Logf(nil, "not found /test/one")
return fmt.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
fs.Logf(nil, "not found /test/one/test2")
return fmt.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
fs.Logf(nil, "err: %v", err)
return err
}
if len(li) != 1 {
fs.Logf(nil, "not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
fs.Logf(nil, "not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
fs.Logf(nil, "not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
fs.Logf(nil, "unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
fs.Logf(nil, "complete listing /test/one/test2")
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -708,7 +707,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||
|
||||
func TestInternalExpiredEntriesRemoved(t *testing.T) {
|
||||
id := fmt.Sprintf("tieer%v", time.Now().Unix())
|
||||
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
|
||||
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -743,7 +742,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestInternalBug2117(t *testing.T) {
|
||||
vfsflags.Opt.DirCacheTime = time.Second * 10
|
||||
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)
|
||||
|
||||
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
|
||||
@@ -771,24 +770,24 @@ func TestInternalBug2117(t *testing.T) {
|
||||
|
||||
di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
|
||||
require.NoError(t, err)
|
||||
log.Printf("len: %v", len(di))
|
||||
fs.Logf(nil, "len: %v", len(di))
|
||||
require.Len(t, di, 1)
|
||||
|
||||
time.Sleep(time.Second * 30)
|
||||
|
||||
di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
|
||||
require.NoError(t, err)
|
||||
log.Printf("len: %v", len(di))
|
||||
fs.Logf(nil, "len: %v", len(di))
|
||||
require.Len(t, di, 1)
|
||||
|
||||
di, err = runInstance.list(t, rootFs, "test/dir1")
|
||||
require.NoError(t, err)
|
||||
log.Printf("len: %v", len(di))
|
||||
fs.Logf(nil, "len: %v", len(di))
|
||||
require.Len(t, di, 4)
|
||||
|
||||
di, err = runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
log.Printf("len: %v", len(di))
|
||||
fs.Logf(nil, "len: %v", len(di))
|
||||
require.Len(t, di, 4)
|
||||
}
|
||||
|
||||
@@ -829,7 +828,7 @@ func newRun() *run {
|
||||
} else {
|
||||
r.tmpUploadDir = uploadDir
|
||||
}
|
||||
log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
|
||||
fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
|
||||
|
||||
return r
|
||||
}
|
||||
@@ -850,8 +849,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
|
||||
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
|
||||
fstest.Initialise()
|
||||
remoteExists := false
|
||||
for _, s := range config.FileSections() {
|
||||
if s == remote {
|
||||
for _, s := range config.GetRemotes() {
|
||||
if s.Name == remote {
|
||||
remoteExists = true
|
||||
}
|
||||
}
|
||||
@@ -875,12 +874,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
cacheRemote := remote
|
||||
if !remoteExists {
|
||||
localRemote := remote + "-local"
|
||||
config.FileSet(localRemote, "type", "local")
|
||||
config.FileSet(localRemote, "nounc", "true")
|
||||
config.FileSetValue(localRemote, "type", "local")
|
||||
config.FileSetValue(localRemote, "nounc", "true")
|
||||
m.Set("type", "cache")
|
||||
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
|
||||
} else {
|
||||
remoteType := config.FileGet(remote, "type")
|
||||
remoteType := config.GetValue(remote, "type")
|
||||
if remoteType == "" {
|
||||
t.Skipf("skipped due to invalid remote type for %v", remote)
|
||||
return nil, nil
|
||||
@@ -891,14 +890,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
m.Set("password", cryptPassword1)
|
||||
m.Set("password2", cryptPassword2)
|
||||
}
|
||||
remoteRemote := config.FileGet(remote, "remote")
|
||||
remoteRemote := config.GetValue(remote, "remote")
|
||||
if remoteRemote == "" {
|
||||
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
|
||||
return nil, nil
|
||||
}
|
||||
remoteRemoteParts := strings.Split(remoteRemote, ":")
|
||||
remoteWrapping := remoteRemoteParts[0]
|
||||
remoteType := config.FileGet(remoteWrapping, "type")
|
||||
remoteType := config.GetValue(remoteWrapping, "type")
|
||||
if remoteType != "cache" {
|
||||
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
|
||||
return nil, nil
|
||||
@@ -1192,7 +1191,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
||||
func (r *run) cleanSize(t *testing.T, size int64) int64 {
|
||||
if r.rootIsCrypt {
|
||||
denominator := int64(65536 + 16)
|
||||
size = size - 32
|
||||
size -= 32
|
||||
quotient := size / denominator
|
||||
remainder := size % denominator
|
||||
return (quotient*65536 + remainder - 16)
|
||||
|
||||
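The cleanSize helper above inverts the crypt backend's storage overhead. A minimal standalone sketch of that arithmetic, assuming the rclone crypt format of a 32-byte file header plus 16 bytes of authentication overhead per 64 KiB block:

package main

import "fmt"

// decryptedSize mirrors cleanSize above: strip the 32-byte file header,
// then remove 16 bytes of overhead for each full or partial 64 KiB block.
func decryptedSize(size int64) int64 {
	const denominator = int64(65536 + 16)
	size -= 32
	quotient := size / denominator
	remainder := size % denominator
	return quotient*65536 + remainder - 16
}

func main() {
	// A 1-byte file is stored as 32 + 1 + 16 = 49 bytes.
	fmt.Println(decryptedSize(49)) // 1
	// 64 KiB + 5 bytes is stored as 32 + (65536 + 16) + (5 + 16) = 65605 bytes.
	fmt.Println(decryptedSize(65605)) // 65541
}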
backend/cache/cache_test.go: 2 changes (vendored)
@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})

backend/cache/handle.go: 10 changes (vendored)
@@ -208,7 +208,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)

// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart = chunkStart - offset
chunkStart -= offset
r.queueOffset(chunkStart)
found := false

@@ -327,7 +327,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {

chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
chunkStart -= int64(r.cacheFs().opt.ChunkSize)
}
r.queueOffset(chunkStart)
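Both hunks above replace `x = x - y` with the equivalent `x -= y`; the surrounding logic aligns a read offset down to a chunk boundary. A tiny sketch of that alignment, assuming the cache backend's default 5 MiB chunk size:

package main

import "fmt"

func main() {
	const chunkSize = int64(5 * 1024 * 1024)   // assumed default chunk_size
	offset := int64(7 * 1024 * 1024)           // caller asks to read at 7 MiB
	chunkStart := offset - offset%chunkSize    // align down, as getChunk does
	fmt.Println(chunkStart, offset-chunkStart) // 5242880 2097152
}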
@@ -415,10 +415,8 @@ func (w *worker) run() {
continue
}
}
} else {
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
}
} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
}

chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)

@@ -308,7 +308,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
root: rpath,
opt: *opt,
}
cache.PinUntilFinalized(f.base, f)
f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.

if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {

@@ -326,9 +325,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
if testErr == fs.ErrorIsFile {
f.base = newBase
err = testErr
cache.PinUntilFinalized(f.base, f)
}
}
cache.PinUntilFinalized(f.base, f)

// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {

@@ -988,7 +987,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
}

if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
if o.main == nil && len(o.chunks) == 0 {
// Scanning hasn't found data chunks with conforming names.
if f.useMeta || quickScan {
// Metadata is required but absent and there are no chunks.

@@ -2481,7 +2480,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
if len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig
}
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
return nil, false, errors.New("invalid json")
}
var metadata metaSimpleJSON

@@ -36,6 +36,7 @@ func TestIntegration(t *testing.T) {
"GetTier",
"SetTier",
"Metadata",
"SetMetadata",
},
UnimplementableFsMethods: []string{
"PublicLink",
backend/cloudinary/api/types.go: 48 lines (new file)
@@ -0,0 +1,48 @@
// Package api has type definitions for cloudinary
package api

import (
"fmt"
)

// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
FromStandardPath(string) string
// FromStandardName takes name in Standard encoding and converts
// it in this encoding.
FromStandardName(string) string
// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}

// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
PublicID string
ResourceType string
DeliveryType string
AssetFolder string
DisplayName string
}

// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
return false
}

// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}

backend/cloudinary/cloudinary.go: 711 lines (new file)
@@ -0,0 +1,711 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary

import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"path"
"strconv"
"strings"
"time"

"github.com/cloudinary/cloudinary-go/v2"
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
"github.com/cloudinary/cloudinary-go/v2/api/admin"
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
"github.com/rclone/rclone/backend/cloudinary/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/zeebo/blake3"
)

// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
if somePath == "" || somePath == "." {
return somePath
}
dir := path.Dir(somePath)
if dir == "." {
return ""
}
return dir
}
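A quick sketch of why cldPathDir exists: path.Dir returns "." for bare names, which would otherwise become a stray trailing-dot folder. The expected values in the comments follow directly from the function above:

package main

import (
	"fmt"
	"path"
)

// cldPathDir is copied from the new backend above.
func cldPathDir(somePath string) string {
	if somePath == "" || somePath == "." {
		return somePath
	}
	dir := path.Dir(somePath)
	if dir == "." {
		return ""
	}
	return dir
}

func main() {
	fmt.Println(path.Dir("file.png"))    // "." - the case the helper avoids
	fmt.Println(cldPathDir("file.png"))  // ""
	fmt.Println(cldPathDir("a/b/c.png")) // "a/b"
}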

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "cloudinary",
Description: "Cloudinary",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "cloud_name",
Help: "Cloudinary Environment Name",
Required: true,
Sensitive: true,
},
{
Name: "api_key",
Help: "Cloudinary API Key",
Required: true,
Sensitive: true,
},
{
Name: "api_secret",
Help: "Cloudinary API Secret",
Required: true,
Sensitive: true,
},
{
Name: "upload_prefix",
Help: "Specify the API endpoint for environments out of the US",
},
{
Name: "upload_preset",
Help: "Upload Preset to select asset manipulation on upload",
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeDoubleQuote |
encoder.EncodeQuestion |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
{
Name: "eventually_consistent_delay",
Default: fs.Duration(0),
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
},
})
}

// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
}

// Fs represents a remote cloudinary server
type Fs struct {
name string
root string
opt Options
features *fs.Features
pacer *fs.Pacer
srv *rest.Client // For downloading assets via the Cloudinary CDN
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
lastCRUD time.Time
}

// Object describes a cloudinary object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
url string
md5sum string
publicID string
resourceType string
deliveryType string
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}

// Initialize the Cloudinary client
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
if err != nil {
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
}
cld.Admin.Client = *fshttp.NewClient(ctx)
cld.Upload.Client = *fshttp.NewClient(ctx)
if opt.UploadPrefix != "" {
cld.Config.API.UploadPrefix = opt.UploadPrefix
}
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
cld: cld,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
srv: rest.NewClient(client),
}

f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)

if root != "" {
// Check to see if the root actually an existing file
remote := path.Base(root)
f.root = cldPathDir(root)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the previous root
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}

// ------------------------------------------------------------

// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}

// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}

// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}

// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
}

// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}

// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
return strings.ReplaceAll(dir, "%", "%25")
}

// ToDisplayNameElastic encodes a special case of elasticsearch
func (f *Fs) ToDisplayNameElastic(dir string) string {
return strings.ReplaceAll(dir, "!", "\\!")
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}

// WaitEventuallyConsistent waits till the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
return
}
delay := time.Duration(f.opt.EventuallyConsistentDelay)
timeSinceLastCRUD := time.Since(f.lastCRUD)
if timeSinceLastCRUD < delay {
time.Sleep(delay - timeSinceLastCRUD)
}
}
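A minimal sketch of the wait logic above: only the part of the configured delay that has not already elapsed since the last write is slept off, so back-to-back reads after one write pay the cost once. The values are made up:

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 7 * time.Second                     // e.g. eventually_consistent_delay = 7
	lastCRUD := time.Now().Add(-5 * time.Second) // pretend the last write was 5s ago
	if since := time.Since(lastCRUD); since < delay {
		fmt.Println("would sleep for", delay-since) // roughly 2s, not the full 7s
	}
}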

// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Cloudinary root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}

// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
remotePrefix := f.FromStandardFullPath(dir)
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
remotePrefix += "/"
}

var entries fs.DirEntries
dirs := make(map[string]struct{})
nextCursor := ""
f.WaitEventuallyConsistent()
for {
// user the folders api to list folders.
folderParams := admin.SubFoldersParams{
Folder: f.ToAssetFolderAPI(remotePrefix),
MaxResults: 500,
}
if nextCursor != "" {
folderParams.NextCursor = nextCursor
}

results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
}
if results.Error.Message != "" {
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
return nil, fs.ErrorDirNotFound
}

return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
}

for _, folder := range results.Folders {
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
parts := strings.Split(relativePath, "/")

// It's a directory
dirName := parts[len(parts)-1]
if _, found := dirs[dirName]; !found {
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
entries = append(entries, d)
dirs[dirName] = struct{}{}
}
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}

for {
// Use the assets.AssetsByAssetFolder API to list assets
assetsParams := admin.AssetsByAssetFolderParams{
AssetFolder: remotePrefix,
MaxResults: 500,
}
if nextCursor != "" {
assetsParams.NextCursor = nextCursor
}

results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
if err != nil {
return nil, fmt.Errorf("failed to list assets: %w", err)
}

for _, asset := range results.Assets {
remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
if dir != "" {
remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
}
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.CreatedAt,
url: asset.SecureURL,
publicID: asset.PublicID,
resourceType: asset.AssetType,
deliveryType: asset.Type,
}
entries = append(entries, o)
}

// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}

return entries, nil
}

// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
searchParams := search.Query{
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
f.FromStandardFullPath(cldPathDir(remote)),
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
MaxResults: 2,
}
var results *admin.SearchResult
f.WaitEventuallyConsistent()
err := f.pacer.Call(func() (bool, error) {
var err1 error
results, err1 = f.cld.Admin.Search(ctx, searchParams)
if err1 == nil && results.TotalCount != len(results.Assets) {
err1 = errors.New("partial response so waiting for eventual consistency")
}
return shouldRetry(ctx, nil, err1)
})
if err != nil {
return nil, fs.ErrorObjectNotFound
}
if results.TotalCount == 0 || len(results.Assets) == 0 {
return nil, fs.ErrorObjectNotFound
}
asset := results.Assets[0]

o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.UploadedAt,
url: asset.SecureURL,
md5sum: asset.Etag,
publicID: asset.PublicID,
resourceType: asset.ResourceType,
deliveryType: asset.Type,
}

return o, nil
}

func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
payload := []byte(path.Join(assetFolder, displayName))
hash := blake3.Sum256(payload)
return hex.EncodeToString(hash[:])
}
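getSuggestedPublicID above derives a stable public ID from a BLAKE3 hash of the folder/name pair (the modTime argument is accepted but not hashed). A standalone sketch of that determinism, using the same github.com/zeebo/blake3 package; the folder and file names are made up:

package main

import (
	"encoding/hex"
	"fmt"
	"path"

	"github.com/zeebo/blake3"
)

// suggestedPublicID mirrors the function above without the unused modTime.
func suggestedPublicID(assetFolder, displayName string) string {
	payload := []byte(path.Join(assetFolder, displayName))
	sum := blake3.Sum256(payload)
	return hex.EncodeToString(sum[:])
}

func main() {
	a := suggestedPublicID("photos/2024", "cat.jpg")
	b := suggestedPublicID("photos/2024", "cat.jpg")
	fmt.Println(a == b) // true - re-uploads map to the same public ID
}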

// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}

params := uploader.UploadParams{
UploadPreset: f.opt.UploadPreset,
}

updateObject := false
var modTime time.Time
for _, option := range options {
if updateOptions, ok := option.(*api.UpdateOptions); ok {
if updateOptions.PublicID != "" {
updateObject = true
params.Overwrite = SDKApi.Bool(true)
params.Invalidate = SDKApi.Bool(true)
params.PublicID = updateOptions.PublicID
params.ResourceType = updateOptions.ResourceType
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
params.AssetFolder = updateOptions.AssetFolder
params.DisplayName = updateOptions.DisplayName
modTime = src.ModTime(ctx)
}
}
}
if !updateObject {
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
}
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
f.lastCRUD = time.Now()
if err != nil {
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
}
if !updateObject {
modTime = uploadResult.CreatedAt
}
if uploadResult.Error.Message != "" {
return nil, errors.New(uploadResult.Error.Message)
}

o := &Object{
fs: f,
remote: src.Remote(),
size: int64(uploadResult.Bytes),
modTime: modTime,
url: uploadResult.SecureURL,
md5sum: uploadResult.Etag,
publicID: uploadResult.PublicID,
resourceType: uploadResult.ResourceType,
deliveryType: uploadResult.Type,
}
return o, nil
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}

// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}

// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
res, err := f.cld.Admin.CreateFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}

return nil
}

// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Additional test because Cloudinary will delete folders without
// assets, regardless of empty sub-folders
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
folderParams := admin.SubFoldersParams{
Folder: folder,
MaxResults: 1,
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return err
}
if results.TotalCount > 0 {
return fs.ErrorDirectoryNotEmpty
}

params := admin.DeleteFolderParams{Folder: folder}
res, err := f.cld.Admin.DeleteFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
return fs.ErrorDirNotFound
}

return errors.New(res.Error.Message)
}

return nil
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
420, // Too Many Requests (legacy)
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
tryAgain := "Try again on "
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
layout := "2006-01-02 15:04:05 UTC"
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
timestamp, err2 := time.Parse(layout, dateStr)
if err2 == nil {
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
}
}

fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}

return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
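The interesting branch in shouldRetry above pulls a retry deadline out of the error text. A standalone sketch of that parsing; the message body is illustrative, only the "Try again on " prefix and the layout string come from the code above:

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	msg := "Rate limit reached. Try again on 2030-05-01 10:00:00 UTC."
	const tryAgain = "Try again on "
	const layout = "2006-01-02 15:04:05 UTC"
	if idx := strings.Index(msg, tryAgain); idx != -1 {
		// Slice out exactly len(layout) characters after the prefix.
		dateStr := msg[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
		if when, err := time.Parse(layout, dateStr); err == nil {
			fmt.Println("retry after:", time.Until(when))
		}
	}
}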

// ------------------------------------------------------------

// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}

// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}

// Size of object in bytes
func (o *Object) Size() int64 {
return o.size
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.url,
Options: options,
}
var offset int64
var count int64
var key string
var value string
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
key, value = option.Header()
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
key, value = option.Header()
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if key != "" && value != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders[key] = value
}
// Make sure that the asset is fully available
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err == nil {
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
if clErr == nil && count == int64(cl) {
return false, nil
}
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
}
return resp.Body, err
}

// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
options = append(options, &api.UpdateOptions{
PublicID: o.publicID,
ResourceType: o.resourceType,
DeliveryType: o.deliveryType,
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
})
updatedObj, err := o.fs.Put(ctx, in, src, options...)
if err != nil {
return err
}
if uo, ok := updatedObj.(*Object); ok {
o.size = uo.size
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
o.url = uo.url
o.md5sum = uo.md5sum
o.publicID = uo.publicID
o.resourceType = uo.resourceType
o.deliveryType = uo.deliveryType
}
return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := uploader.DestroyParams{
PublicID: o.publicID,
ResourceType: o.resourceType,
Type: o.deliveryType,
}
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
o.fs.lastCRUD = time.Now()
if dErr != nil {
return dErr
}

if res.Error.Message != "" {
return errors.New(res.Error.Message)
}

if res.Result != "ok" {
return errors.New(res.Result)
}

return nil
}

backend/cloudinary/cloudinary_test.go: 23 lines (new file)
@@ -0,0 +1,23 @@
// Test Cloudinary filesystem interface

package cloudinary_test

import (
"testing"

"github.com/rclone/rclone/backend/cloudinary"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestCloudinary"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*cloudinary.Object)(nil),
SkipInvalidUTF8: true,
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "eventually_consistent_delay", Value: "7"},
},
})
}
@@ -1119,6 +1119,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}

// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}

// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {

@@ -38,6 +38,7 @@ import (
const (
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
chunkStreams = 0 // Streams to use for reading

bufferSize = 8388608
heuristicBytes = 1048576

@@ -1286,6 +1287,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}

// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {

@@ -1351,7 +1363,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
// Get a chunkedreader for the wrapped object
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
// Get file handle
var file io.Reader
if offset != 0 {

@@ -329,7 +329,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
for _, runeValue := range plaintext {
dir += int(runeValue)
}
dir = dir % 256
dir %= 256

// We'll use this number to store in the result filename...
var result bytes.Buffer

@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if pos >= 26 {
pos -= 6
}
pos = pos - thisdir
pos -= thisdir
if pos < 0 {
pos += 52
}
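The deobfuscateSegment hunk above undoes a forward rotation over a 52-letter alphabet; the wrap-around is the part worth seeing in isolation. A tiny sketch with made-up values:

package main

import "fmt"

func main() {
	const alphabet = 52
	pos, thisdir := 3, 7
	pos -= thisdir // undo the forward rotation applied on obfuscation
	if pos < 0 {
		pos += alphabet // wrap around instead of going negative
	}
	fmt.Println(pos) // 48
}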
@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
(*fh.buf)[i] = 0
fh.buf[i] = 0
}
}
fh.bufIndex = 0

@@ -1248,6 +1248,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}

// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}

// MimeType returns the content type of the Object if
// known, or "" if not
//

@@ -80,9 +80,10 @@ const (
// Globals
var (
// Description of how to auth for this app
driveConfig = &oauth2.Config{
driveConfig = &oauthutil.Config{
Scopes: []string{scopePrefix + "drive"},
Endpoint: google.Endpoint,
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,

@@ -120,6 +121,7 @@ var (
"text/html": ".html",
"text/plain": ".txt",
"text/tab-separated-values": ".tsv",
"text/markdown": ".md",
}
_mimeTypeToExtensionLinks = map[string]string{
"application/x-link-desktop": ".desktop",

@@ -151,6 +153,7 @@ func (rwChoices) Choices() []fs.BitsChoicesInfo {
{Bit: uint64(rwOff), Name: "off"},
{Bit: uint64(rwRead), Name: "read"},
{Bit: uint64(rwWrite), Name: "write"},
{Bit: uint64(rwFailOK), Name: "failok"},
}
}

@@ -160,6 +163,7 @@ type rwChoice = fs.Bits[rwChoices]
const (
rwRead rwChoice = 1 << iota
rwWrite
rwFailOK
rwOff rwChoice = 0
)

@@ -173,6 +177,9 @@ var rwExamples = fs.OptionExamples{{
}, {
Value: rwWrite.String(),
Help: "Write the value only",
}, {
Value: rwFailOK.String(),
Help: "If writing fails log errors only, don't fail the transfer",
}, {
Value: (rwRead | rwWrite).String(),
Help: "Read and Write the value.",

@@ -196,7 +203,6 @@ func driveScopesContainsAppFolder(scopes []string) bool {
if scope == scopePrefix+"drive.appfolder" {
return true
}

}
return false
}

@@ -1205,6 +1211,7 @@ func fixMimeType(mimeTypeIn string) string {
}
return mimeTypeOut
}

func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
out = make(map[string][]string, len(in))
for k, v := range in {

@@ -1215,9 +1222,11 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
}
return out
}

func isInternalMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}

func isLinkMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/x-link-")
}

@@ -1650,7 +1659,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
ctx context.Context, remote string, info *drive.File,
extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
extension, exportName, exportMimeType string, isDocument bool,
) (o fs.Object, err error) {
// Note that resolveShortcut will have been called already if
// we are being called from a listing. However the drive.Item
// will have been resolved so this will do nothing.

@@ -1747,10 +1757,9 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
leaf = f.opt.Enc.FromStandardName(leaf)
pathID = actualID(pathID)
createInfo := &drive.File{
Name: leaf,
Description: leaf,
MimeType: driveFolderType,
Parents: []string{pathID},
Name: leaf,
MimeType: driveFolderType,
Parents: []string{pathID},
}
var updateMetadata updateMetadataFn
if len(metadata) > 0 {

@@ -1842,6 +1851,7 @@ func linkTemplate(mt string) *template.Template {
})
return _linkTemplates[mt]
}

func (f *Fs) fetchFormats(ctx context.Context) {
fetchFormatsOnce.Do(func() {
var about *drive.About

@@ -1887,7 +1897,8 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
extension, mimeType string, isDocument bool) {
extension, mimeType string, isDocument bool,
) {
exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
if isDocument {
for _, _extension := range f.exportExtensions {

@@ -2215,7 +2226,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case in <- job:
default:
overflow = append(overflow, job)
wg.Add(-1)
wg.Done()
}
}

@@ -2430,7 +2441,6 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
// Define the metadata for the file we are going to create.
createInfo := &drive.File{
Name: leaf,
Description: leaf,
Parents: []string{directoryID},
ModifiedTime: modTime.Format(timeFormatOut),
}

@@ -2684,7 +2694,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if shortcutID != "" {
return f.delete(ctx, shortcutID, f.opt.UseTrash)
}
var trashedFiles = false
trashedFiles := false
if check {
found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
if !item.Trashed {

@@ -2830,7 +2840,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// FIXME remove this when google fixes the problem!
if isDoc {
// A short sleep is needed here in order to make the
// change effective, without it is is ignored. This is
// change effective, without it is ignored. This is
// probably some eventual consistency nastiness.
sleepTime := 2 * time.Second
fs.Debugf(f, "Sleeping for %v before setting the modtime to work around drive bug - see #4517", sleepTime)

@@ -2921,7 +2931,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
return f.shouldRetry(ctx, err)
})

if err != nil {
return err
}

@@ -3182,6 +3191,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
}()
}

func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) {

@@ -3520,14 +3530,14 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
return f.unTrash(ctx, dir, directoryID, true)
}

// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
// copy or move file with id to dest
func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.getFileFields(ctx))
if err != nil {
return fmt.Errorf("couldn't find id: %w", err)
}
if info.MimeType == driveFolderType {
return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
return fmt.Errorf("can't %s directory use: rclone %s --drive-root-folder-id %s %s %s", operation, operation, id, fs.ConfigString(f), dest)
}
info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(ctx, info.Name, info)

@@ -3548,14 +3558,21 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
if err != nil {
return err
}
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
if err != nil {
return fmt.Errorf("copy failed: %w", err)

var opErr error
if operation == "moveid" {
_, opErr = operations.Move(ctx, dstFs, nil, destLeaf, o)
} else {
_, opErr = operations.Copy(ctx, dstFs, nil, destLeaf, o)
}
if opErr != nil {
return fmt.Errorf("%s failed: %w", operation, opErr)
}
return nil
}

func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
// Run the drive query calling fn on each entry found
func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (err error) {
list := f.svc.Files.List()
if query != "" {
list.Q(query)

@@ -3574,10 +3591,7 @@ func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, er
if f.rootFolderID == "appDataFolder" {
list.Spaces("appDataFolder")
}

fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))

var results []*drive.File
for {
var files *drive.FileList
err = f.pacer.Call(func() (bool, error) {

@@ -3585,20 +3599,66 @@ func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, er
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("failed to execute query: %w", err)
return fmt.Errorf("failed to execute query: %w", err)
}
if files.IncompleteSearch {
fs.Errorf(f, "search result INCOMPLETE")
}
results = append(results, files.Files...)
for _, item := range files.Files {
fn(item)
}
if files.NextPageToken == "" {
break
}
list.PageToken(files.NextPageToken)
}
return nil
}

// Run the drive query returning the entries found
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
var results []*drive.File
err = f.queryFn(ctx, query, func(item *drive.File) {
results = append(results, item)
})
if err != nil {
return nil, err
}
return results, nil
}
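The refactor above turns query into a thin wrapper over queryFn, which owns the page loop and hands each drive.File to a callback. A minimal sketch of that callback-pagination shape, with a stubbed page map standing in for the Files.List call:

package main

import "fmt"

type page struct {
	items []string
	next  string // empty means last page
}

// forEach walks every page and hands each item to fn - the queryFn shape.
func forEach(pages map[string]page, fn func(string)) {
	token := ""
	for {
		p := pages[token] // stand-in for one Files.List call
		for _, it := range p.items {
			fn(it)
		}
		if p.next == "" {
			return
		}
		token = p.next
	}
}

func main() {
	pages := map[string]page{
		"":   {items: []string{"a", "b"}, next: "t1"},
		"t1": {items: []string{"c"}},
	}
	forEach(pages, func(s string) { fmt.Println(s) })
}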
|
||||
// Rescue, list or delete orphaned files
|
||||
func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error) {
|
||||
return f.queryFn(ctx, "'me' in owners and trashed=false", func(item *drive.File) {
|
||||
if len(item.Parents) != 0 {
|
||||
return
|
||||
}
|
||||
// Have found an orphaned entry
|
||||
if delete {
|
||||
fs.Infof(item.Name, "Deleting orphan %q into trash", item.Id)
|
||||
err = f.delete(ctx, item.Id, true)
|
||||
if err != nil {
|
||||
fs.Errorf(item.Name, "Failed to delete orphan %q: %v", item.Id, err)
|
||||
}
|
||||
} else if dirID == "" {
|
||||
operations.SyncPrintf("%q, %q\n", item.Name, item.Id)
|
||||
} else {
|
||||
fs.Infof(item.Name, "Rescuing orphan %q", item.Id)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Files.Update(item.Id, nil).
|
||||
AddParents(dirID).
|
||||
Fields(f.getFileFields(ctx)).
|
||||
SupportsAllDrives(true).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(item.Name, "Failed to rescue orphan %q: %v", item.Id, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var commandHelp = []fs.CommandHelp{{
|
||||
Name: "get",
|
||||
Short: "Get command for fetching the drive config parameters",
|
||||
@@ -3743,6 +3803,28 @@ attempted if possible.
|
||||
|
||||
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
|
||||
`,
|
||||
}, {
|
||||
Name: "moveid",
|
||||
Short: "Move files by ID",
|
||||
Long: `This command moves files by ID
|
||||
|
||||
Usage:
|
||||
|
||||
rclone backend moveid drive: ID path
|
||||
rclone backend moveid drive: ID1 path1 ID2 path2
|
||||
|
||||
It moves the drive file with ID given to the path (an rclone path which
|
||||
will be passed internally to rclone moveto).
|
||||
|
||||
The path should end with a / to indicate move the file as named to
|
||||
this directory. If it doesn't end with a / then the last path
|
||||
component will be used as the file name.
|
||||
|
||||
If the destination is a drive backend then server-side moving will be
|
||||
attempted if possible.
|
||||
|
||||
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
|
||||
`,
|
||||
}, {
|
||||
Name: "exportformats",
|
||||
Short: "Dump the export formats for debug purposes",
|
||||
@@ -3773,7 +3855,7 @@ file named "foo ' \.txt":
|
||||
|
||||
The result is a JSON array of matches, for example:
|
||||
|
||||
[
|
||||
[
|
||||
{
|
||||
"createdTime": "2017-06-29T19:58:28.537Z",
|
||||
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
|
||||
@@ -3789,7 +3871,38 @@ The result is a JSON array of matches, for example:
|
||||
"size": "311",
|
||||
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
|
||||
}
|
||||
]`,
|
||||
]`,
|
||||
}, {
|
||||
Name: "rescue",
|
||||
Short: "Rescue or delete any orphaned files",
|
||||
Long: `This command rescues or deletes any orphaned files or directories.
|
||||
|
||||
Sometimes files can get orphaned in Google Drive. This means that they
|
||||
are no longer in any folder in Google Drive.
|
||||
|
||||
This command finds those files and either rescues them to a directory
|
||||
you specify or deletes them.
|
||||
|
||||
Usage:
|
||||
|
||||
This can be used in 3 ways.
|
||||
|
||||
First, list all orphaned files
|
||||
|
||||
rclone backend rescue drive:
|
||||
|
||||
Second rescue all orphaned files to the directory indicated
|
||||
|
||||
rclone backend rescue drive: "relative/path/to/rescue/directory"
|
||||
|
||||
e.g. To rescue all orphans to a directory called "Orphans" in the top level
|
||||
|
||||
rclone backend rescue drive: Orphans
|
||||
|
||||
Third delete all orphaned files to the trash
|
||||
|
||||
rclone backend rescue drive: -o delete
|
||||
`,
|
||||
}}
|
||||
|
||||
// Command the backend to run a named command
|
||||
@@ -3890,16 +4003,16 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
dir = arg[0]
|
||||
}
|
||||
return f.unTrashDir(ctx, dir, true)
|
||||
case "copyid":
|
||||
case "copyid", "moveid":
|
||||
if len(arg)%2 != 0 {
|
||||
return nil, errors.New("need an even number of arguments")
|
||||
}
|
||||
for len(arg) > 0 {
|
||||
id, dest := arg[0], arg[1]
|
||||
arg = arg[2:]
|
||||
err = f.copyID(ctx, id, dest)
|
||||
err = f.copyOrMoveID(ctx, name, id, dest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
|
||||
return nil, fmt.Errorf("failed %s %q to %q: %w", name, id, dest, err)
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
@@ -3910,14 +4023,29 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
case "query":
|
||||
if len(arg) == 1 {
|
||||
query := arg[0]
|
||||
var results, err = f.query(ctx, query)
|
||||
results, err := f.query(ctx, query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
|
||||
}
|
||||
return results, nil
|
||||
} else {
|
||||
return nil, errors.New("need a query argument")
|
||||
}
|
||||
return nil, errors.New("need a query argument")
|
||||
case "rescue":
|
||||
dirID := ""
|
||||
_, delete := opt["delete"]
|
||||
if len(arg) == 0 {
|
||||
// no arguments - list only
|
||||
} else if !delete && len(arg) == 1 {
|
||||
dir := arg[0]
|
||||
dirID, err = f.dirCache.FindDir(ctx, dir, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find or create rescue directory %q: %w", dir, err)
|
||||
}
|
||||
fs.Infof(f, "Rescuing orphans into %q", dir)
|
||||
} else {
|
||||
return nil, errors.New("syntax error: need 0 or 1 args or -o delete")
|
||||
}
|
||||
return nil, f.rescue(ctx, dirID, delete)
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
@@ -3961,8 +4089,9 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.MD5 {
|
||||
if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return "", nil
|
||||
@@ -3975,7 +4104,8 @@ func (o *baseObject) Size() int64 {
|
||||
|
||||
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
|
||||
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
|
||||
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
|
||||
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
|
||||
) {
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
@@ -4188,12 +4318,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
return o.baseObject.open(ctx, o.url, options...)
|
||||
}
|
||||
|
||||
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Update the size with what we are reading as it can change from
|
||||
// the HEAD in the listing to this GET. This stops rclone marking
|
||||
// the transfer as corrupted.
|
||||
var offset, end int64 = 0, -1
|
||||
var newOptions = options[:0]
|
||||
newOptions := options[:0]
|
||||
for _, o := range options {
|
||||
// Note that Range requests don't work on Google docs:
|
||||
// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
|
||||
@@ -4220,9 +4351,10 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
var offset, limit int64 = 0, -1
|
||||
var data = o.content
|
||||
data := o.content
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
@@ -4247,7 +4379,8 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
|
||||
}
|
||||
|
||||
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
|
||||
src fs.ObjectInfo) (info *drive.File, err error) {
|
||||
src fs.ObjectInfo,
|
||||
) (info *drive.File, err error) {
|
||||
// Make the API request to upload metadata and file data.
|
||||
size := src.Size()
|
||||
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
|
||||
@@ -4325,6 +4458,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
srcMimeType := fs.MimeType(ctx, src)
|
||||
importMimeType := ""
|
||||
@@ -4420,6 +4554,7 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
|
||||
func (o *documentObject) ext() string {
|
||||
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
|
||||
}
|
||||
|
||||
func (o *linkObject) ext() string {
|
||||
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
|
||||
}
|
||||
|
||||
@@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
 		wantErr  error
 	}{
 		{"doc", []string{".doc"}, nil},
-		{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
+		{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
 		{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
 		{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
 	} {
@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
 	require.NoError(t, f.Purge(ctx, "trashDir"))
 }

-// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
-func (f *Fs) InternalTestCopyID(t *testing.T) {
+// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
+func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
 	ctx := context.Background()
 	obj, err := f.NewObject(ctx, existingFile)
 	require.NoError(t, err)
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
 	}

 	t.Run("BadID", func(t *testing.T) {
-		err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
+		err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
 		require.Error(t, err)
 		assert.Contains(t, err.Error(), "couldn't find id")
 	})
@@ -506,19 +506,31 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
 	t.Run("Directory", func(t *testing.T) {
 		rootID, err := f.dirCache.RootID(ctx, false)
 		require.NoError(t, err)
-		err = f.copyID(ctx, rootID, dir+"/")
+		err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
 		require.Error(t, err)
-		assert.Contains(t, err.Error(), "can't copy directory")
+		assert.Contains(t, err.Error(), "can't moveid directory")
 	})

-	t.Run("WithoutDestName", func(t *testing.T) {
-		err = f.copyID(ctx, o.id, dir+"/")
+	t.Run("MoveWithoutDestName", func(t *testing.T) {
+		err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
 		require.NoError(t, err)
 		checkFile(path.Base(existingFile))
 	})

-	t.Run("WithDestName", func(t *testing.T) {
-		err = f.copyID(ctx, o.id, dir+"/potato.txt")
+	t.Run("CopyWithoutDestName", func(t *testing.T) {
+		err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
+		require.NoError(t, err)
+		checkFile(path.Base(existingFile))
+	})
+
+	t.Run("MoveWithDestName", func(t *testing.T) {
+		err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
+		require.NoError(t, err)
+		checkFile("potato.txt")
+	})
+
+	t.Run("CopyWithDestName", func(t *testing.T) {
+		err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
 		require.NoError(t, err)
 		checkFile("potato.txt")
 	})
@@ -551,9 +563,11 @@ func (f *Fs) InternalTestQuery(t *testing.T) {

 		results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
 		require.NoError(t, err)
-		require.Len(t, results, 1)
-		assert.Len(t, results[0].Id, 33)
-		assert.Equal(t, results[0].Name, item)
+		require.True(t, len(results) > 0)
+		for _, result := range results {
+			assert.True(t, len(result.Id) > 0)
+			assert.Equal(t, result.Name, item)
+		}
 		parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
 	}
 	})
@@ -564,7 +578,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
 	// Check set up for filtering
 	assert.True(t, f.Features().FilterAware)

-	opt := &filter.Opt{}
+	opt := &filter.Options{}
 	err := opt.MaxAge.Set("1h")
 	assert.NoError(t, err)
 	flt, err := filter.NewFilter(opt)
@@ -645,7 +659,7 @@ func (f *Fs) InternalTest(t *testing.T) {
 	})
 	t.Run("Shortcuts", f.InternalTestShortcuts)
 	t.Run("UnTrash", f.InternalTestUnTrash)
-	t.Run("CopyID", f.InternalTestCopyID)
+	t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
 	t.Run("Query", f.InternalTestQuery)
 	t.Run("AgeQuery", f.InternalTestAgeQuery)
 	t.Run("ShouldRetry", f.InternalTestShouldRetry)
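The renamed test above exercises a single copyOrMoveID helper that dispatches on an operation string ("copyid" or "moveid") instead of a copy-only copyID. A toy sketch of that dispatch, with hypothetical stand-in logic that mirrors the error strings the assertions check:

    package main

    import (
        "errors"
        "fmt"
    )

    // copyOrMoveID is a stand-in for the backend helper the tests call:
    // one entry point, parameterised by operation name.
    func copyOrMoveID(op, id, dest string) error {
        if op != "copyid" && op != "moveid" {
            return fmt.Errorf("unknown operation %q", op)
        }
        switch id {
        case "ID-NOT-FOUND":
            return errors.New("couldn't find id")
        case "rootID":
            // matches the "can't moveid directory" assertion above
            return fmt.Errorf("can't %s directory", op)
        }
        fmt.Printf("%s %s -> %s\n", op, id, dest)
        return nil
    }

    func main() {
        fmt.Println(copyOrMoveID("moveid", "rootID", "dir/"))
        fmt.Println(copyOrMoveID("copyid", "abc123", "dir/potato.txt"))
    }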
@@ -152,7 +152,7 @@ func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions [
 			return f.shouldRetry(ctx, err)
 		})
 		if err != nil {
-			fs.Errorf(f, "Failed to set permission: %v", err)
+			fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err)
 			errs.Add(err)
 		}
 	}
@@ -262,7 +262,7 @@ func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.La
 		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return fmt.Errorf("failed to set owner: %w", err)
+		return fmt.Errorf("failed to set labels: %w", err)
 	}
 	return nil
 }
@@ -372,6 +372,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
 	// shared drives.
 	if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
+		// Don't process permissions if there aren't any specifically set
 		fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds))
 		info.Permissions = nil
 		info.PermissionIds = nil
 	}
@@ -553,7 +554,12 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
 		}
 		// Can't set Owner on upload so need to set afterwards
 		callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
-			return f.setOwner(ctx, info, v)
+			err := f.setOwner(ctx, info, v)
+			if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) {
+				fs.Errorf(f, "Ignoring error as failok is set: %v", err)
+				return nil
+			}
+			return err
 		})
 	case "permissions":
 		if !f.opt.MetadataPermissions.IsSet(rwWrite) {
@@ -566,7 +572,13 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
 		}
 		// Can't set Permissions on upload so need to set afterwards
 		callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
-			return f.setPermissions(ctx, info, perms)
+			err := f.setPermissions(ctx, info, perms)
+			if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) {
+				// We've already logged the permissions errors individually here
+				fs.Debugf(f, "Ignoring error as failok is set: %v", err)
+				return nil
+			}
+			return err
 		})
 	case "labels":
 		if !f.opt.MetadataLabels.IsSet(rwWrite) {
@@ -579,7 +591,12 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
 		}
 		// Can't set Labels on upload so need to set afterwards
 		callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
-			return f.setLabels(ctx, info, labels)
+			err := f.setLabels(ctx, info, labels)
+			if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) {
+				fs.Errorf(f, "Ignoring error as failok is set: %v", err)
+				return nil
+			}
+			return err
 		})
 	case "folder-color-rgb":
 		updateInfo.FolderColorRgb = v
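All three callbacks above now share the same shape: run the setter, and if it fails while the corresponding metadata option has the failok bit set, log and swallow the error. A self-contained sketch of that pattern factored into one wrapper; the helper and bit names are illustrative, and the real code repeats the check inline per metadata key:

    package main

    import (
        "errors"
        "fmt"
        "log"
    )

    type rwBits int

    const (
        rwWrite rwBits = 1 << iota
        rwFailOK
    )

    func (b rwBits) IsSet(bit rwBits) bool { return b&bit != 0 }

    // ignoreIfFailOK downgrades an error to a logged warning when the
    // failok bit is set, otherwise passes it through.
    func ignoreIfFailOK(opt rwBits, err error) error {
        if err != nil && opt.IsSet(rwFailOK) {
            log.Printf("Ignoring error as failok is set: %v", err)
            return nil
        }
        return err
    }

    func main() {
        setOwner := func() error { return errors.New("permission denied") }
        fmt.Println(ignoreIfFailOK(rwWrite|rwFailOK, setOwner())) // <nil>
        fmt.Println(ignoreIfFailOK(rwWrite, setOwner()))          // permission denied
    }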
@@ -11,7 +11,6 @@ import (
 	"fmt"

 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
-	"github.com/rclone/rclone/fs/fserrors"
 )

 // finishBatch commits the batch, returning a batch status to poll or maybe complete
@@ -21,14 +20,10 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		complete, err = f.srv.UploadSessionFinishBatchV2(arg)
-		// If error is insufficient space then don't retry
-		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
-			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
-				err = fserrors.NoRetryError(err)
-				return false, err
-			}
+		if retry, err := shouldRetryExclude(ctx, err); !retry {
+			return retry, err
 		}
-		// after the first chunk is uploaded, we retry everything
+		// after the first chunk is uploaded, we retry everything except the excluded errors
 		return err != nil, err
 	})
 	if err != nil {
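The closure handed to f.pacer.Call above follows the pacer contract: return (retry, err), and the pacer keeps calling while retry is true. A toy pacer showing just that contract; it is a hypothetical stand-in for rclone's fs.Pacer, which additionally applies backoff between attempts:

    package main

    import (
        "errors"
        "fmt"
    )

    // call retries fn until it reports retry == false or the attempt
    // budget is exhausted, returning the last error either way.
    func call(maxTries int, fn func() (bool, error)) error {
        var err error
        for i := 0; i < maxTries; i++ {
            var retry bool
            retry, err = fn()
            if !retry {
                return err
            }
        }
        return err
    }

    func main() {
        tries := 0
        err := call(5, func() (bool, error) {
            tries++
            if tries < 3 {
                return true, errors.New("transient") // ask for another attempt
            }
            return false, nil // done - stop retrying
        })
        fmt.Println(tries, err) // 3 <nil>
    }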
@@ -47,6 +47,7 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
@@ -93,7 +94,7 @@ const (

 var (
 	// Description of how to auth for this app
-	dropboxConfig = &oauth2.Config{
+	dropboxConfig = &oauthutil.Config{
 		Scopes: []string{
 			"files.metadata.write",
 			"files.content.write",
@@ -108,7 +109,8 @@ var (
 		// 	AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
 		// 	TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
 		// },
-		Endpoint:     dropbox.OAuthEndpoint(""),
+		AuthURL:      dropbox.OAuthEndpoint("").AuthURL,
+		TokenURL:     dropbox.OAuthEndpoint("").TokenURL,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -133,7 +135,7 @@ var (
 )

 // Gets an oauth config with the right scopes
-func getOauthConfig(m configmap.Mapper) *oauth2.Config {
+func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
 	// If not impersonating, use standard scopes
 	if impersonate, _ := m.Get("impersonate"); impersonate == "" {
 		return dropboxConfig
@@ -216,7 +218,10 @@ are supported.

 Note that we don't unmount the shared folder afterwards so the
 --dropbox-shared-folders can be omitted after the first use of a particular
-shared folder.`,
+shared folder.
+
+See also --dropbox-root-namespace for an alternative way to work with shared
+folders.`,
 			Default:  false,
 			Advanced: true,
 		}, {
@@ -237,6 +242,11 @@ shared folder.`,
 				encoder.EncodeDel |
 				encoder.EncodeRightSpace |
 				encoder.EncodeInvalidUtf8,
+		}, {
+			Name:     "root_namespace",
+			Help:     "Specify a different Dropbox namespace ID to use as the root for all paths.",
+			Default:  "",
+			Advanced: true,
 		}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
 	})
 }
@@ -253,6 +263,7 @@ type Options struct {
 	AsyncBatch    bool                 `config:"async_batch"`
 	PacerMinSleep fs.Duration          `config:"pacer_min_sleep"`
 	Enc           encoder.MultiEncoder `config:"encoding"`
+	RootNsid      string               `config:"root_namespace"`
 }

 // Fs represents a remote dropbox server
@@ -307,32 +318,46 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

-// shouldRetry returns a boolean as to whether this err deserves to be
-// retried.  It returns the err as a convenience
-func shouldRetry(ctx context.Context, err error) (bool, error) {
-	if fserrors.ContextError(ctx, &err) {
+// Some specific errors which should be excluded from retries
+func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
+	if err == nil {
 		return false, err
 	}
-	errString := err.Error()
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	// First check for specific errors
 	//
 	// These come back from the SDK in a whole host of different
 	// error types, but there doesn't seem to be a consistent way
 	// of reading the error cause, so here we just check using the
 	// error string which isn't perfect but does the job.
+	errString := err.Error()
 	if strings.Contains(errString, "insufficient_space") {
 		return false, fserrors.FatalError(err)
 	} else if strings.Contains(errString, "malformed_path") {
 		return false, fserrors.NoRetryError(err)
 	}
+	return true, err
+}
+
+// shouldRetry returns a boolean as to whether this err deserves to be
+// retried.  It returns the err as a convenience
+func shouldRetry(ctx context.Context, err error) (bool, error) {
+	if retry, err := shouldRetryExclude(ctx, err); !retry {
+		return retry, err
+	}
+	// Then handle any official Retry-After header from Dropbox's SDK
 	switch e := err.(type) {
 	case auth.RateLimitAPIError:
 		if e.RateLimitError.RetryAfter > 0 {
-			fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
+			fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
 			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
 		}
 		return true, err
 	}
+	// Keep old behavior for backward compatibility
+	errString := err.Error()
 	if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
 		return true, err
 	}
@@ -377,7 +402,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	oldToken = strings.TrimSpace(oldToken)
 	if ok && oldToken != "" && oldToken[0] != '{' {
 		fs.Infof(name, "Converting token to new format")
-		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
+		newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
 		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
 		if err != nil {
 			return nil, fmt.Errorf("NewFS convert token: %w", err)
@@ -502,8 +527,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 	f.features.Fill(ctx, f)

-	// If root starts with / then use the actual root
-	if strings.HasPrefix(root, "/") {
+	if f.opt.RootNsid != "" {
+		f.ns = f.opt.RootNsid
+		fs.Debugf(f, "Overriding root namespace to %q", f.ns)
+	} else if strings.HasPrefix(root, "/") {
+		// If root starts with / then use the actual root
 		var acc *users.FullAccount
 		err = f.pacer.Call(func() (bool, error) {
 			acc, err = f.users.GetCurrentAccount()
@@ -1008,13 +1036,20 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}

+	// Find and remove existing object
+	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
+	if err != nil {
+		return nil, err
+	}
+	defer cleanup(&err)
+
 	// Temporary Object under construction
 	dstObj := &Object{
 		fs: f,
@@ -1028,7 +1063,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			ToPath:   f.opt.Enc.FromStandardPath(dstObj.remotePath()),
 		},
 	}
-	var err error
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
 		result, err = f.srv.CopyV2(&arg)
@@ -1680,14 +1714,10 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f

 	err = o.fs.pacer.Call(func() (bool, error) {
 		entry, err = o.fs.srv.UploadSessionFinish(args, nil)
-		// If error is insufficient space then don't retry
-		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
-			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
-				err = fserrors.NoRetryError(err)
-				return false, err
-			}
+		if retry, err := shouldRetryExclude(ctx, err); !retry {
+			return retry, err
 		}
-		// after the first chunk is uploaded, we retry everything
+		// after the first chunk is uploaded, we retry everything except the excluded errors
 		return err != nil, err
 	})
 	if err != nil {
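The refactor above splits retry classification into two tiers: shouldRetryExclude short-circuits errors that must never be retried (insufficient space, malformed paths, cancelled contexts), and shouldRetry layers the general retry logic on top, so the upload paths can call the exclude tier alone and then retry everything else. A standalone sketch of that layering, using plain string matching as the real code does; the fserrors wrappers are omitted:

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    // shouldRetryExclude filters out errors that must never be retried.
    func shouldRetryExclude(err error) (bool, error) {
        if err == nil {
            return false, nil
        }
        s := err.Error()
        if strings.Contains(s, "insufficient_space") || strings.Contains(s, "malformed_path") {
            return false, err
        }
        return true, err
    }

    // shouldRetry runs the exclusions first, then applies the general
    // policy (here: retry anything not excluded).
    func shouldRetry(err error) (bool, error) {
        if retry, err := shouldRetryExclude(err); !retry {
            return retry, err
        }
        return true, err
    }

    func main() {
        fmt.Println(shouldRetry(errors.New("insufficient_space"))) // false insufficient_space
        fmt.Println(shouldRetry(errors.New("too_many_requests")))  // true too_many_requests
    }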
@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 		return false, err // No such user
 	case 186:
 		return false, err // IP blocked?
-	case 374:
+	case 374, 412: // Flood detected seems to be #412 now
 		fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
 		time.Sleep(30 * time.Second)
 	default:
@@ -441,23 +441,28 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
+	srcFs := srcObj.fs

 	// Find current directory ID
-	_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
+	srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
 	if err != nil {
 		return nil, err
 	}

 	// Create temporary object
-	dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
+	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
 	if err != nil {
 		return nil, err
 	}

 	// If it is in the correct directory, just rename it
 	var url string
-	if currentDirectoryID == directoryID {
-		resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
+	if srcDirectoryID == dstDirectoryID {
+		// No rename needed
+		if srcLeaf == dstLeaf {
+			return src, nil
+		}
+		resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't rename file: %w", err)
 		}
@@ -466,11 +471,16 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		}
 		url = resp.URLs[0].URL
 	} else {
-		folderID, err := strconv.Atoi(directoryID)
+		dstFolderID, err := strconv.Atoi(dstDirectoryID)
 		if err != nil {
 			return nil, err
 		}
-		resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
+		rename := dstLeaf
+		// No rename needed
+		if srcLeaf == dstLeaf {
+			rename = ""
+		}
+		resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't move file: %w", err)
 		}
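The rewritten Move above makes a three-way decision: same directory and same name is a no-op, same directory with a new name is a rename, and anything else is a move, with the rename argument blanked when the leaf is unchanged. A pure-logic sketch of that decision table, using plain strings rather than the backend's types:

    package main

    import "fmt"

    // planMove mirrors the branch structure of the Move rewrite.
    func planMove(srcDir, dstDir, srcLeaf, dstLeaf string) string {
        if srcDir == dstDir {
            if srcLeaf == dstLeaf {
                return "no-op: already in place"
            }
            return "rename to " + dstLeaf
        }
        rename := dstLeaf
        if srcLeaf == dstLeaf {
            rename = "" // keep the name, only change folders
        }
        return fmt.Sprintf("move to folder %s (rename %q)", dstDir, rename)
    }

    func main() {
        fmt.Println(planMove("42", "42", "a.txt", "a.txt"))
        fmt.Println(planMove("42", "42", "a.txt", "b.txt"))
        fmt.Println(planMove("42", "99", "a.txt", "a.txt"))
    }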
backend/filescom/filescom.go (new file, 901 lines)
@@ -0,0 +1,901 @@
// Package filescom provides an interface to the Files.com
// object storage system.
package filescom

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	files_sdk "github.com/Files-com/files-sdk-go/v3"
	"github.com/Files-com/files-sdk-go/v3/bundle"
	"github.com/Files-com/files-sdk-go/v3/file"
	file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
	"github.com/Files-com/files-sdk-go/v3/folder"
	"github.com/Files-com/files-sdk-go/v3/session"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
)

/*
Run of rclone info
stringNeedsEscaping = []rune{
	'/', '\x00'
}
maxFileLength = 512 // for 1 byte unicode characters
maxFileLength = 512 // for 2 byte unicode characters
maxFileLength = 512 // for 3 byte unicode characters
maxFileLength = 512 // for 4 byte unicode characters
canWriteUnnormalized = true
canReadUnnormalized  = true
canReadRenormalized  = true
canStream            = true
*/

const (
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential

	folderNotEmpty = "processing-failure/folder-not-empty"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "filescom",
		Description: "Files.com",
		NewFs:       NewFs,
		Options: []fs.Option{
			{
				Name: "site",
				Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
			}, {
				Name: "username",
				Help: "The username used to authenticate with Files.com.",
			}, {
				Name:       "password",
				Help:       "The password used to authenticate with Files.com.",
				IsPassword: true,
			}, {
				Name:      "api_key",
				Help:      "The API key used to authenticate with Files.com.",
				Advanced:  true,
				Sensitive: true,
			}, {
				Name:     config.ConfigEncoding,
				Help:     config.ConfigEncodingHelp,
				Advanced: true,
				Default: (encoder.Display |
					encoder.EncodeBackSlash |
					encoder.EncodeRightSpace |
					encoder.EncodeRightCrLfHtVt |
					encoder.EncodeInvalidUtf8),
			}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Site     string               `config:"site"`
	Username string               `config:"username"`
	Password string               `config:"password"`
	APIKey   string               `config:"api_key"`
	Enc      encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote files.com server
type Fs struct {
	name            string                 // name of this remote
	root            string                 // the path we are working on
	opt             Options                // parsed options
	features        *fs.Features           // optional features
	fileClient      *file.Client           // the connection to the file API
	folderClient    *folder.Client         // the connection to the folder API
	migrationClient *file_migration.Client // the connection to the file migration API
	bundleClient    *bundle.Client         // the connection to the bundle API
	pacer           *fs.Pacer              // pacer for API calls
}

// Object describes a files object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	size     int64     // size of the object
	crc32    string    // CRC32 of the object content
	md5      string    // MD5 of the object content
	mimeType string    // Content-Type of the object
	modTime  time.Time // modification time of the object
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("files root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Encode remote and turn it into an absolute path in the share
func (f *Fs) absPath(remote string) string {
	return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried.  It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}

	if apiErr, ok := err.(files_sdk.ResponseError); ok {
		for _, e := range retryErrorCodes {
			if apiErr.HttpCode == e {
				fs.Debugf(nil, "Retrying API error %v", err)
				return true, err
			}
		}
	}

	return fserrors.ShouldRetry(err), err
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
	params := files_sdk.FileFindParams{
		Path: f.absPath(path),
	}

	var file files_sdk.File
	err = f.pacer.Call(func() (bool, error) {
		file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}

	return &file, nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	root = strings.Trim(root, "/")

	config, err := newClientConfig(ctx, opt)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:            name,
		root:            root,
		opt:             *opt,
		fileClient:      &file.Client{Config: config},
		folderClient:    &folder.Client{Config: config},
		migrationClient: &file_migration.Client{Config: config},
		bundleClient:    &bundle.Client{Config: config},
		pacer:           fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		CaseInsensitive:          true,
		CanHaveEmptyDirectories:  true,
		ReadMimeType:             true,
		DirModTimeUpdatesOnWrite: true,
	}).Fill(ctx, f)

	if f.root != "" {
		info, err := f.readMetaDataForPath(ctx, "")
		if err == nil && !info.IsDir() {
			f.root = path.Dir(f.root)
			if f.root == "." {
				f.root = ""
			}
			return f, fs.ErrorIsFile
		}
	}

	return f, err
}

func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
	if opt.Site != "" {
		if strings.Contains(opt.Site, ".") {
			config.EndpointOverride = opt.Site
		} else {
			config.Subdomain = opt.Site
		}

		_, err = url.ParseRequestURI(config.Endpoint())
		if err != nil {
			err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
			return
		}
	}

	config = config.Init().SetCustomClient(fshttp.NewClient(ctx))

	if opt.APIKey != "" {
		config.APIKey = opt.APIKey
		return
	}

	if opt.Username == "" {
		err = errors.New("username not found")
		return
	}
	if opt.Password == "" {
		err = errors.New("password not found")
		return
	}
	opt.Password, err = obscure.Reveal(opt.Password)
	if err != nil {
		return
	}

	sessionClient := session.Client{Config: config}
	params := files_sdk.SessionCreateParams{
		Username: opt.Username,
		Password: opt.Password,
	}

	thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
	if err != nil {
		err = fmt.Errorf("couldn't create session: %w", err)
		return
	}

	config.SessionId = thisSession.Id
	return
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if file != nil {
		err = o.setMetaData(file)
	} else {
		err = o.readMetaData(ctx) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	var it *folder.Iter
	params := files_sdk.FolderListForParams{
		Path: f.absPath(dir),
	}

	err = f.pacer.Call(func() (bool, error) {
		it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("couldn't list files: %w", err)
	}

	for it.Next() {
		item := ptr(it.File())
		remote := f.opt.Enc.ToStandardPath(item.DisplayName)
		remote = path.Join(dir, remote)
		if remote == dir {
			continue
		}

		if item.IsDir() {
			d := fs.NewDir(remote, item.ModTime())
			entries = append(entries, d)
		} else {
			o, err := f.newObjectWithInfo(ctx, remote, item)
			if err != nil {
				return nil, err
			}
			entries = append(entries, o)
		}
	}
	err = it.Err()
	if files_sdk.IsNotExist(err) {
		return nil, fs.ErrorDirNotFound
	}
	return
}

// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
	// Create the directory for the object if it doesn't exist
	err = f.mkParentDir(ctx, remote)
	if err != nil {
		return
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, nil
}

// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	fs := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return fs, fs.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

func (f *Fs) mkdir(ctx context.Context, path string) error {
	if path == "" || path == "." {
		return nil
	}

	params := files_sdk.FolderCreateParams{
		Path:         path,
		MkdirParents: ptr(true),
	}

	err := f.pacer.Call(func() (bool, error) {
		_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if files_sdk.IsExist(err) {
		return nil
	}
	return err
}

// Make the parent directory of remote
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
	return f.mkdir(ctx, path.Dir(f.absPath(remote)))
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return f.mkdir(ctx, f.absPath(dir))
}

// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
	o := Object{
		fs:     f,
		remote: dir,
	}
	return o.SetModTime(ctx, modTime)
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	path := f.absPath(dir)
	if path == "" || path == "." {
		return errors.New("can't purge root directory")
	}

	params := files_sdk.FileDeleteParams{
		Path:      path,
		Recursive: ptr(!check),
	}

	err := f.pacer.Call(func() (bool, error) {
		err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
		// Allow for eventual consistency deletion of child objects.
		if isFolderNotEmpty(err) {
			return true, err
		}
		return shouldRetry(ctx, err)
	})
	if err != nil {
		if files_sdk.IsNotExist(err) {
			return fs.ErrorDirNotFound
		} else if isFolderNotEmpty(err) {
			return fs.ErrorDirectoryNotEmpty
		}

		return fmt.Errorf("rmdir failed: %w", err)
	}
	return nil
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	err = srcObj.readMetaData(ctx)
	if err != nil {
		return
	}

	srcPath := srcObj.fs.absPath(srcObj.remote)
	dstPath := f.absPath(remote)
	if strings.EqualFold(srcPath, dstPath) {
		return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
	}

	// Create temporary object
	dstObj, err = f.createObject(ctx, remote)
	if err != nil {
		return
	}

	// Copy the object
	params := files_sdk.FileCopyParams{
		Path:        srcPath,
		Destination: dstPath,
		Overwrite:   ptr(true),
	}

	var action files_sdk.FileAction
	err = f.pacer.Call(func() (bool, error) {
		action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return
	}

	err = f.waitForAction(ctx, action, "copy")
	if err != nil {
		return
	}

	err = dstObj.SetModTime(ctx, srcObj.modTime)
	return
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
}

// move a file or folder
func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
	// Move the object
	params := files_sdk.FileMoveParams{
		Path:        src.absPath(srcRemote),
		Destination: f.absPath(dstRemote),
	}

	var action files_sdk.FileAction
	err = f.pacer.Call(func() (bool, error) {
		action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}

	err = f.waitForAction(ctx, action, "move")
	if err != nil {
		return nil, err
	}

	info, err = f.readMetaDataForPath(ctx, dstRemote)
	return
}

func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
	var migration files_sdk.FileMigration
	err = f.pacer.Call(func() (bool, error) {
		migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
			// noop
		}, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err == nil && migration.Status != "completed" {
		return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
	}
	return
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Create temporary object
	dstObj, err := f.createObject(ctx, remote)
	if err != nil {
		return nil, err
	}

	// Do the move
	info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
	if err != nil {
		return nil, err
	}

	err = dstObj.setMetaData(info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	// Check if destination exists
	_, err = f.readMetaDataForPath(ctx, dstRemote)
	if err == nil {
		return fs.ErrorDirExists
	}

	// Create temporary object
	dstObj, err := f.createObject(ctx, dstRemote)
	if err != nil {
		return
	}

	// Do the move
	_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
	return
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
	params := files_sdk.BundleCreateParams{
		Paths: []string{f.absPath(remote)},
	}
	if expire < fs.DurationOff {
		params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
	}

	var bundle files_sdk.Bundle
	err = f.pacer.Call(func() (bool, error) {
		bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})

	url = bundle.Url
	return
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet(hash.CRC32, hash.MD5)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	switch t {
	case hash.CRC32:
		if o.crc32 == "" {
			return "", nil
		}
		return fmt.Sprintf("%08s", o.crc32), nil
	case hash.MD5:
		return o.md5, nil
	}
	return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(file *files_sdk.File) error {
	o.modTime = file.ModTime()

	if !file.IsDir() {
		o.size = file.Size
		o.crc32 = file.Crc32
		o.md5 = file.Md5
		o.mimeType = file.MimeType
	}

	return nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	file, err := o.fs.readMetaDataForPath(ctx, o.remote)
	if err != nil {
		if files_sdk.IsNotExist(err) {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	if file.IsDir() {
		return fs.ErrorIsDir
	}
	return o.setMetaData(file)
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	params := files_sdk.FileUpdateParams{
		Path:          o.fs.absPath(o.remote),
		ProvidedMtime: &modTime,
	}

	var file files_sdk.File
	err = o.fs.pacer.Call(func() (bool, error) {
		file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	return o.setMetaData(&file)
}

// Storable returns a boolean showing whether this object storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Offset and Count for range download
	var offset, count int64
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
		case *fs.SeekOption:
			offset = x.Offset
			count = o.size - offset
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	params := files_sdk.FileDownloadParams{
		Path: o.fs.absPath(o.remote),
	}

	headers := &http.Header{}
	headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
	err = o.fs.pacer.Call(func() (bool, error) {
		_, err = o.fs.fileClient.Download(
			params,
			files_sdk.WithContext(ctx),
			files_sdk.RequestHeadersOption(headers),
			files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
				in = closer
				return err
			}),
		)
		return shouldRetry(ctx, err)
	})
	return
}

// Returns a pointer to t - useful for returning pointers to constants
func ptr[T any](t T) *T {
	return &t
}

func isFolderNotEmpty(err error) bool {
	var re files_sdk.ResponseError
	ok := errors.As(err, &re)
	return ok && re.Type == folderNotEmpty
}

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	uploadOpts := []file.UploadOption{
		file.UploadWithContext(ctx),
		file.UploadWithReader(in),
		file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
		file.UploadWithProvidedMtime(src.ModTime(ctx)),
	}

	err := o.fs.pacer.Call(func() (bool, error) {
		err := o.fs.fileClient.Upload(uploadOpts...)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}

	return o.readMetaData(ctx)
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	params := files_sdk.FileDeleteParams{
		Path: o.fs.absPath(o.remote),
	}

	return o.fs.pacer.Call(func() (bool, error) {
		err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs           = (*Fs)(nil)
	_ fs.Purger       = (*Fs)(nil)
	_ fs.PutStreamer  = (*Fs)(nil)
	_ fs.Copier       = (*Fs)(nil)
	_ fs.Mover        = (*Fs)(nil)
	_ fs.DirMover     = (*Fs)(nil)
	_ fs.PublicLinker = (*Fs)(nil)
	_ fs.Object       = (*Object)(nil)
	_ fs.MimeTyper    = (*Object)(nil)
)
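The backend above leans on the tiny generic ptr helper because the Files.com SDK models optional parameters as pointer fields (*bool, *time.Time), where nil means "not specified". A self-contained demonstration of the same idiom with a hypothetical params struct:

    package main

    import (
        "fmt"
        "time"
    )

    // ptr returns a pointer to its argument - handy for literal values.
    func ptr[T any](t T) *T { return &t }

    type deleteParams struct {
        Recursive *bool      // nil means "server default"
        Before    *time.Time // nil means "no cutoff"
    }

    func main() {
        p := deleteParams{
            Recursive: ptr(true),
            Before:    ptr(time.Now().Add(-24 * time.Hour)),
        }
        fmt.Println(*p.Recursive, p.Before == nil) // true false
    }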
backend/filescom/filescom_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test Files filesystem interface
package filescom_test

import (
	"testing"

	"github.com/rclone/rclone/backend/filescom"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFilesCom:",
		NilObject:  (*filescom.Object)(nil),
	})
}
@@ -85,7 +85,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
 			Default: false,
 		}, {
 			Name: "concurrency",
-			Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
+			Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.

 Note that setting this is very likely to cause deadlocks so it should
 be used with care.
@@ -99,7 +99,7 @@ maximum of |--checkers| and |--transfers|.
 So for |concurrency 3| you'd use |--checkers 2 --transfers 2
 --check-first| or |--checkers 1 --transfers 1|.

-`, "|", "`", -1),
+`, "|", "`"),
 			Default:  0,
 			Advanced: true,
 		}, {
@@ -180,12 +180,28 @@ If this is set and no password is supplied then rclone will ask for a password
 			Default: "",
 			Help: `Socks 5 proxy host.

-		Supports the format user:pass@host:port, user@host:port, host:port.
+Supports the format user:pass@host:port, user@host:port, host:port.

-		Example:
+Example:

-			myUser:myPass@localhost:9005
-		`,
+    myUser:myPass@localhost:9005
+`,
 			Advanced: true,
+		}, {
+			Name:    "no_check_upload",
+			Default: false,
+			Help: `Don't check the upload is OK
+
+Normally rclone will try to check the upload exists after it has
+uploaded a file to make sure the size and modification time are as
+expected.
+
+This flag stops rclone doing these checks. This enables uploading to
+folders which are write only.
+
+You will likely need to use the --inplace flag also if uploading to
+a write only folder.
+`,
+			Advanced: true,
 		}, {
 			Name: config.ConfigEncoding,
@@ -232,6 +248,7 @@ type Options struct {
 	AskPassword       bool                 `config:"ask_password"`
 	Enc               encoder.MultiEncoder `config:"encoding"`
 	SocksProxy        string               `config:"socks_proxy"`
+	NoCheckUpload     bool                 `config:"no_check_upload"`
 }

 // Fs represents a remote FTP server
@@ -1303,6 +1320,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return fmt.Errorf("update stor: %w", err)
 	}
 	o.fs.putFtpConnection(&c, nil)
+	if o.fs.opt.NoCheckUpload {
+		o.info = &FileInfo{
+			Name:    o.remote,
+			Size:    uint64(src.Size()),
+			ModTime: src.ModTime(ctx),
+			precise: true,
+			IsDir:   false,
+		}
+		return nil
+	}
 	if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
 		return fmt.Errorf("SetModTime: %w", err)
 	}
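The new no_check_upload option exists for write-only FTP folders, where the usual post-upload existence/size check would fail even though the upload succeeded. Assuming rclone's standard flag derivation (backend prefix plus the option name) and a hypothetical remote named ftp-dropbox, usage would look like:

    rclone copy --ftp-no-check-upload --inplace report.pdf ftp-dropbox:incoming/

The help text above recommends --inplace because the default upload-to-temporary-name-then-rename flow may also be refused by such folders.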
311
backend/gofile/api/types.go
Normal file
311
backend/gofile/api/types.go
Normal file
@@ -0,0 +1,311 @@
|
||||
// Package api has type definitions for gofile
|
||||
//
|
||||
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// 2017-05-03T07:26:10-07:00
|
||||
timeFormat = `"` + time.RFC3339 + `"`
|
||||
)
|
||||
|
||||
// Time represents date and time information for the
|
||||
// gofile API, by using RFC3339
|
||||
type Time time.Time
|
||||
|
||||
// MarshalJSON turns a Time into JSON (in UTC)
|
||||
func (t *Time) MarshalJSON() (out []byte, err error) {
|
||||
timeString := (*time.Time)(t).Format(timeFormat)
|
||||
return []byte(timeString), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into a Time
|
||||
func (t *Time) UnmarshalJSON(data []byte) error {
|
||||
newT, err := time.Parse(timeFormat, string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Time(newT)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Error is returned from gofile when things go wrong
|
||||
type Error struct {
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e Error) Error() string {
|
||||
out := fmt.Sprintf("Error %q", e.Status)
|
||||
return out
|
||||
}
|
||||
|
||||
// IsError returns true if there is an error
|
||||
func (e Error) IsError() bool {
|
||||
return e.Status != "ok"
|
||||
}
|
||||
|
||||
// Err returns err if not nil, or e if IsError or nil
|
||||
func (e Error) Err(err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.IsError() {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// Types of things in Item
|
||||
const (
|
||||
ItemTypeFolder = "folder"
|
||||
ItemTypeFile = "file"
|
||||
)
|
||||
|
||||
// Item describes a folder or a file as returned by /contents
|
||||
type Item struct {
|
||||
ID string `json:"id"`
|
||||
ParentFolder string `json:"parentFolder"`
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Code string `json:"code"`
|
||||
CreateTime int64 `json:"createTime"`
|
||||
ModTime int64 `json:"modTime"`
|
||||
Link string `json:"link"`
	MD5           string                 `json:"md5"`
	MimeType      string                 `json:"mimetype"`
	ChildrenCount int                    `json:"childrenCount"`
	DirectLinks   map[string]*DirectLink `json:"directLinks"`
	//Public             bool     `json:"public"`
	//ServerSelected     string   `json:"serverSelected"`
	//Thumbnail          string   `json:"thumbnail"`
	//DownloadCount      int      `json:"downloadCount"`
	//TotalDownloadCount int64    `json:"totalDownloadCount"`
	//TotalSize          int64    `json:"totalSize"`
	//ChildrenIDs        []string `json:"childrenIds"`
	Children map[string]*Item `json:"children"`
}

// ToNativeTime converts a go time to a native time
func ToNativeTime(t time.Time) int64 {
	return t.Unix()
}

// FromNativeTime converts native time to a go time
func FromNativeTime(t int64) time.Time {
	return time.Unix(t, 0)
}

// DirectLink describes a direct link to a file so it can be
// downloaded by third parties.
type DirectLink struct {
	ExpireTime       int64  `json:"expireTime"`
	SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
	DomainsAllowed   []any  `json:"domainsAllowed"`
	Auth             []any  `json:"auth"`
	IsReqLink        bool   `json:"isReqLink"`
	DirectLink       string `json:"directLink"`
}

// Contents is returned from the /contents call
type Contents struct {
	Error
	Data struct {
		Item
	} `json:"data"`
	Metadata Metadata `json:"metadata"`
}

// Metadata is returned when paging is in use
type Metadata struct {
	TotalCount  int  `json:"totalCount"`
	TotalPages  int  `json:"totalPages"`
	Page        int  `json:"page"`
	PageSize    int  `json:"pageSize"`
	HasNextPage bool `json:"hasNextPage"`
}

// AccountsGetID is the result of /accounts/getid
type AccountsGetID struct {
	Error
	Data struct {
		ID string `json:"id"`
	} `json:"data"`
}

// Stats of storage and traffic
type Stats struct {
	FolderCount            int64 `json:"folderCount"`
	FileCount              int64 `json:"fileCount"`
	Storage                int64 `json:"storage"`
	TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
	TrafficReqDownloaded   int64 `json:"trafficReqDownloaded"`
	TrafficWebDownloaded   int64 `json:"trafficWebDownloaded"`
}

// AccountsGet is the result of /accounts/{id}
type AccountsGet struct {
	Error
	Data struct {
		ID                             string `json:"id"`
		Email                          string `json:"email"`
		Tier                           string `json:"tier"`
		PremiumType                    string `json:"premiumType"`
		Token                          string `json:"token"`
		RootFolder                     string `json:"rootFolder"`
		SubscriptionProvider           string `json:"subscriptionProvider"`
		SubscriptionEndDate            int    `json:"subscriptionEndDate"`
		SubscriptionLimitDirectTraffic int64  `json:"subscriptionLimitDirectTraffic"`
		SubscriptionLimitStorage       int64  `json:"subscriptionLimitStorage"`
		StatsCurrent                   Stats  `json:"statsCurrent"`
		// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
	} `json:"data"`
}

// CreateFolderRequest is the input to /contents/createFolder
type CreateFolderRequest struct {
	ParentFolderID string `json:"parentFolderId"`
	FolderName     string `json:"folderName"`
	ModTime        int64  `json:"modTime,omitempty"`
}

// CreateFolderResponse is the output from /contents/createFolder
type CreateFolderResponse struct {
	Error
	Data Item `json:"data"`
}

// DeleteRequest is the input to DELETE /contents
type DeleteRequest struct {
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// DeleteResponse is the output from DELETE /contents
type DeleteResponse struct {
	Error
	Data map[string]Error
}

// Server is an upload server
type Server struct {
	Name string `json:"name"`
	Zone string `json:"zone"`
}

// String returns a string representation of the Server
func (s *Server) String() string {
	return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}

// Root returns the root URL for the server
func (s *Server) Root() string {
	return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}

// URL returns the upload URL for the server
func (s *Server) URL() string {
	return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}

// ServersResponse is the output from /servers
type ServersResponse struct {
	Error
	Data struct {
		Servers []Server `json:"servers"`
	} `json:"data"`
}

// UploadResponse is returned by POST /contents/uploadfile
type UploadResponse struct {
	Error
	Data Item `json:"data"`
}

// DirectLinksRequest specifies the parameters for the direct link
type DirectLinksRequest struct {
	ExpireTime       int64 `json:"expireTime,omitempty"`
	SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
	DomainsAllowed   []any `json:"domainsAllowed,omitempty"`
	Auth             []any `json:"auth,omitempty"`
}

// DirectLinksResult is returned from POST /contents/{id}/directlinks
type DirectLinksResult struct {
	Error
	Data struct {
		ExpireTime       int64  `json:"expireTime"`
		SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
		DomainsAllowed   []any  `json:"domainsAllowed"`
		Auth             []any  `json:"auth"`
		IsReqLink        bool   `json:"isReqLink"`
		ID               string `json:"id"`
		DirectLink       string `json:"directLink"`
	} `json:"data"`
}

// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
//
// The Value of the attribute to define:
//   - For Attribute "name": the name of the content (file or folder)
//   - For Attribute "description": the description displayed on the download page (folder only)
//   - For Attribute "tags": a comma-separated list of tags (folder only)
//   - For Attribute "public": either true or false (folder only)
//   - For Attribute "expiry": a unix timestamp of the expiration date (folder only)
//   - For Attribute "password": the password to set (folder only)
type UpdateItemRequest struct {
	Attribute string `json:"attribute"`
	Value     any    `json:"attributeValue"`
}
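
As an illustration of the attribute/value scheme documented above, a minimal sketch of how a rename could be encoded with this struct. This assumes the types live in the backend's api package (the exact import path is not shown in this diff) and the file name is made up:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/gofile/api"
)

func main() {
	// Per the doc comment, Attribute selects what to change and
	// Value carries the new value for that attribute.
	req := api.UpdateItemRequest{
		Attribute: "name",
		Value:     "renamed.txt", // hypothetical new name
	}
	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // {"attribute":"name","attributeValue":"renamed.txt"}
}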

// UpdateItemResponse is returned by PUT /contents/{id}/update
type UpdateItemResponse struct {
	Error
	Data Item `json:"data"`
}

// MoveRequest is the input to /contents/move
type MoveRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// MoveResponse is returned by POST /contents/move
type MoveResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// CopyRequest is the input to /contents/copy
type CopyRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// CopyResponse is returned by POST /contents/copy
type CopyResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// UploadServerStatus is returned when fetching the root of an upload server
type UploadServerStatus struct {
	Error
	Data struct {
		Server string `json:"server"`
		Test   string `json:"test"`
	} `json:"data"`
}
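
Several of these request types carry a comma separated list of IDs in a single string field. A short self-contained sketch of how such a payload is typically assembled (the IDs are invented, and the struct is redeclared locally so the example runs on its own):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// MoveRequest mirrors the struct above.
type MoveRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

func main() {
	ids := []string{"id-one", "id-two", "id-three"}
	req := MoveRequest{
		FolderID:   "destination-folder-id",
		ContentsID: strings.Join(ids, ","), // join the IDs into one field
	}
	out, _ := json.Marshal(req)
	fmt.Println(string(out))
	// {"folderId":"destination-folder-id","contentsId":"id-one,id-two,id-three"}
}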

backend/gofile/gofile.go (new file, 1659 lines; diff suppressed because it is too large)

backend/gofile/gofile_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test Gofile filesystem interface
package gofile_test

import (
	"testing"

	"github.com/rclone/rclone/backend/gofile"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestGoFile:",
		NilObject:  (*gofile.Object)(nil),
	})
}
@@ -62,9 +62,10 @@ const (
 
 var (
 	// Description of how to auth for this app
-	storageConfig = &oauth2.Config{
+	storageConfig = &oauthutil.Config{
 		Scopes:       []string{storage.DevstorageReadWriteScope},
-		Endpoint:     google.Endpoint,
+		AuthURL:      google.Endpoint.AuthURL,
+		TokenURL:     google.Endpoint.TokenURL,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -106,6 +107,12 @@ func init() {
 			Help:      "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
 			Hide:      fs.OptionHideBoth,
 			Sensitive: true,
+		}, {
+			Name:      "access_token",
+			Help:      "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want to use short-lived access tokens instead of interactive login.",
+			Hide:      fs.OptionHideConfigurator,
+			Sensitive: true,
+			Advanced:  true,
 		}, {
 			Name: "anonymous",
 			Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -379,6 +386,7 @@ type Options struct {
 	Enc              encoder.MultiEncoder `config:"encoding"`
 	EnvAuth          bool                 `config:"env_auth"`
 	DirectoryMarkers bool                 `config:"directory_markers"`
+	AccessToken      string               `config:"access_token"`
 }
 
 // Fs represents a remote storage server
@@ -535,6 +543,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		if err != nil {
 			return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
 		}
+	} else if opt.AccessToken != "" {
+		ts := oauth2.Token{AccessToken: opt.AccessToken}
+		oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
 	} else {
 		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
 		if err != nil {
@@ -697,7 +708,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		// is this a directory marker?
 		if isDirectory {
 			// Don't insert the root directory
-			if remote == directory {
+			if remote == f.opt.Enc.ToStandardPath(directory) {
 				continue
 			}
 			// process directory markers as directories
@@ -944,7 +955,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 		return e
 	}
 	return f.createDirectoryMarker(ctx, bucket, dir)
-
 }
 
 // mkdirParent creates the parent bucket/directory if it doesn't exist
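
The access_token branch added above leans on standard golang.org/x/oauth2 primitives rather than anything rclone-specific. A minimal standalone sketch of the same pattern (the token string is a placeholder):

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	// Wrap a fixed token in a TokenSource; the derived client sends it
	// as a Bearer token on every request and never tries to refresh it,
	// which is why this path suits short-lived tokens only.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "ya29.placeholder"})
	client := oauth2.NewClient(ctx, ts)
	fmt.Printf("client: %T\n", client) // *http.Client
}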
@@ -28,13 +28,11 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
-	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
 )
 
@@ -61,13 +59,14 @@ const (
 
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauth2.Config{
+	oauthConfig = &oauthutil.Config{
 		Scopes: []string{
 			"openid",
 			"profile",
 			scopeReadWrite, // this must be at position scopeAccess
 		},
-		Endpoint:     google.Endpoint,
+		AuthURL:      google.Endpoint.AuthURL,
+		TokenURL:     google.Endpoint.TokenURL,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -160,6 +159,34 @@ listings and transferred.
 Without this flag, archived media will not be visible in directory
 listings and won't be transferred.`,
 			Advanced: true,
+		}, {
+			Name:    "proxy",
+			Default: "",
+			Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
+
+The Google API will deliver images and video which aren't full
+resolution, and/or have EXIF data missing.
+
+However if you use the gphotosdl proxy then you can download original,
+unchanged images.
+
+This runs a headless browser in the background.
+
+Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
+
+First run with
+
+    gphotosdl -login
+
+Then once you have logged into google photos close the browser window
+and run
+
+    gphotosdl
+
+Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
+rclone use the proxy.
+`, "|", "`"),
+			Advanced: true,
 		}, {
 			Name: config.ConfigEncoding,
 			Help: config.ConfigEncodingHelp,
@@ -181,6 +208,7 @@ type Options struct {
 	BatchMode    string      `config:"batch_mode"`
 	BatchSize    int         `config:"batch_size"`
 	BatchTimeout fs.Duration `config:"batch_timeout"`
+	Proxy        string      `config:"proxy"`
 }
 
 // Fs represents a remote storage server
@@ -454,7 +482,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	defer log.Trace(f, "remote=%q", remote)("")
+	// defer log.Trace(f, "remote=%q", remote)("")
 	return f.newObjectWithInfo(ctx, remote, nil)
 }
 
@@ -620,9 +648,7 @@ func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter
 		if err != nil {
 			return err
 		}
-		if entry != nil {
-			entries = append(entries, entry)
-		}
+		entries = append(entries, entry)
 		return nil
 	})
 	if err != nil {
@@ -669,7 +695,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
+	// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
 	match, prefix, pattern := patterns.match(f.root, dir, false)
 	if pattern == nil || pattern.isFile {
 		return nil, fs.ErrorDirNotFound
@@ -686,7 +712,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	defer log.Trace(f, "src=%+v", src)("")
+	// defer log.Trace(f, "src=%+v", src)("")
 	// Temporary Object under construction
 	o := &Object{
 		fs: f,
@@ -739,7 +765,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap
 
 // Mkdir creates the album if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
-	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
+	// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
 	match, prefix, pattern := patterns.match(f.root, dir, false)
 	if pattern == nil {
 		return fs.ErrorDirNotFound
@@ -763,7 +789,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
-	defer log.Trace(f, "dir=%q")("err=%v", &err)
+	// defer log.Trace(f, "dir=%q")("err=%v", &err)
 	match, _, pattern := patterns.match(f.root, dir, false)
 	if pattern == nil {
 		return fs.ErrorDirNotFound
@@ -836,7 +862,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 
 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	defer log.Trace(o, "")("")
+	// defer log.Trace(o, "")("")
 	if !o.fs.opt.ReadSize || o.bytes >= 0 {
 		return o.bytes
 	}
@@ -937,7 +963,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
-	defer log.Trace(o, "")("")
+	// defer log.Trace(o, "")("")
 	err := o.readMetaData(ctx)
 	if err != nil {
 		fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
@@ -967,16 +993,20 @@ func (o *Object) downloadURL() string {
 
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	defer log.Trace(o, "")("")
+	// defer log.Trace(o, "")("")
 	err = o.readMetaData(ctx)
 	if err != nil {
 		fs.Debugf(o, "Open: Failed to read metadata: %v", err)
 		return nil, err
 	}
+	url := o.downloadURL()
+	if o.fs.opt.Proxy != "" {
+		url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
+	}
 	var resp *http.Response
 	opts := rest.Opts{
 		Method:  "GET",
-		RootURL: o.downloadURL(),
+		RootURL: url,
 		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1069,7 +1099,7 @@ func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*a
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-	defer log.Trace(o, "src=%+v", src)("err=%v", &err)
+	// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
 	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
 	if pattern == nil || !pattern.isFile || !pattern.canUpload {
 		return errCantUpload
@@ -1138,7 +1168,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	errors := make([]error, 1)
 	results := make([]*api.MediaItem, 1)
 	err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
-	if err != nil {
+	if err == nil {
 		err = errors[0]
 		info = results[0]
 	}

@@ -2,6 +2,7 @@ package googlephotos
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -35,7 +36,7 @@ func TestIntegration(t *testing.T) {
 		*fstest.RemoteName = "TestGooglePhotos:"
 	}
 	f, err := fs.NewFs(ctx, *fstest.RemoteName)
-	if err == fs.ErrorNotFoundInConfigFile {
+	if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
 		t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
 	}
 	require.NoError(t, err)
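
The test change above swaps a direct == comparison for errors.Is, which also matches when the sentinel error is wrapped with %w somewhere along the way. A small self-contained illustration of the difference:

package main

import (
	"errors"
	"fmt"
)

var errSentinel = errors.New("not found in config file")

func main() {
	wrapped := fmt.Errorf("creating backend: %w", errSentinel)
	fmt.Println(wrapped == errSentinel)          // false: == only sees the outer error
	fmt.Println(errors.Is(wrapped, errSentinel)) // true: Is unwraps the chain
}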
@@ -535,6 +535,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 	return do.Metadata(ctx)
 }
 
+// SetMetadata sets metadata for an Object
+//
+// It should return fs.ErrorNotImplemented if it can't set metadata
+func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
+	do, ok := o.Object.(fs.SetMetadataer)
+	if !ok {
+		return fs.ErrorNotImplemented
+	}
+	return do.SetMetadata(ctx, metadata)
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = (*Fs)(nil)

@@ -31,7 +31,6 @@ import (
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
-	"golang.org/x/oauth2"
 )
 
 const (
@@ -48,11 +47,9 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app.
-	oauthConfig = &oauth2.Config{
-		Endpoint: oauth2.Endpoint{
-			AuthURL:  "https://my.hidrive.com/client/authorize",
-			TokenURL: "https://my.hidrive.com/oauth2/token",
-		},
+	oauthConfig = &oauthutil.Config{
+		AuthURL:  "https://my.hidrive.com/client/authorize",
+		TokenURL: "https://my.hidrive.com/oauth2/token",
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.TitleBarRedirectURL,

@@ -331,12 +331,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 
 // Joins the remote onto the base URL
 func (f *Fs) url(remote string) string {
+	trimmedRemote := strings.TrimLeft(remote, "/") // remove leading "/" since we always have it in f.endpointURL
 	if f.opt.NoEscape {
 		// Directly concatenate without escaping, no_escape behavior
-		return f.endpointURL + remote
+		return f.endpointURL + trimmedRemote
 	}
 	// Default behavior
-	return f.endpointURL + rest.URLPathEscape(remote)
+	return f.endpointURL + rest.URLPathEscape(trimmedRemote)
 }
 
 // Errors returned by parseName
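
The fix hinges on strings.TrimLeft removing every leading slash before the path is joined onto the endpoint URL, which (per the comment) always ends in "/". A quick sketch of the before/after join, with an invented endpoint:

package main

import (
	"fmt"
	"strings"
)

func main() {
	endpointURL := "http://example.com/" // assumed to end in "/" as the comment states
	remote := "/four/under four.txt"

	trimmed := strings.TrimLeft(remote, "/")
	fmt.Println(endpointURL + remote)  // http://example.com//four/under four.txt (double slash)
	fmt.Println(endpointURL + trimmed) // http://example.com/four/under four.txt
}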
@@ -191,6 +191,33 @@ func TestNewObject(t *testing.T) {
 	assert.Equal(t, fs.ErrorObjectNotFound, err)
 }
 
+func TestNewObjectWithLeadingSlash(t *testing.T) {
+	f := prepare(t)
+
+	o, err := f.NewObject(context.Background(), "/four/under four.txt")
+	require.NoError(t, err)
+
+	assert.Equal(t, "/four/under four.txt", o.Remote())
+	assert.Equal(t, int64(8+lineEndSize), o.Size())
+	_, ok := o.(*Object)
+	assert.True(t, ok)
+
+	// Test the time is correct on the object
+
+	tObj := o.ModTime(context.Background())
+
+	fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
+	require.NoError(t, err)
+	tFile := fi.ModTime()
+
+	fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
+
+	// check object not found
+	o, err = f.NewObject(context.Background(), "/not found.txt")
+	assert.Nil(t, o)
+	assert.Equal(t, fs.ErrorObjectNotFound, err)
+}
+
 func TestOpen(t *testing.T) {
 	m := prepareServer(t)
 
backend/iclouddrive/api/client.go (new file, 166 lines)
@@ -0,0 +1,166 @@
// Package api provides functionality for interacting with the iCloud API.
package api

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

const (
	baseEndpoint  = "https://www.icloud.com"
	homeEndpoint  = "https://www.icloud.com"
	setupEndpoint = "https://setup.icloud.com/setup/ws/1"
	authEndpoint  = "https://idmsa.apple.com/appleauth/auth"
)

type sessionSave func(*Session)

// Client defines the client configuration
type Client struct {
	appleID             string
	password            string
	srv                 *rest.Client
	Session             *Session
	sessionSaveCallback sessionSave

	drive *DriveService
}

// New creates a new Client instance with the provided Apple ID, password, trust token, cookies, and session save callback.
//
// Parameters:
//   - appleID: the Apple ID of the user.
//   - password: the password of the user.
//   - trustToken: the trust token for the session.
//   - clientID: the client id for the session.
//   - cookies: the cookies for the session.
//   - sessionSaveCallback: the callback function to save the session.
func New(appleID, password, trustToken string, clientID string, cookies []*http.Cookie, sessionSaveCallback sessionSave) (*Client, error) {
	icloud := &Client{
		appleID:             appleID,
		password:            password,
		srv:                 rest.NewClient(fshttp.NewClient(context.Background())),
		Session:             NewSession(),
		sessionSaveCallback: sessionSaveCallback,
	}

	icloud.Session.TrustToken = trustToken
	icloud.Session.Cookies = cookies
	icloud.Session.ClientID = clientID
	return icloud, nil
}

// DriveService returns the DriveService instance associated with the Client.
func (c *Client) DriveService() (*DriveService, error) {
	var err error
	if c.drive == nil {
		c.drive, err = NewDriveService(c)
		if err != nil {
			return nil, err
		}
	}
	return c.drive, nil
}

// Request makes a request and retries it if the session is invalid.
//
// This function is the main entry point for making requests to the iCloud
// API. If the initial request returns a 401 (Unauthorized), it will try to
// reauthenticate and retry the request.
func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
	resp, err = c.Session.Request(ctx, opts, request, response)
	if err != nil && resp != nil {
		// try to reauth
		if resp.StatusCode == 401 || resp.StatusCode == 421 {
			err = c.Authenticate(ctx)
			if err != nil {
				return nil, err
			}

			if c.Session.Requires2FA() {
				return nil, errors.New("trust token expired, please reauth")
			}
			return c.RequestNoReAuth(ctx, opts, request, response)
		}
	}
	return resp, err
}

// RequestNoReAuth makes a request without re-authenticating.
//
// This function is useful when you have a session that is already
// authenticated, but you need to make a request without triggering
// a re-authentication.
func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
	// Make the request without re-authenticating
	resp, err = c.Session.Request(ctx, opts, request, response)
	return resp, err
}

// Authenticate authenticates the client with the iCloud API.
func (c *Client) Authenticate(ctx context.Context) error {
	if c.Session.Cookies != nil {
		if err := c.Session.ValidateSession(ctx); err == nil {
			fs.Debugf("icloud", "Valid session, no need to reauth")
			return nil
		}
		c.Session.Cookies = nil
	}

	fs.Debugf("icloud", "Authenticating as %s\n", c.appleID)
	err := c.Session.SignIn(ctx, c.appleID, c.password)

	if err == nil {
		err = c.Session.AuthWithToken(ctx)
		if err == nil && c.sessionSaveCallback != nil {
			c.sessionSaveCallback(c.Session)
		}
	}
	return err
}

// SignIn signs in the client using the provided context and credentials.
func (c *Client) SignIn(ctx context.Context) error {
	return c.Session.SignIn(ctx, c.appleID, c.password)
}

// IntoReader marshals the provided values into a JSON encoded reader
func IntoReader(values any) (*bytes.Reader, error) {
	m, err := json.Marshal(values)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(m), nil
}

// RequestError holds info on a result state, icloud can return a 200 but the result is unknown
type RequestError struct {
	Status string
	Text   string
}

// Error satisfies the error interface.
func (e *RequestError) Error() string {
	return fmt.Sprintf("%s: %s", e.Text, e.Status)
}

func newRequestError(Status string, Text string) *RequestError {
	return &RequestError{
		Status: strings.ToLower(Status),
		Text:   Text,
	}
}

// newRequestErrorf makes a new error from sprintf parameters.
func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
	return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
}
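
Pieced together from the functions above, a hedged sketch of how a caller might drive this client end to end. The credentials, client ID and the session-save hook are placeholders, and error handling is trimmed to the essentials:

package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/backend/iclouddrive/api"
)

func main() {
	ctx := context.Background()
	// Placeholders: a real caller supplies a stored trust token, client ID
	// and cookies, and persists the session inside the callback.
	client, err := api.New("user@example.com", "password", "", "client-id", nil,
		func(s *api.Session) { fmt.Println("session updated, persist it here") })
	if err != nil {
		panic(err)
	}
	// Authenticate reuses valid cookies when present, otherwise signs in.
	if err := client.Authenticate(ctx); err != nil {
		panic(err)
	}
	drive, err := client.DriveService()
	if err != nil {
		panic(err)
	}
	fmt.Println("root:", drive.RootID) // FOLDER::com.apple.CloudDocs::root
}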
backend/iclouddrive/api/drive.go (new file, 913 lines)
@@ -0,0 +1,913 @@
package api

import (
	"bytes"
	"context"
	"io"
	"mime"
	"net/http"
	"net/url"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

const (
	defaultZone        = "com.apple.CloudDocs"
	statusOk           = "OK"
	statusEtagConflict = "ETAG_CONFLICT"
)

// DriveService represents an iCloud Drive service.
type DriveService struct {
	icloud       *Client
	RootID       string
	endpoint     string
	docsEndpoint string
}

// NewDriveService creates a new DriveService instance.
func NewDriveService(icloud *Client) (*DriveService, error) {
	return &DriveService{icloud: icloud, RootID: "FOLDER::com.apple.CloudDocs::root", endpoint: icloud.Session.AccountInfo.Webservices["drivews"].URL, docsEndpoint: icloud.Session.AccountInfo.Webservices["docws"].URL}, nil
}

// GetItemByDriveID retrieves a DriveItem by its Drive ID.
func (d *DriveService) GetItemByDriveID(ctx context.Context, id string, includeChildren bool) (*DriveItem, *http.Response, error) {
	items, resp, err := d.GetItemsByDriveID(ctx, []string{id}, includeChildren)
	if err != nil {
		return nil, resp, err
	}
	return items[0], resp, err
}

// GetItemsByDriveID retrieves DriveItems by their Drive IDs.
func (d *DriveService) GetItemsByDriveID(ctx context.Context, ids []string, includeChildren bool) ([]*DriveItem, *http.Response, error) {
	var err error
	_items := []map[string]any{}
	for _, id := range ids {
		_items = append(_items, map[string]any{
			"drivewsid":        id,
			"partialData":      false,
			"includeHierarchy": false,
		})
	}

	var body *bytes.Reader
	var path string
	if !includeChildren {
		values := []map[string]any{{
			"items": _items,
		}}
		body, err = IntoReader(values)
		if err != nil {
			return nil, nil, err
		}
		path = "/retrieveItemDetails"
	} else {
		values := _items
		body, err = IntoReader(values)
		if err != nil {
			return nil, nil, err
		}
		path = "/retrieveItemDetailsInFolders"
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         path,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}
	var items []*DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &items)
	if err != nil {
		return nil, resp, err
	}

	return items, resp, err
}

// GetDocByPath retrieves a document by its path.
func (d *DriveService) GetDocByPath(ctx context.Context, path string) (*Document, *http.Response, error) {
	values := url.Values{}
	values.Set("unified_format", "false")
	body, err := IntoReader(path)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/list/lookup_by_path",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
		Body:         body,
	}
	var item []*Document
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item[0], resp, err
}

// GetItemByPath retrieves a DriveItem by its path.
func (d *DriveService) GetItemByPath(ctx context.Context, path string) (*DriveItem, *http.Response, error) {
	values := url.Values{}
	values.Set("unified_format", "true")

	body, err := IntoReader(path)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/list/lookup_by_path",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
		Body:         body,
	}
	var item []*DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item[0], resp, err
}

// GetDocByItemID retrieves a document by its item ID.
func (d *DriveService) GetDocByItemID(ctx context.Context, id string) (*Document, *http.Response, error) {
	values := url.Values{}
	values.Set("document_id", id)
	values.Set("unified_format", "false") // important
	opts := rest.Opts{
		Method:       "GET",
		Path:         "/ws/" + defaultZone + "/list/lookup_by_id",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
	}
	var item *Document
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item, resp, err
}

// GetItemRawByItemID retrieves a DriveItemRaw by its item ID.
func (d *DriveService) GetItemRawByItemID(ctx context.Context, id string) (*DriveItemRaw, *http.Response, error) {
	opts := rest.Opts{
		Method:       "GET",
		Path:         "/v1/item/" + id,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
	}
	var item *DriveItemRaw
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item, resp, err
}

// GetItemsInFolder retrieves a list of DriveItemRaw objects in a folder with the given ID.
func (d *DriveService) GetItemsInFolder(ctx context.Context, id string, limit int64) ([]*DriveItemRaw, *http.Response, error) {
	values := url.Values{}
	values.Set("limit", strconv.FormatInt(limit, 10))

	opts := rest.Opts{
		Method:       "GET",
		Path:         "/v1/enumerate/" + id,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
	}

	items := struct {
		Items []*DriveItemRaw `json:"drive_item"`
	}{}

	resp, err := d.icloud.Request(ctx, opts, nil, &items)
	if err != nil {
		return nil, resp, err
	}

	return items.Items, resp, err
}

// GetDownloadURLByDriveID retrieves the download URL for a file in the DriveService.
func (d *DriveService) GetDownloadURLByDriveID(ctx context.Context, id string) (string, *http.Response, error) {
	_, zone, docid := DeconstructDriveID(id)
	values := url.Values{}
	values.Set("document_id", docid)

	if zone == "" {
		zone = defaultZone
	}

	opts := rest.Opts{
		Method:       "GET",
		Path:         "/ws/" + zone + "/download/by_id",
		Parameters:   values,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
	}

	var filer *FileRequest
	resp, err := d.icloud.Request(ctx, opts, nil, &filer)

	if err != nil {
		return "", resp, err
	}

	var url string
	if filer.DataToken != nil {
		url = filer.DataToken.URL
	} else {
		url = filer.PackageToken.URL
	}

	return url, resp, err
}

// DownloadFile downloads a file from the given URL using the provided options.
func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.OpenOption) (*http.Response, error) {
	opts := &rest.Opts{
		Method:       "GET",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      url,
		Options:      opt,
	}

	resp, err := d.icloud.srv.Call(ctx, opts)
	if err != nil {
		// icloud has some weird http codes
		if resp.StatusCode == 330 {
			loc, err := resp.Location()
			if err == nil {
				return d.DownloadFile(ctx, loc.String(), opt)
			}
		}

		return resp, err
	}
	return d.icloud.srv.Call(ctx, opts)
}

// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
func (d *DriveService) MoveItemToTrashByItemID(ctx context.Context, id, etag string, force bool) (*DriveItem, *http.Response, error) {
	doc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	return d.MoveItemToTrashByID(ctx, doc.DriveID(), etag, force)
}

// MoveItemToTrashByID moves an item to the trash based on the drive ID (drivewsid).
func (d *DriveService) MoveItemToTrashByID(ctx context.Context, drivewsid, etag string, force bool) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"items": []map[string]any{{
			"drivewsid": drivewsid,
			"etag":      etag,
			"clientId":  drivewsid,
		}}}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/moveItemsToTrash",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}

	item := struct {
		Items []*DriveItem `json:"items"`
	}{}
	resp, err := d.icloud.Request(ctx, opts, nil, &item)

	if err != nil {
		return nil, resp, err
	}

	if item.Items[0].Status != statusOk {
		// rerun with latest etag
		if force && item.Items[0].Status == "ETAG_CONFLICT" {
			return d.MoveItemToTrashByID(ctx, drivewsid, item.Items[0].Etag, false)
		}

		err = newRequestError(item.Items[0].Status, "unknown request status")
	}

	return item.Items[0], resp, err
}

// CreateNewFolderByItemID creates a new folder by item ID.
func (d *DriveService) CreateNewFolderByItemID(ctx context.Context, id, name string) (*DriveItem, *http.Response, error) {
	doc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	return d.CreateNewFolderByDriveID(ctx, doc.DriveID(), name)
}

// CreateNewFolderByDriveID creates a new folder by its Drive ID.
func (d *DriveService) CreateNewFolderByDriveID(ctx context.Context, drivewsid, name string) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"destinationDrivewsId": drivewsid,
		"folders": []map[string]any{{
			"clientId": "FOLDER::UNKNOWN_ZONE::TempId-" + uuid.New().String(),
			"name":     name,
		}},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/createFolders",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}
	var fResp *CreateFoldersResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &fResp)
	if err != nil {
		return nil, resp, err
	}
	status := fResp.Folders[0].Status
	if status != statusOk {
		err = newRequestError(status, "unknown request status")
	}

	return fResp.Folders[0], resp, err
}

// RenameItemByItemID renames a DriveItem by its item ID.
func (d *DriveService) RenameItemByItemID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
	doc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	return d.RenameItemByDriveID(ctx, doc.DriveID(), doc.Etag, name, force)
}

// RenameItemByDriveID renames a DriveItem by its drive ID.
func (d *DriveService) RenameItemByDriveID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"items": []map[string]any{{
			"drivewsid": id,
			"name":      name,
			"etag":      etag,
			// "extension": split[1],
		}},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/renameItems",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}
	var items *DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &items)

	if err != nil {
		return nil, resp, err
	}

	status := items.Items[0].Status
	if status != statusOk {
		// rerun with latest etag
		if force && status == "ETAG_CONFLICT" {
			return d.RenameItemByDriveID(ctx, id, items.Items[0].Etag, name, false)
		}
		err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
	}

	return items.Items[0], resp, err
}

// MoveItemByItemID moves an item by its item ID to a destination item ID.
func (d *DriveService) MoveItemByItemID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
	docSrc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	docDst, resp, err := d.GetDocByItemID(ctx, dstID)
	if err != nil {
		return nil, resp, err
	}
	return d.MoveItemByDriveID(ctx, docSrc.DriveID(), docSrc.Etag, docDst.DriveID(), force)
}

// MoveItemByDocID moves an item by its doc ID.
// func (d *DriveService) MoveItemByDocID(ctx context.Context, srcDocID, srcEtag, dstDocID string, force bool) (*DriveItem, *http.Response, error) {
// 	return d.MoveItemByDriveID(ctx, srcDocID, srcEtag, docDst.DriveID(), force)
// }

// MoveItemByDriveID moves an item by its drive ID.
func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"destinationDrivewsId": dstID,
		"items": []map[string]any{{
			"drivewsid": id,
			"etag":      etag,
			"clientId":  id,
		}},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/moveItems",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}

	var items *DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &items)

	if err != nil {
		return nil, resp, err
	}

	status := items.Items[0].Status
	if status != statusOk {
		// rerun with latest etag
		if force && status == "ETAG_CONFLICT" {
			return d.MoveItemByDriveID(ctx, id, items.Items[0].Etag, dstID, false)
		}
		err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
	}

	return items.Items[0], resp, err
}

// CopyDocByItemID copies a document by its item ID.
func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
	// putting name in info doesn't work. extension does work so assume this is a bug in the endpoint
	values := map[string]any{
		"info_to_update": map[string]any{},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/v1/item/copy/" + itemID,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Body:         body,
	}

	var info *DriveItemRaw
	resp, err := d.icloud.Request(ctx, opts, nil, &info)
	if err != nil {
		return nil, resp, err
	}
	return info, resp, err
}

// CreateUpload creates a URL for an upload.
func (d *DriveService) CreateUpload(ctx context.Context, size int64, name string) (*UploadResponse, *http.Response, error) {
	// first we need to request an upload URL
	values := map[string]any{
		"filename":     name,
		"type":         "FILE",
		"size":         strconv.FormatInt(size, 10),
		"content_type": GetContentTypeForFile(name),
	}
	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/upload/web",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Body:         body,
	}
	var responseInfo []*UploadResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
	if err != nil {
		return nil, resp, err
	}
	return responseInfo[0], resp, err
}

// Upload uploads a file to the given url
func (d *DriveService) Upload(ctx context.Context, in io.Reader, size int64, name, uploadURL string) (*SingleFileResponse, *http.Response, error) {
	// TODO: implement multipart upload
	opts := rest.Opts{
		Method:        "POST",
		ExtraHeaders:  d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:       uploadURL,
		Body:          in,
		ContentLength: &size,
		ContentType:   GetContentTypeForFile(name),
		// MultipartContentName: "files",
		MultipartFileName: name,
	}
	var singleFileResponse *SingleFileResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &singleFileResponse)
	if err != nil {
		return nil, resp, err
	}
	return singleFileResponse, resp, err
}

// UpdateFile updates a file in the DriveService.
//
// ctx: the context.Context object for the request.
// r: a pointer to the UpdateFileInfo struct containing the information for the file update.
// Returns a pointer to the DriveItem struct representing the updated file, the http.Response object, and an error if any.
func (d *DriveService) UpdateFile(ctx context.Context, r *UpdateFileInfo) (*DriveItem, *http.Response, error) {
	body, err := IntoReader(r)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/update/documents",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Body:         body,
	}
	var responseInfo *DocumentUpdateResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
	if err != nil {
		return nil, resp, err
	}

	doc := responseInfo.Results[0].Document
	item := DriveItem{
		Drivewsid:    "FILE::com.apple.CloudDocs::" + doc.DocumentID,
		Docwsid:      doc.DocumentID,
		Itemid:       doc.ItemID,
		Etag:         doc.Etag,
		ParentID:     doc.ParentID,
		DateModified: time.Unix(r.Mtime, 0),
		DateCreated:  time.Unix(r.Mtime, 0),
		Type:         doc.Type,
		Name:         doc.Name,
		Size:         doc.Size,
	}

	return &item, resp, err
}

// UpdateFileInfo represents the information for an update to a file in the DriveService.
type UpdateFileInfo struct {
	AllowConflict   bool   `json:"allow_conflict"`
	Btime           int64  `json:"btime"`
	Command         string `json:"command"`
	CreateShortGUID bool   `json:"create_short_guid"`
	Data            struct {
		Receipt            string `json:"receipt,omitempty"`
		ReferenceSignature string `json:"reference_signature,omitempty"`
		Signature          string `json:"signature,omitempty"`
		Size               int64  `json:"size,omitempty"`
		WrappingKey        string `json:"wrapping_key,omitempty"`
	} `json:"data,omitempty"`
	DocumentID string    `json:"document_id"`
	FileFlags  FileFlags `json:"file_flags"`
	Mtime      int64     `json:"mtime"`
	Path       struct {
		Path               string `json:"path"`
		StartingDocumentID string `json:"starting_document_id"`
	} `json:"path"`
}

// FileFlags defines the file flags for a document.
type FileFlags struct {
	IsExecutable bool `json:"is_executable"`
	IsHidden     bool `json:"is_hidden"`
	IsWritable   bool `json:"is_writable"`
}

// NewUpdateFileInfo creates a new UpdateFileInfo object with default values.
//
// Returns an UpdateFileInfo object.
func NewUpdateFileInfo() UpdateFileInfo {
	return UpdateFileInfo{
		Command:         "add_file",
		CreateShortGUID: true,
		AllowConflict:   true,
		FileFlags: FileFlags{
			IsExecutable: true,
			IsHidden:     false,
			IsWritable:   false,
		},
	}
}
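
A hedged sketch of how NewUpdateFileInfo might be filled in before calling UpdateFile. The helper name and all values below are placeholders; a real caller derives them from the CreateUpload and Upload responses shown earlier:

package api

import "time"

// buildUpdate is a hypothetical helper combining the defaults from
// NewUpdateFileInfo with values taken from an earlier upload.
func buildUpdate(docID, name string, size int64) UpdateFileInfo {
	r := NewUpdateFileInfo()                    // "add_file" command, conflicts allowed
	r.DocumentID = docID                        // from UploadResponse.DocumentID (placeholder)
	r.Path.Path = name                          // file name within the parent folder
	r.Path.StartingDocumentID = "parent-doc-id" // placeholder parent document ID
	r.Data.Size = size                          // from SingleFileInfo.Size
	r.Mtime = time.Now().Unix()
	r.Btime = r.Mtime
	return r
	// afterwards: item, resp, err := d.UpdateFile(ctx, &r)
}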
|
||||
// DriveItemRaw is a raw drive item.
|
||||
// not suure what to call this but there seems to be a "unified" and non "unified" drive item response. This is the non unified.
|
||||
type DriveItemRaw struct {
|
||||
ItemID string `json:"item_id"`
|
||||
ItemInfo *DriveItemRawInfo `json:"item_info"`
|
||||
}
|
||||
|
||||
// SplitName splits the name of a DriveItemRaw into its name and extension.
|
||||
//
|
||||
// It returns the name and extension as separate strings. If the name ends with a dot,
|
||||
// it means there is no extension, so an empty string is returned for the extension.
|
||||
// If the name does not contain a dot, it means
|
||||
func (d *DriveItemRaw) SplitName() (string, string) {
|
||||
name := d.ItemInfo.Name
|
||||
// ends with a dot, no extension
|
||||
if strings.HasSuffix(name, ".") {
|
||||
return name, ""
|
||||
}
|
||||
lastInd := strings.LastIndex(name, ".")
|
||||
|
||||
if lastInd == -1 {
|
||||
return name, ""
|
||||
}
|
||||
return name[:lastInd], name[lastInd+1:]
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the DriveItemRaw.
|
||||
//
|
||||
// It parses the ModifiedAt field of the ItemInfo struct and converts it to a time.Time value.
|
||||
// If the parsing fails, it returns the zero value of time.Time.
|
||||
// The returned time.Time value represents the modification time of the DriveItemRaw.
|
||||
func (d *DriveItemRaw) ModTime() time.Time {
|
||||
i, err := strconv.ParseInt(d.ItemInfo.ModifiedAt, 10, 64)
|
||||
if err != nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return time.UnixMilli(i)
|
||||
}
|
||||
|
||||
// CreatedTime returns the creation time of the DriveItemRaw.
|
||||
//
|
||||
// It parses the CreatedAt field of the ItemInfo struct and converts it to a time.Time value.
|
||||
// If the parsing fails, it returns the zero value of time.Time.
|
||||
// The returned time.Time
|
||||
func (d *DriveItemRaw) CreatedTime() time.Time {
|
||||
i, err := strconv.ParseInt(d.ItemInfo.CreatedAt, 10, 64)
|
||||
if err != nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return time.UnixMilli(i)
|
||||
}
|
||||
|
||||
// DriveItemRawInfo is the raw information about a drive item.
|
||||
type DriveItemRawInfo struct {
|
||||
Name string `json:"name"`
|
||||
// Extension is absolutely borked on endpoints so dont use it.
|
||||
Extension string `json:"extension"`
|
||||
Size int64 `json:"size,string"`
|
||||
Type string `json:"type"`
|
||||
Version string `json:"version"`
|
||||
ModifiedAt string `json:"modified_at"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
Urls struct {
|
||||
URLDownload string `json:"url_download"`
|
||||
} `json:"urls"`
|
||||
}
|
||||
|
||||
// IntoDriveItem converts a DriveItemRaw into a DriveItem.
|
||||
//
|
||||
// It takes no parameters.
|
||||
// It returns a pointer to a DriveItem.
|
||||
func (d *DriveItemRaw) IntoDriveItem() *DriveItem {
|
||||
name, extension := d.SplitName()
|
||||
return &DriveItem{
|
||||
Itemid: d.ItemID,
|
||||
Name: name,
|
||||
Extension: extension,
|
||||
Type: d.ItemInfo.Type,
|
||||
Etag: d.ItemInfo.Version,
|
||||
DateModified: d.ModTime(),
|
||||
DateCreated: d.CreatedTime(),
|
||||
Size: d.ItemInfo.Size,
|
||||
Urls: d.ItemInfo.Urls,
|
||||
}
|
||||
}
|
||||
|
||||
// DocumentUpdateResponse is the response of a document update request.
|
||||
type DocumentUpdateResponse struct {
|
||||
Status struct {
|
||||
StatusCode int `json:"status_code"`
|
||||
ErrorMessage string `json:"error_message"`
|
||||
} `json:"status"`
|
||||
Results []struct {
|
||||
Status struct {
|
||||
StatusCode int `json:"status_code"`
|
||||
ErrorMessage string `json:"error_message"`
|
||||
} `json:"status"`
|
||||
OperationID interface{} `json:"operation_id"`
|
||||
Document *Document `json:"document"`
|
||||
} `json:"results"`
|
||||
}
|
||||
|
||||
// Document represents a document on iCloud.
|
||||
type Document struct {
|
||||
Status struct {
|
||||
StatusCode int `json:"status_code"`
|
||||
ErrorMessage string `json:"error_message"`
|
||||
} `json:"status"`
|
||||
DocumentID string `json:"document_id"`
|
||||
ItemID string `json:"item_id"`
|
||||
Urls struct {
|
||||
URLDownload string `json:"url_download"`
|
||||
} `json:"urls"`
|
||||
Etag string `json:"etag"`
|
||||
ParentID string `json:"parent_id"`
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
Deleted bool `json:"deleted"`
|
||||
Mtime int64 `json:"mtime"`
|
||||
LastEditorName string `json:"last_editor_name"`
|
||||
Data DocumentData `json:"data"`
|
||||
Size int64 `json:"size"`
|
||||
Btime int64 `json:"btime"`
|
||||
Zone string `json:"zone"`
|
||||
FileFlags struct {
|
||||
IsExecutable bool `json:"is_executable"`
|
||||
IsWritable bool `json:"is_writable"`
|
||||
IsHidden bool `json:"is_hidden"`
|
||||
} `json:"file_flags"`
|
||||
LastOpenedTime int64 `json:"lastOpenedTime"`
|
||||
RestorePath interface{} `json:"restorePath"`
|
||||
HasChainedParent bool `json:"hasChainedParent"`
|
||||
}
|
||||
|
||||
// DriveID returns the drive ID of the Document.
|
||||
func (d *Document) DriveID() string {
|
||||
if d.Zone == "" {
|
||||
d.Zone = defaultZone
|
||||
}
|
||||
return d.Type + "::" + d.Zone + "::" + d.DocumentID
|
||||
}
|
||||
|
||||
// DocumentData represents the data of a document.
|
||||
type DocumentData struct {
|
||||
Signature string `json:"signature"`
|
||||
Owner string `json:"owner"`
|
||||
Size int64 `json:"size"`
|
||||
ReferenceSignature string `json:"reference_signature"`
|
||||
WrappingKey string `json:"wrapping_key"`
|
||||
PcsInfo string `json:"pcsInfo"`
|
||||
}
|
||||
|
||||
// SingleFileResponse is the response of a single file request.
|
||||
type SingleFileResponse struct {
|
||||
SingleFile *SingleFileInfo `json:"singleFile"`
|
||||
}
|
||||
|
||||
// SingleFileInfo represents the information of a single file.
|
||||
type SingleFileInfo struct {
|
||||
ReferenceSignature string `json:"referenceChecksum"`
|
||||
Size int64 `json:"size"`
|
||||
Signature string `json:"fileChecksum"`
|
||||
WrappingKey string `json:"wrappingKey"`
|
||||
Receipt string `json:"receipt"`
|
||||
}
|
||||
|
||||
// UploadResponse is the response of an upload request.
|
||||
type UploadResponse struct {
|
||||
    URL        string `json:"url"`
    DocumentID string `json:"document_id"`
}

// FileRequestToken represents the token of a file request.
type FileRequestToken struct {
    URL                string `json:"url"`
    Token              string `json:"token"`
    Signature          string `json:"signature"`
    WrappingKey        string `json:"wrapping_key"`
    ReferenceSignature string `json:"reference_signature"`
}

// FileRequest represents the request of a file.
type FileRequest struct {
    DocumentID   string            `json:"document_id"`
    ItemID       string            `json:"item_id"`
    OwnerDsid    int64             `json:"owner_dsid"`
    DataToken    *FileRequestToken `json:"data_token,omitempty"`
    PackageToken *FileRequestToken `json:"package_token,omitempty"`
    DoubleEtag   string            `json:"double_etag"`
}

// CreateFoldersResponse is the response of a create folders request.
type CreateFoldersResponse struct {
    Folders []*DriveItem `json:"folders"`
}

// DriveItem represents an item on iCloud.
type DriveItem struct {
    DateCreated         time.Time    `json:"dateCreated"`
    Drivewsid           string       `json:"drivewsid"`
    Docwsid             string       `json:"docwsid"`
    Itemid              string       `json:"item_id"`
    Zone                string       `json:"zone"`
    Name                string       `json:"name"`
    ParentID            string       `json:"parentId"`
    Hierarchy           []DriveItem  `json:"hierarchy"`
    Etag                string       `json:"etag"`
    Type                string       `json:"type"`
    AssetQuota          int64        `json:"assetQuota"`
    FileCount           int64        `json:"fileCount"`
    ShareCount          int64        `json:"shareCount"`
    ShareAliasCount     int64        `json:"shareAliasCount"`
    DirectChildrenCount int64        `json:"directChildrenCount"`
    Items               []*DriveItem `json:"items"`
    NumberOfItems       int64        `json:"numberOfItems"`
    Status              string       `json:"status"`
    Extension           string       `json:"extension,omitempty"`
    DateModified        time.Time    `json:"dateModified,omitempty"`
    DateChanged         time.Time    `json:"dateChanged,omitempty"`
    Size                int64        `json:"size,omitempty"`
    LastOpenTime        time.Time    `json:"lastOpenTime,omitempty"`
    Urls                struct {
        URLDownload string `json:"url_download"`
    } `json:"urls"`
}

// IsFolder returns true if the item is a folder.
func (d *DriveItem) IsFolder() bool {
    return d.Type == "FOLDER" || d.Type == "APP_CONTAINER" || d.Type == "APP_LIBRARY"
}

// DownloadURL returns the download URL of the item.
func (d *DriveItem) DownloadURL() string {
    return d.Urls.URLDownload
}

// FullName returns the full name of the item (name + extension).
func (d *DriveItem) FullName() string {
    if d.Extension != "" {
        return d.Name + "." + d.Extension
    }
    return d.Name
}

// GetDocIDFromDriveID returns the DocumentID from the drive ID.
func GetDocIDFromDriveID(id string) string {
    split := strings.Split(id, "::")
    return split[len(split)-1]
}

// DeconstructDriveID returns the document type, zone, and document ID from the drive ID.
func DeconstructDriveID(id string) (docType, zone, docid string) {
    split := strings.Split(id, "::")
    if len(split) < 3 {
        return "", "", id
    }
    return split[0], split[1], split[2]
}

// ConstructDriveID constructs a drive ID from the given components.
func ConstructDriveID(id string, zone string, t string) string {
    return strings.Join([]string{t, zone, id}, "::")
}

// GetContentTypeForFile detects the content type for the given file name.
func GetContentTypeForFile(name string) string {
    // detect MIME type by looking at the filename only
    mimeType := mime.TypeByExtension(filepath.Ext(name))
    if mimeType == "" {
        // the API requires a MIME type to be passed in
        mimeType = "text/plain"
    }
    return strings.Split(mimeType, ";")[0]
}
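A quick round-trip of the drive-ID helpers above — a minimal sketch, not part of the commit; the zone and document ID values are invented for illustration:

package api_test

import (
    "fmt"

    "github.com/rclone/rclone/backend/iclouddrive/api"
)

func ExampleConstructDriveID() {
    // build "type::zone::docid", then take it apart again
    id := api.ConstructDriveID("docid123", "com.apple.CloudDocs", "FILE")
    fmt.Println(id)
    docType, zone, docid := api.DeconstructDriveID(id)
    fmt.Println(docType, zone, docid)
    fmt.Println(api.GetDocIDFromDriveID(id))
    // Output:
    // FILE::com.apple.CloudDocs::docid123
    // FILE com.apple.CloudDocs docid123
    // docid123
}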
412
backend/iclouddrive/api/session.go
Normal file
@@ -0,0 +1,412 @@
package api

import (
    "context"
    "fmt"
    "net/http"
    "net/url"
    "slices"
    "strings"

    "github.com/oracle/oci-go-sdk/v65/common"

    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/lib/rest"
)

// Session represents an iCloud session
type Session struct {
    SessionToken   string         `json:"session_token"`
    Scnt           string         `json:"scnt"`
    SessionID      string         `json:"session_id"`
    AccountCountry string         `json:"account_country"`
    TrustToken     string         `json:"trust_token"`
    ClientID       string         `json:"client_id"`
    Cookies        []*http.Cookie `json:"cookies"`
    AccountInfo    AccountInfo    `json:"account_info"`

    srv *rest.Client `json:"-"`
}

// String returns the session as a string
// func (s *Session) String() string {
// 	jsession, _ := json.Marshal(s)
// 	return string(jsession)
// }

// Request makes a request and saves any updated session tokens from the response headers
func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) {
    resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)
    if err != nil {
        return resp, err
    }

    if val := resp.Header.Get("X-Apple-ID-Account-Country"); val != "" {
        s.AccountCountry = val
    }
    if val := resp.Header.Get("X-Apple-ID-Session-Id"); val != "" {
        s.SessionID = val
    }
    if val := resp.Header.Get("X-Apple-Session-Token"); val != "" {
        s.SessionToken = val
    }
    if val := resp.Header.Get("X-Apple-TwoSV-Trust-Token"); val != "" {
        s.TrustToken = val
    }
    if val := resp.Header.Get("scnt"); val != "" {
        s.Scnt = val
    }

    return resp, nil
}

// Requires2FA returns true if the session requires 2FA
func (s *Session) Requires2FA() bool {
    return s.AccountInfo.DsInfo.HsaVersion == 2 && s.AccountInfo.HsaChallengeRequired
}

// SignIn signs in the session
func (s *Session) SignIn(ctx context.Context, appleID, password string) error {
    trustTokens := []string{}
    if s.TrustToken != "" {
        trustTokens = []string{s.TrustToken}
    }
    values := map[string]any{
        "accountName": appleID,
        "password":    password,
        "rememberMe":  true,
        "trustTokens": trustTokens,
    }
    body, err := IntoReader(values)
    if err != nil {
        return err
    }
    opts := rest.Opts{
        Method:       "POST",
        Path:         "/signin",
        Parameters:   url.Values{},
        ExtraHeaders: s.GetAuthHeaders(map[string]string{}),
        RootURL:      authEndpoint,
        IgnoreStatus: true, // need to handle 409 for hsa2
        NoResponse:   true,
        Body:         body,
    }
    opts.Parameters.Set("isRememberMeEnabled", "true")
    _, err = s.Request(ctx, opts, nil, nil)
    return err
}

// AuthWithToken authenticates the session
func (s *Session) AuthWithToken(ctx context.Context) error {
    values := map[string]any{
        "accountCountryCode": s.AccountCountry,
        "dsWebAuthToken":     s.SessionToken,
        "extended_login":     true,
        "trustToken":         s.TrustToken,
    }
    body, err := IntoReader(values)
    if err != nil {
        return err
    }
    opts := rest.Opts{
        Method:       "POST",
        Path:         "/accountLogin",
        ExtraHeaders: GetCommonHeaders(map[string]string{}),
        RootURL:      setupEndpoint,
        Body:         body,
    }

    resp, err := s.Request(ctx, opts, nil, &s.AccountInfo)
    if err == nil {
        s.Cookies = resp.Cookies()
    }

    return err
}

// Validate2FACode validates the 2FA code
func (s *Session) Validate2FACode(ctx context.Context, code string) error {
    values := map[string]interface{}{"securityCode": map[string]string{"code": code}}
    body, err := IntoReader(values)
    if err != nil {
        return err
    }

    headers := s.GetAuthHeaders(map[string]string{})
    headers["scnt"] = s.Scnt
    headers["X-Apple-ID-Session-Id"] = s.SessionID

    opts := rest.Opts{
        Method:       "POST",
        Path:         "/verify/trusteddevice/securitycode",
        ExtraHeaders: headers,
        RootURL:      authEndpoint,
        Body:         body,
        NoResponse:   true,
    }

    _, err = s.Request(ctx, opts, nil, nil)
    if err == nil {
        if err := s.TrustSession(ctx); err != nil {
            return err
        }
        return nil
    }

    return fmt.Errorf("validate2FACode failed: %w", err)
}

// TrustSession trusts the session
func (s *Session) TrustSession(ctx context.Context) error {
    headers := s.GetAuthHeaders(map[string]string{})
    headers["scnt"] = s.Scnt
    headers["X-Apple-ID-Session-Id"] = s.SessionID

    opts := rest.Opts{
        Method:        "GET",
        Path:          "/2sv/trust",
        ExtraHeaders:  headers,
        RootURL:       authEndpoint,
        NoResponse:    true,
        ContentLength: common.Int64(0),
    }

    _, err := s.Request(ctx, opts, nil, nil)
    if err != nil {
        return fmt.Errorf("trustSession failed: %w", err)
    }

    return s.AuthWithToken(ctx)
}

// ValidateSession validates the session
func (s *Session) ValidateSession(ctx context.Context) error {
    opts := rest.Opts{
        Method:        "POST",
        Path:          "/validate",
        ExtraHeaders:  s.GetHeaders(map[string]string{}),
        RootURL:       setupEndpoint,
        ContentLength: common.Int64(0),
    }
    _, err := s.Request(ctx, opts, nil, &s.AccountInfo)
    if err != nil {
        return fmt.Errorf("validateSession failed: %w", err)
    }

    return nil
}

// GetAuthHeaders returns the authentication headers for the session.
//
// It takes an `overwrite` map[string]string parameter which allows
// overwriting the default headers. It returns a map[string]string.
func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string {
    headers := map[string]string{
        "Accept":                           "application/json",
        "Content-Type":                     "application/json",
        "X-Apple-OAuth-Client-Id":          s.ClientID,
        "X-Apple-OAuth-Client-Type":        "firstPartyAuth",
        "X-Apple-OAuth-Redirect-URI":       "https://www.icloud.com",
        "X-Apple-OAuth-Require-Grant-Code": "true",
        "X-Apple-OAuth-Response-Mode":      "web_message",
        "X-Apple-OAuth-Response-Type":      "code",
        "X-Apple-OAuth-State":              s.ClientID,
        "X-Apple-Widget-Key":               s.ClientID,
        "Origin":                           homeEndpoint,
        "Referer":                          fmt.Sprintf("%s/", homeEndpoint),
        "User-Agent":                       "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
    }
    for k, v := range overwrite {
        headers[k] = v
    }
    return headers
}

// GetHeaders returns the authentication headers required for a request
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
    headers := GetCommonHeaders(map[string]string{})
    headers["Cookie"] = s.GetCookieString()
    for k, v := range overwrite {
        headers[k] = v
    }
    return headers
}

// GetCookieString returns the cookie header string for the session.
func (s *Session) GetCookieString() string {
    cookieHeader := ""
    // we only care about name and value.
    for _, cookie := range s.Cookies {
        cookieHeader = cookieHeader + cookie.Name + "=" + cookie.Value + ";"
    }
    return cookieHeader
}

// GetCommonHeaders generates common HTTP headers with optional overwrite.
func GetCommonHeaders(overwrite map[string]string) map[string]string {
    headers := map[string]string{
        "Content-Type": "application/json",
        "Origin":       baseEndpoint,
        "Referer":      fmt.Sprintf("%s/", baseEndpoint),
        "User-Agent":   "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
    }
    for k, v := range overwrite {
        headers[k] = v
    }
    return headers
}

// MergeCookies merges two slices of http.Cookies, ensuring no duplicates are added.
func MergeCookies(left []*http.Cookie, right []*http.Cookie) ([]*http.Cookie, error) {
    var hashes []string
    for _, cookie := range right {
        hashes = append(hashes, cookie.Raw)
    }
    for _, cookie := range left {
        if !slices.Contains(hashes, cookie.Raw) {
            right = append(right, cookie)
        }
    }
    return right, nil
}

// GetCookiesForDomain filters the provided cookies based on the domain of the given URL.
func GetCookiesForDomain(url *url.URL, cookies []*http.Cookie) ([]*http.Cookie, error) {
    var domainCookies []*http.Cookie
    for _, cookie := range cookies {
        if strings.HasSuffix(url.Host, cookie.Domain) {
            domainCookies = append(domainCookies, cookie)
        }
    }
    return domainCookies, nil
}

// NewSession creates a new Session instance with default values.
func NewSession() *Session {
    session := &Session{}
    session.srv = rest.NewClient(fshttp.NewClient(context.Background())).SetRoot(baseEndpoint)
    //session.ClientID = "auth-" + uuid.New().String()
    return session
}

// AccountInfo represents account info
type AccountInfo struct {
    DsInfo                       *ValidateDataDsInfo    `json:"dsInfo"`
    HasMinimumDeviceForPhotosWeb bool                   `json:"hasMinimumDeviceForPhotosWeb"`
    ICDPEnabled                  bool                   `json:"iCDPEnabled"`
    Webservices                  map[string]*webService `json:"webservices"`
    PcsEnabled                   bool                   `json:"pcsEnabled"`
    TermsUpdateNeeded            bool                   `json:"termsUpdateNeeded"`
    ConfigBag                    struct {
        Urls struct {
            AccountCreateUI     string `json:"accountCreateUI"`
            AccountLoginUI      string `json:"accountLoginUI"`
            AccountLogin        string `json:"accountLogin"`
            AccountRepairUI     string `json:"accountRepairUI"`
            DownloadICloudTerms string `json:"downloadICloudTerms"`
            RepairDone          string `json:"repairDone"`
            AccountAuthorizeUI  string `json:"accountAuthorizeUI"`
            VettingURLForEmail  string `json:"vettingUrlForEmail"`
            AccountCreate       string `json:"accountCreate"`
            GetICloudTerms      string `json:"getICloudTerms"`
            VettingURLForPhone  string `json:"vettingUrlForPhone"`
        } `json:"urls"`
        AccountCreateEnabled bool `json:"accountCreateEnabled"`
    } `json:"configBag"`
    HsaTrustedBrowser            bool     `json:"hsaTrustedBrowser"`
    AppsOrder                    []string `json:"appsOrder"`
    Version                      int      `json:"version"`
    IsExtendedLogin              bool     `json:"isExtendedLogin"`
    PcsServiceIdentitiesIncluded bool     `json:"pcsServiceIdentitiesIncluded"`
    IsRepairNeeded               bool     `json:"isRepairNeeded"`
    HsaChallengeRequired         bool     `json:"hsaChallengeRequired"`
    RequestInfo                  struct {
        Country  string `json:"country"`
        TimeZone string `json:"timeZone"`
        Region   string `json:"region"`
    } `json:"requestInfo"`
    PcsDeleted bool `json:"pcsDeleted"`
    ICloudInfo struct {
        SafariBookmarksHasMigratedToCloudKit bool `json:"SafariBookmarksHasMigratedToCloudKit"`
    } `json:"iCloudInfo"`
    Apps map[string]*ValidateDataApp `json:"apps"`
}

// ValidateDataDsInfo represents validation info
type ValidateDataDsInfo struct {
    HsaVersion                         int           `json:"hsaVersion"`
    LastName                           string        `json:"lastName"`
    ICDPEnabled                        bool          `json:"iCDPEnabled"`
    TantorMigrated                     bool          `json:"tantorMigrated"`
    Dsid                               string        `json:"dsid"`
    HsaEnabled                         bool          `json:"hsaEnabled"`
    IsHideMyEmailSubscriptionActive    bool          `json:"isHideMyEmailSubscriptionActive"`
    IroncadeMigrated                   bool          `json:"ironcadeMigrated"`
    Locale                             string        `json:"locale"`
    BrZoneConsolidated                 bool          `json:"brZoneConsolidated"`
    ICDRSCapableDeviceList             string        `json:"ICDRSCapableDeviceList"`
    IsManagedAppleID                   bool          `json:"isManagedAppleID"`
    IsCustomDomainsFeatureAvailable    bool          `json:"isCustomDomainsFeatureAvailable"`
    IsHideMyEmailFeatureAvailable      bool          `json:"isHideMyEmailFeatureAvailable"`
    ContinueOnDeviceEligibleDeviceInfo []string      `json:"ContinueOnDeviceEligibleDeviceInfo"`
    Gilligvited                        bool          `json:"gilligvited"`
    AppleIDAliases                     []interface{} `json:"appleIdAliases"`
    UbiquityEOLEnabled                 bool          `json:"ubiquityEOLEnabled"`
    IsPaidDeveloper                    bool          `json:"isPaidDeveloper"`
    CountryCode                        string        `json:"countryCode"`
    NotificationID                     string        `json:"notificationId"`
    PrimaryEmailVerified               bool          `json:"primaryEmailVerified"`
    ADsID                              string        `json:"aDsID"`
    Locked                             bool          `json:"locked"`
    ICDRSCapableDeviceCount            int           `json:"ICDRSCapableDeviceCount"`
    HasICloudQualifyingDevice          bool          `json:"hasICloudQualifyingDevice"`
    PrimaryEmail                       string        `json:"primaryEmail"`
    AppleIDEntries                     []struct {
        IsPrimary bool   `json:"isPrimary"`
        Type      string `json:"type"`
        Value     string `json:"value"`
    } `json:"appleIdEntries"`
    GilliganEnabled    bool   `json:"gilligan-enabled"`
    IsWebAccessAllowed bool   `json:"isWebAccessAllowed"`
    FullName           string `json:"fullName"`
    MailFlags          struct {
        IsThreadingAvailable           bool `json:"isThreadingAvailable"`
        IsSearchV2Provisioned          bool `json:"isSearchV2Provisioned"`
        SCKMail                        bool `json:"sCKMail"`
        IsMppSupportedInCurrentCountry bool `json:"isMppSupportedInCurrentCountry"`
    } `json:"mailFlags"`
    LanguageCode         string `json:"languageCode"`
    AppleID              string `json:"appleId"`
    HasUnreleasedOS      bool   `json:"hasUnreleasedOS"`
    AnalyticsOptInStatus bool   `json:"analyticsOptInStatus"`
    FirstName            string `json:"firstName"`
    ICloudAppleIDAlias   string `json:"iCloudAppleIdAlias"`
    NotesMigrated        bool   `json:"notesMigrated"`
    BeneficiaryInfo      struct {
        IsBeneficiary bool `json:"isBeneficiary"`
    } `json:"beneficiaryInfo"`
    HasPaymentInfo bool   `json:"hasPaymentInfo"`
    PcsDelet       bool   `json:"pcsDelet"`
    AppleIDAlias   string `json:"appleIdAlias"`
    BrMigrated     bool   `json:"brMigrated"`
    StatusCode     int    `json:"statusCode"`
    FamilyEligible bool   `json:"familyEligible"`
}

// ValidateDataApp represents an app
type ValidateDataApp struct {
    CanLaunchWithOneFactor bool `json:"canLaunchWithOneFactor"`
    IsQualifiedForBeta     bool `json:"isQualifiedForBeta"`
}

// webService represents a web service
type webService struct {
    PcsRequired bool   `json:"pcsRequired"`
    URL         string `json:"url"`
    UploadURL   string `json:"uploadUrl"`
    Status      string `json:"status"`
}
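A minimal sketch of how the Session type above is meant to be driven: password sign-in, token auth, then trusted-device 2FA when the account requires it. This ordering is inferred from the methods themselves rather than shown in the diff, and the credentials and code are placeholders.

package main

import (
    "context"
    "fmt"

    "github.com/rclone/rclone/backend/iclouddrive/api"
)

// signIn performs the full authentication handshake against iCloud.
func signIn(ctx context.Context, appleID, password, code string) (*api.Session, error) {
    s := api.NewSession()
    if err := s.SignIn(ctx, appleID, password); err != nil {
        return nil, err
    }
    if err := s.AuthWithToken(ctx); err != nil {
        return nil, err
    }
    if s.Requires2FA() {
        // Validate2FACode trusts the session and re-authenticates on success
        if err := s.Validate2FACode(ctx, code); err != nil {
            return nil, err
        }
    }
    return s, nil
}

func main() {
    s, err := signIn(context.Background(), "user@example.com", "password", "123456")
    if err != nil {
        fmt.Println("sign-in failed:", err)
        return
    }
    fmt.Println("requires 2FA:", s.Requires2FA())
}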
1174
backend/iclouddrive/iclouddrive.go
Normal file
File diff suppressed because it is too large
18
backend/iclouddrive/iclouddrive_test.go
Normal file
@@ -0,0 +1,18 @@
//go:build !plan9 && !solaris

package iclouddrive_test

import (
    "testing"

    "github.com/rclone/rclone/backend/iclouddrive"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestICloudDrive:",
        NilObject:  (*iclouddrive.Object)(nil),
    })
}
7
backend/iclouddrive/iclouddrive_unsupported.go
Normal file
@@ -0,0 +1,7 @@
// Build for iclouddrive for unsupported platforms to stop go complaining
// about "no buildable Go source files"

//go:build plan9 || solaris

// Package iclouddrive implements the iCloud Drive backend
package iclouddrive
@@ -75,7 +75,7 @@ type MoveFolderParam struct {
	DestinationPath string `validate:"nonzero" json:"destinationPath"`
}

-// JobIDResponse respresents response struct with JobID for folder operations
+// JobIDResponse represents response struct with JobID for folder operations
type JobIDResponse struct {
	JobID string `json:"jobId"`
}

@@ -56,7 +56,7 @@ func (ik *ImageKit) URL(params URLParam) (string, error) {
	var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
	var path = strings.Replace(resultURL, endpoint, "", 1)

-	path = path + expires
+	path += expires
	mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
	mac.Write([]byte(path))
	signature := hex.EncodeToString(mac.Sum(nil))

@@ -151,6 +151,19 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
	Help:     "Host of InternetArchive Frontend.\n\nLeave blank for default value.",
	Default:  "https://archive.org",
	Advanced: true,
}, {
+	Name: "item_metadata",
+	Help: `Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set.
+Format is key=value and the 'x-archive-meta-' prefix is automatically added.`,
+	Default:  []string{},
+	Hide:     fs.OptionHideConfigurator,
+	Advanced: true,
+}, {
+	Name: "item_derive",
+	Help: `Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload.
+The derive process produces a number of secondary files from an upload to make an upload more usable on the web.
+Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure.`,
+	Default: true,
+}, {
	Name: "disable_checksum",
	Help: `Don't ask the server to test against MD5 checksum calculated by rclone.
@@ -201,6 +214,8 @@ type Options struct {
	Endpoint        string               `config:"endpoint"`
	FrontEndpoint   string               `config:"front_endpoint"`
	DisableChecksum bool                 `config:"disable_checksum"`
+	ItemMetadata    []string             `config:"item_metadata"`
+	ItemDerive      bool                 `config:"item_derive"`
	WaitArchive     fs.Duration          `config:"wait_archive"`
	Enc             encoder.MultiEncoder `config:"encoding"`
}
@@ -790,17 +805,23 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	"x-amz-filemeta-rclone-update-track": updateTracker,

	// we add some more headers for intuitive actions
-	"x-amz-auto-make-bucket":     "1",    // create an item if does not exist, do nothing if already
-	"x-archive-auto-make-bucket": "1",    // same as above in IAS3 original way
-	"x-archive-keep-old-version": "0",    // do not keep old versions (a.k.a. trashes in other clouds)
-	"x-archive-meta-mediatype":   "data", // mark media type of the uploading file as "data"
-	"x-archive-queue-derive":     "0",    // skip derivation process (e.g. encoding to smaller files, OCR on PDFs)
-	"x-archive-cascade-delete":   "1",    // enable "cascade delete" (delete all derived files in addition to the file itself)
+	"x-amz-auto-make-bucket":     "1", // create an item if does not exist, do nothing if already
+	"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
+	"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
+	"x-archive-cascade-delete":   "1", // enable "cascade delete" (delete all derived files in addition to the file itself)
}

if size >= 0 {
	headers["Content-Length"] = fmt.Sprintf("%d", size)
	headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
}

+// This is IA's ITEM metadata, not file metadata
+headers, err = o.appendItemMetadataHeaders(headers, o.fs.opt)
+if err != nil {
+	return err
+}
+
var mdata fs.Metadata
mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options)
if err == nil && mdata != nil {
@@ -863,6 +884,51 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	return err
}

+func (o *Object) appendItemMetadataHeaders(headers map[string]string, options Options) (newHeaders map[string]string, err error) {
+	metadataCounter := make(map[string]int)
+	metadataValues := make(map[string][]string)
+
+	// First pass: count occurrences and collect values
+	for _, v := range options.ItemMetadata {
+		parts := strings.SplitN(v, "=", 2)
+		if len(parts) != 2 {
+			return newHeaders, errors.New("item metadata must be in the form key=value")
+		}
+		key, value := parts[0], parts[1]
+		metadataCounter[key]++
+		metadataValues[key] = append(metadataValues[key], value)
+	}
+
+	// Second pass: add headers with appropriate prefixes
+	for key, count := range metadataCounter {
+		if count == 1 {
+			// Only one occurrence, use x-archive-meta-
+			headers[fmt.Sprintf("x-archive-meta-%s", key)] = metadataValues[key][0]
+		} else {
+			// Multiple occurrences, use x-archive-meta01-, x-archive-meta02-, etc.
+			for i, value := range metadataValues[key] {
+				headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = value
+			}
+		}
+	}
+
+	if o.fs.opt.ItemDerive {
+		headers["x-archive-queue-derive"] = "1"
+	} else {
+		headers["x-archive-queue-derive"] = "0"
+	}
+
+	fs.Debugf(o, "Setting IA item derive: %t", o.fs.opt.ItemDerive)
+
+	for k, v := range headers {
+		if strings.HasPrefix(k, "x-archive-meta") {
+			fs.Debugf(o, "Setting IA item metadata: %s=%s", k, v)
+		}
+	}
+
+	return headers, nil
+}
+
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	bucket, bucketPath := o.split()

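To see what the numbering scheme in appendItemMetadataHeaders above produces, here is the same two-pass logic reduced to a standalone sketch (not from the commit; the key=value inputs are invented):

package main

import (
    "fmt"
    "strings"
)

func main() {
    items := []string{"title=My Item", "collection=test_collection", "collection=opensource"}
    counts := map[string]int{}
    values := map[string][]string{}
    for _, kv := range items {
        // assume well-formed key=value input here
        parts := strings.SplitN(kv, "=", 2)
        counts[parts[0]]++
        values[parts[0]] = append(values[parts[0]], parts[1])
    }
    headers := map[string]string{}
    for key, n := range counts {
        if n == 1 {
            // unique key: plain x-archive-meta- prefix
            headers["x-archive-meta-"+key] = values[key][0]
            continue
        }
        // repeated key: numbered x-archive-meta01-, x-archive-meta02-, ...
        for i, v := range values[key] {
            headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = v
        }
    }
    fmt.Println(headers)
    // yields x-archive-meta-title, x-archive-meta01-collection, x-archive-meta02-collection
}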
@@ -277,11 +277,9 @@ machines.`)
	m.Set(configClientID, teliaseCloudClientID)
	m.Set(configTokenURL, teliaseCloudTokenURL)
	return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-		OAuth2Config: &oauth2.Config{
-			Endpoint: oauth2.Endpoint{
-				AuthURL:  teliaseCloudAuthURL,
-				TokenURL: teliaseCloudTokenURL,
-			},
+		OAuth2Config: &oauthutil.Config{
+			AuthURL:  teliaseCloudAuthURL,
+			TokenURL: teliaseCloudTokenURL,
			ClientID:    teliaseCloudClientID,
			Scopes:      []string{"openid", "jotta-default", "offline_access"},
			RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -292,11 +290,9 @@ machines.`)
	m.Set(configClientID, telianoCloudClientID)
	m.Set(configTokenURL, telianoCloudTokenURL)
	return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-		OAuth2Config: &oauth2.Config{
-			Endpoint: oauth2.Endpoint{
-				AuthURL:  telianoCloudAuthURL,
-				TokenURL: telianoCloudTokenURL,
-			},
+		OAuth2Config: &oauthutil.Config{
+			AuthURL:  telianoCloudAuthURL,
+			TokenURL: telianoCloudTokenURL,
			ClientID:    telianoCloudClientID,
			Scopes:      []string{"openid", "jotta-default", "offline_access"},
			RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -307,11 +303,9 @@ machines.`)
	m.Set(configClientID, tele2CloudClientID)
	m.Set(configTokenURL, tele2CloudTokenURL)
	return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-		OAuth2Config: &oauth2.Config{
-			Endpoint: oauth2.Endpoint{
-				AuthURL:  tele2CloudAuthURL,
-				TokenURL: tele2CloudTokenURL,
-			},
+		OAuth2Config: &oauthutil.Config{
+			AuthURL:  tele2CloudAuthURL,
+			TokenURL: tele2CloudTokenURL,
			ClientID:    tele2CloudClientID,
			Scopes:      []string{"openid", "jotta-default", "offline_access"},
			RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -322,11 +316,9 @@ machines.`)
	m.Set(configClientID, onlimeCloudClientID)
	m.Set(configTokenURL, onlimeCloudTokenURL)
	return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-		OAuth2Config: &oauth2.Config{
-			Endpoint: oauth2.Endpoint{
-				AuthURL:  onlimeCloudAuthURL,
-				TokenURL: onlimeCloudTokenURL,
-			},
+		OAuth2Config: &oauthutil.Config{
+			AuthURL:  onlimeCloudAuthURL,
+			TokenURL: onlimeCloudTokenURL,
			ClientID:    onlimeCloudClientID,
			Scopes:      []string{"openid", "jotta-default", "offline_access"},
			RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -924,19 +916,17 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
	}

	baseClient := fshttp.NewClient(ctx)
-	oauthConfig := &oauth2.Config{
-		Endpoint: oauth2.Endpoint{
-			AuthURL:  defaultTokenURL,
-			TokenURL: defaultTokenURL,
-		},
+	oauthConfig := &oauthutil.Config{
+		AuthURL:  defaultTokenURL,
+		TokenURL: defaultTokenURL,
	}
	if ver == configVersion {
		oauthConfig.ClientID = defaultClientID
		// if custom endpoints are set use them else stick with defaults
		if tokenURL, ok := m.Get(configTokenURL); ok {
-			oauthConfig.Endpoint.TokenURL = tokenURL
+			oauthConfig.TokenURL = tokenURL
			// jottacloud is weird. we need to use the tokenURL as authURL
-			oauthConfig.Endpoint.AuthURL = tokenURL
+			oauthConfig.AuthURL = tokenURL
		}
	} else if ver == legacyConfigVersion {
		clientID, ok := m.Get(configClientID)
@@ -950,8 +940,8 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
	oauthConfig.ClientID = clientID
	oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

-	oauthConfig.Endpoint.TokenURL = legacyTokenURL
-	oauthConfig.Endpoint.AuthURL = legacyTokenURL
+	oauthConfig.TokenURL = legacyTokenURL
+	oauthConfig.AuthURL = legacyTokenURL

	// add the request filter to fix token refresh
	if do, ok := baseClient.Transport.(interface {
@@ -1487,16 +1477,38 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		return nil, fs.ErrorCantMove
	}

-	err := f.mkParentDir(ctx, remote)
+	meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
	if err != nil {
		return nil, err
	}

+	if err := f.mkParentDir(ctx, remote); err != nil {
+		return nil, err
+	}
	info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)

-	// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
-	if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
-		fs.Debugf(src, "Server-side copied to trashed destination, restoring")
-		info, err = f.createOrUpdate(ctx, remote, srcObj.createTime, srcObj.modTime, srcObj.size, srcObj.md5)
+	if err == nil {
+		var createTime time.Time
+		var createTimeMeta bool
+		var modTime time.Time
+		var modTimeMeta bool
+		if meta != nil {
+			createTime, createTimeMeta = srcObj.parseFsMetadataTime(meta, "btime")
+			if !createTimeMeta {
+				createTime = srcObj.createTime
+			}
+			modTime, modTimeMeta = srcObj.parseFsMetadataTime(meta, "mtime")
+			if !modTimeMeta {
+				modTime = srcObj.modTime
+			}
+		}
+		if bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
+			// Workaround necessary when destination was a trashed file, to avoid the copied file also being in trash (bug in api?)
+			fs.Debugf(src, "Server-side copied to trashed destination, restoring")
+			info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
+		} else if createTimeMeta || modTimeMeta {
+			info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
+		}
	}

	if err != nil {
@@ -1523,12 +1535,30 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
		return nil, fs.ErrorCantMove
	}

-	err := f.mkParentDir(ctx, remote)
+	meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
	if err != nil {
		return nil, err
	}

+	if err := f.mkParentDir(ctx, remote); err != nil {
+		return nil, err
+	}
	info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)

+	if err == nil && meta != nil {
+		createTime, createTimeMeta := srcObj.parseFsMetadataTime(meta, "btime")
+		if !createTimeMeta {
+			createTime = srcObj.createTime
+		}
+		modTime, modTimeMeta := srcObj.parseFsMetadataTime(meta, "mtime")
+		if !modTimeMeta {
+			modTime = srcObj.modTime
+		}
+		if createTimeMeta || modTimeMeta {
+			info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
+		}
+	}

	if err != nil {
		return nil, fmt.Errorf("couldn't move file: %w", err)
	}
@@ -1786,6 +1816,20 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
	return o.setMetaData(info)
}

+// parseFsMetadataTime parses a time string from fs.Metadata with the given key
+func (o *Object) parseFsMetadataTime(m fs.Metadata, key string) (t time.Time, ok bool) {
+	value, ok := m[key]
+	if ok {
+		var err error
+		t, err = time.Parse(time.RFC3339Nano, value) // metadata stores RFC3339Nano timestamps
+		if err != nil {
+			fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
+			ok = false
+		}
+	}
+	return t, ok
+}
+
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
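The contract of parseFsMetadataTime above is: ok is true only when the key exists and parses as RFC3339Nano, and callers fall back to the source object's own times otherwise. A standalone sketch of that behaviour (not from the commit; the metadata values are invented):

package main

import (
    "fmt"
    "time"
)

// parseMetaTime mirrors the helper: value must exist and parse as RFC3339Nano.
func parseMetaTime(meta map[string]string, key string) (time.Time, bool) {
    value, ok := meta[key]
    if !ok {
        return time.Time{}, false
    }
    t, err := time.Parse(time.RFC3339Nano, value)
    if err != nil {
        return time.Time{}, false // caller falls back to the source time
    }
    return t, true
}

func main() {
    meta := map[string]string{"mtime": "2024-05-01T10:11:12.123456789Z", "btime": "not-a-time"}
    if t, ok := parseMetaTime(meta, "mtime"); ok {
        fmt.Println("mtime from metadata:", t)
    }
    if _, ok := parseMetaTime(meta, "btime"); !ok {
        fmt.Println("btime unparseable: fall back to the source object's time, as Copy/Move above do")
    }
}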
@@ -1957,21 +2001,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	var createdTime string
	var modTime string
	if meta != nil {
-		if v, ok := meta["btime"]; ok {
-			t, err := time.Parse(time.RFC3339Nano, v) // metadata stores RFC3339Nano timestamps
-			if err != nil {
-				fs.Debugf(o, "failed to parse metadata btime: %q: %v", v, err)
-			} else {
-				createdTime = api.Rfc3339Time(t).String() // jottacloud api wants RFC3339 timestamps
-			}
+		if t, ok := o.parseFsMetadataTime(meta, "btime"); ok {
+			createdTime = api.Rfc3339Time(t).String() // jottacloud api wants RFC3339 timestamps
		}
-		if v, ok := meta["mtime"]; ok {
-			t, err := time.Parse(time.RFC3339Nano, v)
-			if err != nil {
-				fs.Debugf(o, "failed to parse metadata mtime: %q: %v", v, err)
-			} else {
-				modTime = api.Rfc3339Time(t).String()
-			}
+		if t, ok := o.parseFsMetadataTime(meta, "mtime"); ok {
+			modTime = api.Rfc3339Time(t).String()
		}
	}
	if modTime == "" { // prefer mtime in meta as Modified time, fallback to source ModTime

@@ -59,7 +59,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
	//"utime" - read-only
	//"content-type" - read-only
	}
-	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
+	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, false, contents, true, "text/html", metadata)
	defer func() {
		assert.NoError(t, obj.Remove(ctx))
	}()
@@ -5,18 +5,18 @@ package local
import (
	"context"
	"fmt"
	"syscall"
	"unsafe"

	"github.com/rclone/rclone/fs"
	"golang.org/x/sys/windows"
)

-var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
+var getFreeDiskSpace = windows.NewLazySystemDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var available, total, free int64
-	root, e := syscall.UTF16PtrFromString(f.root)
+	root, e := windows.UTF16PtrFromString(f.root)
	if e != nil {
		return nil, fmt.Errorf("failed to read disk usage: %w", e)
	}
@@ -26,7 +26,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
		uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
		uintptr(unsafe.Pointer(&free)),  // lpTotalNumberOfFreeBytes
	)
-	if e1 != syscall.Errno(0) {
+	if e1 != windows.Errno(0) {
		return nil, fmt.Errorf("failed to read disk usage: %w", e1)
	}
	usage := &fs.Usage{

93
backend/local/clone_darwin.go
Normal file
@@ -0,0 +1,93 @@
//go:build darwin && cgo

// Package local provides a filesystem interface
package local

import (
    "context"
    "fmt"
    "path/filepath"
    "runtime"

    "github.com/go-darwin/apfs"
    "github.com/rclone/rclone/fs"
)

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    if runtime.GOOS != "darwin" || f.opt.NoClone {
        return nil, fs.ErrorCantCopy
    }
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debugf(src, "Can't clone - not same remote type")
        return nil, fs.ErrorCantCopy
    }
    if f.opt.TranslateSymlinks && srcObj.translatedLink { // in --links mode, use cloning only for regular files
        return nil, fs.ErrorCantCopy
    }

    // Fetch metadata if --metadata is in use
    meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
    if err != nil {
        return nil, fmt.Errorf("copy: failed to read metadata: %w", err)
    }

    // Create destination
    dstObj := f.newObject(remote)
    err = dstObj.mkdirAll()
    if err != nil {
        return nil, err
    }

    srcPath := srcObj.path
    if f.opt.FollowSymlinks { // in --copy-links mode, find the real file being pointed to and pass that in instead
        srcPath, err = filepath.EvalSymlinks(srcPath)
        if err != nil {
            return nil, err
        }
    }

    err = Clone(srcPath, f.localPath(remote))
    if err != nil {
        return nil, err
    }

    // Set metadata if --metadata is in use
    if meta != nil {
        err = dstObj.writeMetadata(meta)
        if err != nil {
            return nil, fmt.Errorf("copy: failed to set metadata: %w", err)
        }
    }

    return f.NewObject(ctx, remote)
}

// Clone uses APFS cloning if possible, otherwise falls back to copying (with full metadata preservation).
// Note that this is closely related to unix.Clonefile(src, dst, unix.CLONE_NOFOLLOW) but not 100% identical.
// https://opensource.apple.com/source/copyfile/copyfile-173.40.2/copyfile.c.auto.html
func Clone(src, dst string) error {
    state := apfs.CopyFileStateAlloc()
    defer func() {
        if err := apfs.CopyFileStateFree(state); err != nil {
            fs.Errorf(dst, "free state error: %v", err)
        }
    }()
    cloned, err := apfs.CopyFile(src, dst, state, apfs.COPYFILE_CLONE)
    fs.Debugf(dst, "isCloned: %v, error: %v", cloned, err)
    return err
}

// Check the interfaces are satisfied
var (
    _ fs.Copier = &Fs{}
)
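For orientation, this is how the exported Clone helper above could be exercised directly — a sketch only, assuming a darwin/cgo build and that both placeholder paths sit on the same APFS volume; rclone proper reaches it through the Copy method instead:

//go:build darwin && cgo

package main

import (
    "log"

    "github.com/rclone/rclone/backend/local"
)

func main() {
    // Clone shares blocks with the source until either file is modified.
    if err := local.Clone("/Volumes/Data/src.bin", "/Volumes/Data/dst.bin"); err != nil {
        log.Fatalf("clone failed: %v", err)
    }
    log.Println("cloned without duplicating blocks")
}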
16
backend/local/lchmod.go
Normal file
@@ -0,0 +1,16 @@
//go:build windows || plan9 || js || linux

package local

import "os"

const haveLChmod = false

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
    // Can't do this safely on this OS - chmoding a symlink always
    // changes the destination.
    return nil
}
41
backend/local/lchmod_unix.go
Normal file
@@ -0,0 +1,41 @@
//go:build !windows && !plan9 && !js && !linux

package local

import (
    "os"
    "syscall"

    "golang.org/x/sys/unix"
)

const haveLChmod = true

// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
//
// Borrowed from the syscall source since it isn't public.
func syscallMode(i os.FileMode) (o uint32) {
    o |= uint32(i.Perm())
    if i&os.ModeSetuid != 0 {
        o |= syscall.S_ISUID
    }
    if i&os.ModeSetgid != 0 {
        o |= syscall.S_ISGID
    }
    if i&os.ModeSticky != 0 {
        o |= syscall.S_ISVTX
    }
    return o
}

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
    // NB linux does not support AT_SYMLINK_NOFOLLOW as a parameter to fchmodat
    // and returns ENOTSUP if you try, so we don't support this on linux
    if e := unix.Fchmodat(unix.AT_FDCWD, name, syscallMode(mode), unix.AT_SYMLINK_NOFOLLOW); e != nil {
        return &os.PathError{Op: "lChmod", Path: name, Err: e}
    }
    return nil
}
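The syscallMode translation above folds Go's portable setuid/setgid/sticky flags back into the numeric mode bits the kernel expects. A small standalone sketch of the same mapping (not from the commit; unix-only because of syscall.S_ISUID):

package main

import (
    "fmt"
    "os"
    "syscall"
)

func main() {
    // portable representation: rwxr-xr-x plus the setuid flag bit
    m := os.FileMode(0o755) | os.ModeSetuid
    o := uint32(m.Perm())
    if m&os.ModeSetuid != 0 {
        o |= syscall.S_ISUID
    }
    // prints the familiar octal 4755 that chmod would use
    fmt.Printf("portable %v -> syscall bits %o\n", m, o)
}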
@@ -1,4 +1,4 @@
-//go:build windows || plan9 || js
+//go:build plan9 || js

package local

19
backend/local/lchtimes_windows.go
Normal file
@@ -0,0 +1,19 @@
//go:build windows

package local

import (
    "time"
)

const haveLChtimes = true

// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
    return setTimes(name, atime, mtime, time.Time{}, true)
}
@@ -32,9 +32,10 @@ import (
)

// Constants
-const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
-const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
-const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
+const (
+	devUnset   = 0xdeadbeefcafebabe                                     // a device id meaning it is unset
+	useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
+)

// timeType allows the user to choose what exactly ModTime() returns
type timeType = fs.Enum[timeTypeChoices]
@@ -78,41 +79,44 @@ supported by all file systems) under the "user.*" prefix.
Metadata is supported on files and directories.
`,
	},
-	Options: []fs.Option{{
-		Name:     "nounc",
-		Help:     "Disable UNC (long path names) conversion on Windows.",
-		Default:  false,
-		Advanced: runtime.GOOS != "windows",
-		Examples: []fs.OptionExample{{
-			Value: "true",
-			Help:  "Disables long file names.",
-		}},
-	}, {
-		Name:     "copy_links",
-		Help:     "Follow symlinks and copy the pointed to item.",
-		Default:  false,
-		NoPrefix: true,
-		ShortOpt: "L",
-		Advanced: true,
-	}, {
-		Name:     "links",
-		Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
-		Default:  false,
-		NoPrefix: true,
-		ShortOpt: "l",
-		Advanced: true,
-	}, {
-		Name: "skip_links",
-		Help: `Don't warn about skipped symlinks.
+	Options: []fs.Option{
+		{
+			Name:     "nounc",
+			Help:     "Disable UNC (long path names) conversion on Windows.",
+			Default:  false,
+			Advanced: runtime.GOOS != "windows",
+			Examples: []fs.OptionExample{{
+				Value: "true",
+				Help:  "Disables long file names.",
+			}},
+		},
+		{
+			Name:     "copy_links",
+			Help:     "Follow symlinks and copy the pointed to item.",
+			Default:  false,
+			NoPrefix: true,
+			ShortOpt: "L",
+			Advanced: true,
+		},
+		{
+			Name:     "links",
+			Help:     "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the local backend.",
+			Default:  false,
+			Advanced: true,
+		},
+		{
+			Name: "skip_links",
+			Help: `Don't warn about skipped symlinks.

This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
-		Default:  false,
-		NoPrefix: true,
-		Advanced: true,
-	}, {
-		Name: "zero_size_links",
-		Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
+			Default:  false,
+			NoPrefix: true,
+			Advanced: true,
+		},
+		{
+			Name: "zero_size_links",
+			Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).

Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:

@@ -122,11 +126,12 @@ Rclone used to use the Stat size of links as the link size, but this fails in qu

So rclone now always reads the link.
`,
-		Default:  false,
-		Advanced: true,
-	}, {
-		Name: "unicode_normalization",
-		Help: `Apply unicode NFC normalization to paths and filenames.
+			Default:  false,
+			Advanced: true,
+		},
+		{
+			Name: "unicode_normalization",
+			Help: `Apply unicode NFC normalization to paths and filenames.

This flag can be used to normalize file names into unicode NFC form
that are read from the local filesystem.
@@ -140,11 +145,12 @@ some OSes.
Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
-		Default:  false,
-		Advanced: true,
-	}, {
-		Name: "no_check_updated",
-		Help: `Don't check to see if the files change during upload.
+			Default:  false,
+			Advanced: true,
+		},
+		{
+			Name: "no_check_updated",
+			Help: `Don't check to see if the files change during upload.

Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy -
@@ -175,68 +181,96 @@ directory listing (where the initial stat value comes from on Windows)
directory listing (where the initial stat value comes from on Windows)
and when stat is called on them directly. Other copy tools always use
the direct stat value and setting this flag will disable that.
`,
-		Default:  false,
-		Advanced: true,
-	}, {
-		Name:     "one_file_system",
-		Help:     "Don't cross filesystem boundaries (unix/macOS only).",
-		Default:  false,
-		NoPrefix: true,
-		ShortOpt: "x",
-		Advanced: true,
-	}, {
-		Name: "case_sensitive",
-		Help: `Force the filesystem to report itself as case sensitive.
+			Default:  false,
+			Advanced: true,
+		},
+		{
+			Name:     "one_file_system",
+			Help:     "Don't cross filesystem boundaries (unix/macOS only).",
+			Default:  false,
+			NoPrefix: true,
+			ShortOpt: "x",
+			Advanced: true,
+		},
+		{
+			Name: "case_sensitive",
+			Help: `Force the filesystem to report itself as case sensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
-		Default:  false,
-		Advanced: true,
-	}, {
-		Name: "case_insensitive",
-		Help: `Force the filesystem to report itself as case insensitive.
+			Default:  false,
+			Advanced: true,
+		},
+		{
+			Name: "case_insensitive",
+			Help: `Force the filesystem to report itself as case insensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
-		Default:  false,
-		Advanced: true,
-	}, {
-		Name: "no_preallocate",
-		Help: `Disable preallocation of disk space for transferred files.
+			Default:  false,
+			Advanced: true,
+		},
+		{
+			Name: "no_clone",
+			Help: `Disable reflink cloning for server-side copies.
+
+Normally, for local-to-local transfers, rclone will "clone" the file when
+possible, and fall back to "copying" only when cloning is not supported.
+
+Cloning creates a shallow copy (or "reflink") which initially shares blocks with
+the original file. Unlike a "hardlink", the two files are independent and
+neither will affect the other if subsequently modified.
+
+Cloning is usually preferable to copying, as it is much faster and is
+deduplicated by default (i.e. having two identical files does not consume more
+storage than having just one.) However, for use cases where data redundancy is
+preferable, --local-no-clone can be used to disable cloning and force "deep" copies.
+
+Currently, cloning is only supported when using APFS on macOS (support for other
+platforms may be added in the future.)`,
+			Default:  false,
+			Advanced: true,
+		},
+		{
+			Name: "no_preallocate",
+			Help: `Disable preallocation of disk space for transferred files.

Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
Stream) may incorrectly set the actual file size equal to the
preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.`,
-		Default:  false,
-		Advanced: true,
-	}, {
-		Name: "no_sparse",
-		Help: `Disable sparse files for multi-thread downloads.
+			Default:  false,
+			Advanced: true,
+		},
+		{
+			Name: "no_sparse",
+			Help: `Disable sparse files for multi-thread downloads.

On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
-		Default:  false,
-		Advanced: true,
-	}, {
-		Name: "no_set_modtime",
-		Help: `Disable setting modtime.
+			Default:  false,
+			Advanced: true,
+		},
+		{
+			Name: "no_set_modtime",
+			Help: `Disable setting modtime.

Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
the user rclone is running as does not own the file uploaded, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
-		Default:  false,
-		Advanced: true,
-	}, {
-		Name: "time_type",
-		Help: `Set what kind of time is returned.
+			Default:  false,
+			Advanced: true,
+		},
+		{
+			Name: "time_type",
+			Help: `Set what kind of time is returned.

Normally rclone does all operations on the mtime or Modification time.

@@ -255,27 +289,29 @@ will silently replace it with the modification time which all OSes support.
Note that setting the time will still set the modified time so this is
only useful for reading.
`,
-		Default:  mTime,
-		Advanced: true,
-		Examples: []fs.OptionExample{{
-			Value: mTime.String(),
-			Help:  "The last modification time.",
-		}, {
-			Value: aTime.String(),
-			Help:  "The last access time.",
-		}, {
-			Value: bTime.String(),
-			Help:  "The creation time.",
-		}, {
-			Value: cTime.String(),
-			Help:  "The last status change time.",
-		}},
-	}, {
-		Name:     config.ConfigEncoding,
-		Help:     config.ConfigEncodingHelp,
-		Advanced: true,
-		Default:  encoder.OS,
-	}},
+			Default:  mTime,
+			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: mTime.String(),
+				Help:  "The last modification time.",
+			}, {
+				Value: aTime.String(),
+				Help:  "The last access time.",
+			}, {
+				Value: bTime.String(),
+				Help:  "The creation time.",
+			}, {
+				Value: cTime.String(),
+				Help:  "The last status change time.",
+			}},
+		},
+		{
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encoder.OS,
+		},
+	},
}
fs.Register(fsi)
}
@@ -296,6 +332,7 @@ type Options struct {
	NoSetModTime bool                 `config:"no_set_modtime"`
	TimeType     timeType             `config:"time_type"`
	Enc          encoder.MultiEncoder `config:"encoding"`
+	NoClone      bool                 `config:"no_clone"`
}

// Fs represents a local filesystem rooted at root
@@ -339,17 +376,22 @@ type Directory struct {

var (
	errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
-	errLinksNeedsSuffix  = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
+	errLinksNeedsSuffix  = errors.New("need \"" + fs.LinkSuffix + "\" suffix to refer to symlink when using -l/--links")
)

// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ci := fs.GetConfig(ctx)
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
+	// Override --local-links with --links if set
+	if ci.Links {
+		opt.TranslateSymlinks = true
+	}
	if opt.TranslateSymlinks && opt.FollowSymlinks {
		return nil, errLinksAndCopyLinks
	}
@@ -384,6 +426,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	if opt.FollowSymlinks {
		f.lstat = os.Stat
	}
+	if opt.NoClone {
+		// Disable server-side copy when --local-no-clone is set
+		f.features.Copy = nil
+	}

	// Check to see if this points to a file
	fi, err := f.lstat(f.root)
@@ -391,9 +437,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		f.dev = readDevice(fi, f.opt.OneFileSystem)
	}
	// Check to see if this is a .rclonelink if not found
-	hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
+	hasLinkSuffix := strings.HasSuffix(f.root, fs.LinkSuffix)
	if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
-		fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
+		fi, err = f.lstat(strings.TrimSuffix(f.root, fs.LinkSuffix))
	}
	if err == nil && f.isRegular(fi.Mode()) {
		// Handle the odd case, that a symlink was specified by name without the link suffix
@@ -464,8 +510,8 @@ func (f *Fs) caseInsensitive() bool {
//
// for regular files, localPath is returned unchanged
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
-	isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
-	newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
+	isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
+	newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
	return newLocalPath, isTranslatedLink
}

@@ -648,7 +694,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	} else {
		// Check whether this link should be translated
		if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
-			newRemote += linkSuffix
+			newRemote += fs.LinkSuffix
		}
		// Don't include non directory if not included
		// we leave directory filtering to the layer above
@@ -1555,33 +1601,60 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
	return err
}

-func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
-	if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
-		if !filepath.IsAbs(s) {
-			s2, err := filepath.Abs(s)
-			if err == nil {
-				s = s2
-			}
-		} else {
-			s = filepath.Clean(s)
-		}
+// SetMetadata sets metadata for an Object
+//
+// It should return fs.ErrorNotImplemented if it can't set metadata
+func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
+	err := o.writeMetadata(metadata)
+	if err != nil {
+		return fmt.Errorf("SetMetadata failed on Object: %w", err)
+	}
+	// Re-read info now we have finished setting stuff
+	return o.lstat()
+}
+
+func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
+	var vol string
	if runtime.GOOS == "windows" {
		s = filepath.ToSlash(s)
-		vol := filepath.VolumeName(s)
+		vol = filepath.VolumeName(s)
		if vol == `\\?` && len(s) >= 6 {
			// `\\?\C:`
			vol = s[:6]
		}
-		s = vol + enc.FromStandardPath(s[len(vol):])
-		s = filepath.FromSlash(s)
-		if !noUNC {
-			// Convert to UNC
-			s = file.UNCPath(s)
-		}
-		return s
+		s = s[len(vol):]
	}
+	// Don't use FromStandardPath. Make sure Dot (`.`, `..`) as name will not be reencoded
+	// Take care of the case Standard: ././‛. (the first dot means current directory)
+	if enc != encoder.Standard {
+		s = filepath.ToSlash(s)
+		parts := strings.Split(s, "/")
+		encoded := make([]string, len(parts))
+		changed := false
+		for i, p := range parts {
+			if (p == ".") || (p == "..") {
+				encoded[i] = p
+				continue
+			}
+			part := enc.FromStandardName(p)
+			changed = changed || part != p
+			encoded[i] = part
+		}
+		if changed {
+			s = strings.Join(encoded, "/")
+		}
+		s = filepath.FromSlash(s)
+	}
+	if runtime.GOOS == "windows" {
+		s = vol + s
+	}
+	s2, err := filepath.Abs(s)
+	if err == nil {
+		s = s2
+	}
+	if !noUNC {
+		// Convert to UNC. It does nothing on non windows platforms.
+		s = file.UNCPath(s)
+	}
+	s = enc.FromStandardPath(s)
	return s
}

@@ -1629,6 +1702,7 @@ var (
	_ fs.MkdirMetadataer = &Fs{}
	_ fs.Object          = &Object{}
	_ fs.Metadataer      = &Object{}
+	_ fs.SetMetadataer   = &Object{}
	_ fs.Directory       = &Directory{}
	_ fs.SetModTimer     = &Directory{}
	_ fs.SetMetadataer   = &Directory{}

@@ -73,7 +73,6 @@ func TestUpdatingCheck(t *testing.T) {
r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.NoError(t, err)

}

// Test corrupted on transfer
@@ -111,7 +110,7 @@ func TestSymlink(t *testing.T) {
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))

// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)

// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -140,7 +139,7 @@ func TestSymlink(t *testing.T) {

// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
r.CheckLocalItems(t, file1, file2, file3)
@@ -156,9 +155,9 @@ func TestSymlink(t *testing.T) {
assert.Equal(t, "file.txt", linkText)

// Check that NewObject gets the correct object
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())

// Check that NewObject doesn't see the non suffixed version
@@ -166,7 +165,7 @@ func TestSymlink(t *testing.T) {
require.Equal(t, fs.ErrorObjectNotFound, err)

// Check that NewFs works with the suffixed version and --links
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+fs.LinkSuffix), configmap.Simple{
"links": "true",
})
require.Equal(t, fs.ErrorIsFile, err)
@@ -224,7 +223,7 @@ func TestHashOnUpdate(t *testing.T) {
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

// Reupload it with different contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
b := bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
require.NoError(t, err)
@@ -269,22 +268,66 @@ func TestMetadata(t *testing.T) {
r := fstest.NewRun(t)
const filePath = "metafile.txt"
when := time.Now()
const dayLength = len("2001-01-01")
whenRFC := when.Format(time.RFC3339Nano)
r.WriteFile(filePath, "metadata file contents", when)
f := r.Flocal.(*Fs)

// Set fs into "-l" / "--links" mode
f.opt.TranslateSymlinks = true

// Write a symlink to the file
symlinkPath := "metafile-link.txt"
osSymlinkPath := filepath.Join(f.root, symlinkPath)
symlinkPath += fs.LinkSuffix
require.NoError(t, os.Symlink(filePath, osSymlinkPath))
symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))

// Get the object
obj, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
o := obj.(*Object)

// Get the symlink object
symlinkObj, err := f.NewObject(ctx, symlinkPath)
require.NoError(t, err)
symlinkO := symlinkObj.(*Object)

// Record metadata for o
oMeta, err := o.Metadata(ctx)
require.NoError(t, err)

// Test symlink first to check it doesn't mess up file
t.Run("Symlink", func(t *testing.T) {
testMetadata(t, r, symlinkO, symlinkModTime)
})

// Read it again
oMetaNew, err := o.Metadata(ctx)
require.NoError(t, err)

// Check that operating on the symlink didn't change the file it was pointing to
// See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
assert.Equal(t, oMeta, oMetaNew, "metadata setting on symlink messed up file")

// Now run the same tests on the file
t.Run("File", func(t *testing.T) {
testMetadata(t, r, o, when)
})
}

func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
ctx := context.Background()
whenRFC := when.Format(time.RFC3339Nano)
const dayLength = len("2001-01-01")

f := r.Flocal.(*Fs)
features := f.Features()

var hasXID, hasAtime, hasBtime bool
var hasXID, hasAtime, hasBtime, canSetXattrOnLinks bool
switch runtime.GOOS {
case "darwin", "freebsd", "netbsd", "linux":
hasXID, hasAtime, hasBtime = true, true, true
canSetXattrOnLinks = runtime.GOOS != "linux"
case "openbsd", "solaris":
hasXID, hasAtime = true, true
case "windows":
@@ -307,6 +350,10 @@ func TestMetadata(t *testing.T) {
require.NoError(t, err)
assert.Nil(t, m)

if !canSetXattrOnLinks && o.translatedLink {
t.Skip("Skip remainder of test as can't set xattr on symlinks on this OS")
}

inM := fs.Metadata{
"potato": "chips",
"cabbage": "soup",
@@ -321,18 +368,21 @@ func TestMetadata(t *testing.T) {
})

checkTime := func(m fs.Metadata, key string, when time.Time) {
t.Helper()
mt, ok := o.parseMetadataTime(m, key)
assert.True(t, ok)
dt := mt.Sub(when)
precision := time.Second
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v want %v got %v", key, dt, precision, mt, when))
}

checkInt := func(m fs.Metadata, key string, base int) int {
t.Helper()
value, ok := o.parseMetadataInt(m, key, base)
assert.True(t, ok)
return value
}

t.Run("Read", func(t *testing.T) {
m, err := o.Metadata(ctx)
require.NoError(t, err)
@@ -342,13 +392,12 @@ func TestMetadata(t *testing.T) {
checkInt(m, "mode", 8)
checkTime(m, "mtime", when)

assert.Equal(t, len(whenRFC), len(m["mtime"]))
assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])

if hasAtime {
if hasAtime && !o.translatedLink { // symlinks generally don't record atime
checkTime(m, "atime", when)
}
if hasBtime {
if hasBtime && !o.translatedLink { // symlinks generally don't record btime
checkTime(m, "btime", when)
}
if hasXID {
@@ -372,6 +421,10 @@ func TestMetadata(t *testing.T) {
"mode": "0767",
"potato": "wedges",
}
if !canSetXattrOnLinks && o.translatedLink {
// Don't change xattr if not supported on symlinks
delete(newM, "potato")
}
err := o.writeMetadata(newM)
require.NoError(t, err)

@@ -381,7 +434,11 @@ func TestMetadata(t *testing.T) {

mode := checkInt(m, "mode", 8)
if runtime.GOOS != "windows" {
assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
expectedMode := 0767
if o.translatedLink && runtime.GOOS == "linux" {
expectedMode = 0777 // perms of symlinks always read as 0777 on linux
}
assert.Equal(t, expectedMode, mode&0777, fmt.Sprintf("mode wrong - expecting 0%o got 0%o", expectedMode, mode&0777))
}

checkTime(m, "mtime", newMtime)
@@ -391,11 +448,10 @@ func TestMetadata(t *testing.T) {
if haveSetBTime {
checkTime(m, "btime", newBtime)
}
if xattrSupported {
if xattrSupported && (canSetXattrOnLinks || !o.translatedLink) {
assert.Equal(t, "wedges", m["potato"])
}
})

}

func TestFilter(t *testing.T) {
@@ -572,4 +628,35 @@ func TestCopySymlink(t *testing.T) {
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)

// Set fs into "-L/--copy-links" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat

// Create dst
require.NoError(t, f.Mkdir(ctx, "dst2"))

// Do copy from src into dst
src, err = f.NewObject(ctx, "src/link.txt")
require.NoError(t, err)
require.NotNil(t, src)
dst, err = operations.Copy(ctx, f, nil, "dst2/link.txt", src)
require.NoError(t, err)
require.NotNil(t, dst)

// Test that we made a NON-symlink and it has the right contents
dstPath = filepath.Join(r.LocalName, "dst2", "link.txt")
fi, err := os.Lstat(dstPath)
require.NoError(t, err)
assert.True(t, fi.Mode()&os.ModeSymlink == 0)
want := fstest.NewItem("dst2/link.txt", "hello world", when)
fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")

// Test that copying a normal file also works
dst, err = operations.Copy(ctx, f, nil, "dst2/file.txt", dst)
require.NoError(t, err)
require.NotNil(t, dst)
want = fstest.NewItem("dst2/file.txt", "hello world", when)
fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
}

@@ -2,6 +2,7 @@ package local

import (
"fmt"
"math"
"os"
"runtime"
"strconv"
@@ -72,12 +73,12 @@ func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result i
value, ok := m[key]
if ok {
var err error
result64, err := strconv.ParseInt(value, base, 64)
parsed, err := strconv.ParseInt(value, base, 0)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
ok = false
}
result = int(result64)
result = int(parsed)
}
return result, ok
}
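The switch from bitSize 64 to bitSize 0 above is what makes the int() conversion safe: with bitSize 0, strconv.ParseInt range-checks the value against the platform's int type, so out-of-range metadata is rejected instead of silently truncated. A quick illustration:

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		v, err := strconv.ParseInt("0767", 8, 0) // base 8, range-checked for int
		fmt.Println(int(v), err)                 // 503 <nil>

		// A value that overflows int is reported as an error rather than wrapping.
		_, err = strconv.ParseInt("123456789012345678901", 10, 0)
		fmt.Println(err != nil) // true: value out of range
	}
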
@@ -104,7 +105,11 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
}
if haveSetBTime {
if btimeOK {
err = setBTime(o.path, btime)
if o.translatedLink {
err = lsetBTime(o.path, btime)
} else {
err = setBTime(o.path, btime)
}
if err != nil {
outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
}
@@ -120,7 +125,11 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
} else {
err = os.Chown(o.path, uid, gid)
if o.translatedLink {
err = os.Lchown(o.path, uid, gid)
} else {
err = os.Chown(o.path, uid, gid)
}
if err != nil {
outErr = fmt.Errorf("failed to change ownership: %w", err)
}
@@ -128,9 +137,23 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
}
mode, hasMode := o.parseMetadataInt(m, "mode", 8)
if hasMode {
err = os.Chmod(o.path, os.FileMode(mode))
if err != nil {
outErr = fmt.Errorf("failed to change permissions: %w", err)
if mode >= 0 {
umode := uint(mode)
if umode <= math.MaxUint32 {
if o.translatedLink {
if haveLChmod {
err = lChmod(o.path, os.FileMode(umode))
} else {
fs.Debugf(o, "Unable to set mode %v on a symlink on this OS", os.FileMode(umode))
err = nil
}
} else {
err = os.Chmod(o.path, os.FileMode(umode))
}
if err != nil {
outErr = fmt.Errorf("failed to change permissions: %w", err)
}
}
}
}
// FIXME not parsing rdev yet

@@ -13,3 +13,9 @@ func setBTime(name string, btime time.Time) error {
// Does nothing
return nil
}

// lsetBTime changes the birth time of the link passed in
func lsetBTime(name string, btime time.Time) error {
// Does nothing
return nil
}

@@ -9,15 +9,20 @@ import (

const haveSetBTime = true

// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
// setTimes sets any of atime, mtime or btime
// if link is set it sets a link rather than the target
func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error) {
pathp, err := syscall.UTF16PtrFromString(name)
if err != nil {
return err
}
fileFlag := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
if link {
fileFlag |= syscall.FILE_FLAG_OPEN_REPARSE_POINT
}
h, err := syscall.CreateFile(pathp,
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
syscall.OPEN_EXISTING, fileFlag, 0)
if err != nil {
return err
}
@@ -27,6 +32,28 @@ func setBTime(name string, btime time.Time) (err error) {
err = closeErr
}
}()
bFileTime := syscall.NsecToFiletime(btime.UnixNano())
return syscall.SetFileTime(h, &bFileTime, nil, nil)
var patime, pmtime, pbtime *syscall.Filetime
if !atime.IsZero() {
t := syscall.NsecToFiletime(atime.UnixNano())
patime = &t
}
if !mtime.IsZero() {
t := syscall.NsecToFiletime(mtime.UnixNano())
pmtime = &t
}
if !btime.IsZero() {
t := syscall.NsecToFiletime(btime.UnixNano())
pbtime = &t
}
return syscall.SetFileTime(h, pbtime, patime, pmtime)
}

// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
return setTimes(name, time.Time{}, time.Time{}, btime, false)
}

// lsetBTime changes the birth time of the link passed in
func lsetBTime(name string, btime time.Time) error {
return setTimes(name, time.Time{}, time.Time{}, btime, true)
}

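The key detail in setTimes is that a zero time.Time becomes a nil *syscall.Filetime, and SetFileTime leaves nil timestamps untouched; setBTime and lsetBTime both exploit this by passing zero atime/mtime, while the link variant adds FILE_FLAG_OPEN_REPARSE_POINT so the handle refers to the symlink itself. A portable sketch of the zero-means-skip mapping (the Windows syscall is elided):

	package main

	import (
		"fmt"
		"time"
	)

	// filetimeOrNil mirrors how setTimes builds its *syscall.Filetime arguments:
	// a zero time yields nil, which tells SetFileTime to leave that stamp alone.
	func filetimeOrNil(t time.Time) *int64 {
		if t.IsZero() {
			return nil
		}
		ns := t.UnixNano()
		return &ns
	}

	func main() {
		fmt.Println(filetimeOrNil(time.Time{}) == nil) // true: timestamp untouched
		fmt.Println(*filetimeOrNil(time.Unix(42, 0)))  // 42000000000
	}
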
@@ -46,8 +46,8 @@ import (

// Global constants
const (
minSleepPacer = 10 * time.Millisecond
maxSleepPacer = 2 * time.Second
minSleepPacer = 100 * time.Millisecond
maxSleepPacer = 5 * time.Second
decayConstPacer = 2 // bigger for slower decay, exponential
metaExpirySec = 20 * 60 // meta server expiration time
serverExpirySec = 3 * 60 // download server expiration time
@@ -68,14 +68,12 @@ var (
)

// Description of how to authorize
var oauthConfig = &oauth2.Config{
var oauthConfig = &oauthutil.Config{
ClientID: api.OAuthClientID,
ClientSecret: "",
Endpoint: oauth2.Endpoint{
AuthURL: api.OAuthURL,
TokenURL: api.OAuthURL,
AuthStyle: oauth2.AuthStyleInParams,
},
AuthURL: api.OAuthURL,
TokenURL: api.OAuthURL,
AuthStyle: oauth2.AuthStyleInParams,
}

// Register with Fs
@@ -438,7 +436,9 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
if err != nil || !tokenIsValid(t) {
fs.Infof(f, "Valid token not found, authorizing.")
ctx := oauthutil.Context(ctx, f.cli)
t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)

oauth2Conf := oauthConfig.MakeOauth2Config()
t, err = oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
}
if err == nil && !tokenIsValid(t) {
err = errors.New("invalid token")

@@ -923,9 +923,7 @@ func (f *Fs) netStorageStatRequest(ctx context.Context, URL string, directory bo
entrywanted := (directory && files[i].Type == "dir") ||
(!directory && files[i].Type != "dir")
if entrywanted {
filestamp := files[0]
files[0] = files[i]
files[i] = filestamp
files[0], files[i] = files[i], files[0]
}
}
return files, nil

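The netstorage change above replaces the three-line temp-variable shuffle with Go's parallel assignment, which swaps in place:

	package main

	import "fmt"

	func main() {
		files := []string{"dir1", "file1", "dir2"}
		i := 2
		files[0], files[i] = files[i], files[0] // swap without a temporary
		fmt.Println(files)                      // [dir2 file1 dir1]
	}
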
@@ -42,6 +42,8 @@ var _ error = (*Error)(nil)
type Identity struct {
DisplayName string `json:"displayName,omitempty"`
ID string `json:"id,omitempty"`
Email string `json:"email,omitempty"` // not officially documented, but seems to sometimes exist
LoginName string `json:"loginName,omitempty"` // SharePoint only
}

// IdentitySet is a keyed collection of Identity objects. It is used
@@ -51,6 +53,9 @@ type IdentitySet struct {
User Identity `json:"user,omitempty"`
Application Identity `json:"application,omitempty"`
Device Identity `json:"device,omitempty"`
Group Identity `json:"group,omitempty"`
SiteGroup Identity `json:"siteGroup,omitempty"` // The SharePoint group associated with this action. Optional.
SiteUser Identity `json:"siteUser,omitempty"` // The SharePoint user associated with this action. Optional.
}

// Quota groups storage space quota-related information on OneDrive into a single structure.
@@ -197,9 +202,14 @@ type SharingLinkType struct {
type LinkType string

const (
ViewLinkType LinkType = "view" // ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
EditLinkType LinkType = "edit" // EditLinkType (role: write) An edit sharing link, allowing read-write access.
EmbedLinkType LinkType = "embed" // EmbedLinkType (role: read) A view-only sharing link that can be used to embed content into a host webpage. Embed links are not available for OneDrive for Business or SharePoint.
// ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
ViewLinkType LinkType = "view"
// EditLinkType (role: write) An edit sharing link, allowing read-write access.
EditLinkType LinkType = "edit"
// EmbedLinkType (role: read) A view-only sharing link that can be used to embed
// content into a host webpage. Embed links are not available for OneDrive for
// Business or SharePoint.
EmbedLinkType LinkType = "embed"
)

// LinkScope represents the scope of the link represented by this permission.
@@ -207,32 +217,41 @@ const (
type LinkScope string

const (
AnonymousScope LinkScope = "anonymous" // AnonymousScope = Anyone with the link has access, without needing to sign in. This may include people outside of your organization.
OrganizationScope LinkScope = "organization" // OrganizationScope = Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.

// AnonymousScope = Anyone with the link has access, without needing to sign in.
// This may include people outside of your organization.
AnonymousScope LinkScope = "anonymous"
// OrganizationScope = Anyone signed into your organization (tenant) can use the
// link to get access. Only available in OneDrive for Business and SharePoint.
OrganizationScope LinkScope = "organization"
)

// PermissionsType provides information about a sharing permission granted for a DriveItem resource.
// Sharing permissions have a number of different forms. The Permission resource represents these different forms through facets on the resource.
type PermissionsType struct {
ID string `json:"id"` // The unique identifier of the permission among all permissions on the item. Read-only.
GrantedTo *IdentitySet `json:"grantedTo,omitempty"` // For user type permissions, the details of the users & applications for this permission. Read-only.
GrantedToIdentities []*IdentitySet `json:"grantedToIdentities,omitempty"` // For link type permissions, the details of the users to whom permission was granted. Read-only.
Invitation *SharingInvitationType `json:"invitation,omitempty"` // Details of any associated sharing invitation for this permission. Read-only.
InheritedFrom *ItemReference `json:"inheritedFrom,omitempty"` // Provides a reference to the ancestor of the current permission, if it is inherited from an ancestor. Read-only.
Link *SharingLinkType `json:"link,omitempty"` // Provides the link details of the current permission, if it is a link type permissions. Read-only.
Roles []Role `json:"roles,omitempty"` // The type of permission (read, write, owner, member). Read-only.
ShareID string `json:"shareId,omitempty"` // A unique token that can be used to access this shared item via the shares API. Read-only.
ID string `json:"id"` // The unique identifier of the permission among all permissions on the item. Read-only.
GrantedTo *IdentitySet `json:"grantedTo,omitempty"` // For user type permissions, the details of the users & applications for this permission. Read-only. Deprecated on OneDrive Business only.
GrantedToIdentities []*IdentitySet `json:"grantedToIdentities,omitempty"` // For link type permissions, the details of the users to whom permission was granted. Read-only. Deprecated on OneDrive Business only.
GrantedToV2 *IdentitySet `json:"grantedToV2,omitempty"` // For user type permissions, the details of the users & applications for this permission. Read-only. Not available for OneDrive Personal.
GrantedToIdentitiesV2 []*IdentitySet `json:"grantedToIdentitiesV2,omitempty"` // For link type permissions, the details of the users to whom permission was granted. Read-only. Not available for OneDrive Personal.
Invitation *SharingInvitationType `json:"invitation,omitempty"` // Details of any associated sharing invitation for this permission. Read-only.
InheritedFrom *ItemReference `json:"inheritedFrom,omitempty"` // Provides a reference to the ancestor of the current permission, if it is inherited from an ancestor. Read-only.
Link *SharingLinkType `json:"link,omitempty"` // Provides the link details of the current permission, if it is a link type permissions. Read-only.
Roles []Role `json:"roles,omitempty"` // The type of permission (read, write, owner, member). Read-only.
ShareID string `json:"shareId,omitempty"` // A unique token that can be used to access this shared item via the shares API. Read-only.
}

// Role represents the type of permission (read, write, owner, member)
type Role string

const (
ReadRole Role = "read" // ReadRole provides the ability to read the metadata and contents of the item.
WriteRole Role = "write" // WriteRole provides the ability to read and modify the metadata and contents of the item.
OwnerRole Role = "owner" // OwnerRole represents the owner role for SharePoint and OneDrive for Business.
MemberRole Role = "member" // MemberRole represents the member role for SharePoint and OneDrive for Business.
// ReadRole provides the ability to read the metadata and contents of the item.
ReadRole Role = "read"
// WriteRole provides the ability to read and modify the metadata and contents of the item.
WriteRole Role = "write"
// OwnerRole represents the owner role for SharePoint and OneDrive for Business.
OwnerRole Role = "owner"
// MemberRole represents the member role for SharePoint and OneDrive for Business.
MemberRole Role = "member"
)

// PermissionsResponse is the response to the list permissions method
@@ -592,3 +611,25 @@ type SiteResource struct {
type SiteResponse struct {
Sites []SiteResource `json:"value"`
}

// GetGrantedTo returns the GrantedTo property.
// This is to get around the odd problem of
// GrantedTo being deprecated on OneDrive Business, while
// GrantedToV2 is unavailable on OneDrive Personal.
func (p *PermissionsType) GetGrantedTo(driveType string) *IdentitySet {
if driveType == "personal" {
return p.GrantedTo
}
return p.GrantedToV2
}

// GetGrantedToIdentities returns the GrantedToIdentities property.
// This is to get around the odd problem of
// GrantedToIdentities being deprecated on OneDrive Business, while
// GrantedToIdentitiesV2 is unavailable on OneDrive Personal.
func (p *PermissionsType) GetGrantedToIdentities(driveType string) []*IdentitySet {
if driveType == "personal" {
return p.GrantedToIdentities
}
return p.GrantedToIdentitiesV2
}

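A trimmed-down sketch of the accessor pattern these two helpers introduce; the structs below are simplified stand-ins for the api types, not the real definitions:

	package main

	import "fmt"

	type IdentitySet struct{ User string }

	// PermissionsType carries both generations of the field; which one the
	// Graph API populates depends on the drive type, hence the accessor.
	type PermissionsType struct {
		GrantedTo   *IdentitySet // deprecated on OneDrive Business
		GrantedToV2 *IdentitySet // unavailable on OneDrive Personal
	}

	func (p *PermissionsType) GetGrantedTo(driveType string) *IdentitySet {
		if driveType == "personal" {
			return p.GrantedTo
		}
		return p.GrantedToV2
	}

	func main() {
		p := &PermissionsType{
			GrantedTo:   &IdentitySet{User: "v1-user"},
			GrantedToV2: &IdentitySet{User: "v2-user"},
		}
		fmt.Println(p.GetGrantedTo("personal").User) // v1-user
		fmt.Println(p.GetGrantedTo("business").User) // v2-user
	}
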
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"net/http"
"slices"
"strings"
"time"

@@ -14,7 +15,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
)

const (
@@ -133,6 +133,7 @@ func (rwChoices) Choices() []fs.BitsChoicesInfo {
{Bit: uint64(rwOff), Name: "off"},
{Bit: uint64(rwRead), Name: "read"},
{Bit: uint64(rwWrite), Name: "write"},
{Bit: uint64(rwFailOK), Name: "failok"},
}
}

@@ -142,6 +143,7 @@ type rwChoice = fs.Bits[rwChoices]
const (
rwRead rwChoice = 1 << iota
rwWrite
rwFailOK
rwOff rwChoice = 0
)

@@ -158,6 +160,9 @@ var rwExamples = fs.OptionExamples{{
}, {
Value: (rwRead | rwWrite).String(),
Help: "Read and Write the value.",
}, {
Value: rwFailOK.String(),
Help: "If writing fails log errors only, don't fail the transfer",
}}

// Metadata describes metadata properties shared by both Objects and Directories
@@ -363,6 +368,15 @@ func (m *Metadata) WritePermissions(ctx context.Context) (err error) {
if m.normalizedID == "" {
return errors.New("internal error: normalizedID is missing")
}
if m.fs.opt.MetadataPermissions.IsSet(rwFailOK) {
// If failok is set, allow the permissions setting to fail and only log an ERROR
defer func() {
if err != nil {
fs.Errorf(m.fs, "Ignoring error as failok is set: %v", err)
err = nil
}
}()
}

// compare current to queued and sort into add/update/remove queues
add, update, remove := m.sortPermissions()
@@ -396,7 +410,7 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType
if n.ID != "" {
// sanity check: ensure there's a matching "old" id with a non-matching role
if !slices.ContainsFunc(old, func(o *api.PermissionsType) bool {
return o.ID == n.ID && slices.Compare(o.Roles, n.Roles) != 0 && len(o.Roles) > 0 && len(n.Roles) > 0
return o.ID == n.ID && slices.Compare(o.Roles, n.Roles) != 0 && len(o.Roles) > 0 && len(n.Roles) > 0 && !slices.Contains(o.Roles, api.OwnerRole)
}) {
fs.Debugf(m.remote, "skipping update for invalid roles: %v (perm ID: %v)", n.Roles, n.ID)
continue
@@ -418,6 +432,10 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType
}
}
for _, o := range old {
if slices.Contains(o.Roles, api.OwnerRole) {
fs.Debugf(m.remote, "skipping remove permission -- can't remove 'owner' role")
continue
}
newHasOld := slices.ContainsFunc(new, func(n *api.PermissionsType) bool {
if n == nil || n.ID == "" {
return false // can't remove perms without an ID
@@ -471,13 +489,13 @@ func (m *Metadata) processPermissions(ctx context.Context, add, update, remove [
}

// fillRecipients looks for recipients to add from the permission passed in.
// It looks for an email address in identity.User.ID and DisplayName, otherwise it uses the identity.User.ID as r.ObjectID.
// It looks for an email address in identity.User.Email, ID, and DisplayName, otherwise it uses the identity.User.ID as r.ObjectID.
// It considers both "GrantedTo" and "GrantedToIdentities".
func fillRecipients(p *api.PermissionsType) (recipients []api.DriveRecipient) {
func fillRecipients(p *api.PermissionsType, driveType string) (recipients []api.DriveRecipient) {
if p == nil {
return recipients
}
ids := make(map[string]struct{}, len(p.GrantedToIdentities)+1)
ids := make(map[string]struct{}, len(p.GetGrantedToIdentities(driveType))+1)
isUnique := func(s string) bool {
_, ok := ids[s]
return !ok && s != ""
@@ -487,7 +505,10 @@ func fillRecipients(p *api.PermissionsType) (recipients []api.DriveRecipient) {
r := api.DriveRecipient{}

id := ""
if strings.ContainsRune(identity.User.ID, '@') {
if strings.ContainsRune(identity.User.Email, '@') {
id = identity.User.Email
r.Email = id
} else if strings.ContainsRune(identity.User.ID, '@') {
id = identity.User.ID
r.Email = id
} else if strings.ContainsRune(identity.User.DisplayName, '@') {
@@ -503,12 +524,31 @@ func fillRecipients(p *api.PermissionsType) (recipients []api.DriveRecipient) {
ids[id] = struct{}{}
recipients = append(recipients, r)
}
for _, identity := range p.GrantedToIdentities {
addRecipient(identity)

forIdentitySet := func(iSet *api.IdentitySet) {
if iSet == nil {
return
}
iS := *iSet
forIdentity := func(i api.Identity) {
if i != (api.Identity{}) {
iS.User = i
addRecipient(&iS)
}
}
forIdentity(iS.User)
forIdentity(iS.SiteUser)
forIdentity(iS.Group)
forIdentity(iS.SiteGroup)
forIdentity(iS.Application)
forIdentity(iS.Device)
}
if p.GrantedTo != nil && p.GrantedTo.User != (api.Identity{}) {
addRecipient(p.GrantedTo)

for _, identitySet := range p.GetGrantedToIdentities(driveType) {
forIdentitySet(identitySet)
}
forIdentitySet(p.GetGrantedTo(driveType))

return recipients
}

@@ -518,7 +558,7 @@ func (m *Metadata) addPermission(ctx context.Context, p *api.PermissionsType) (n
opts := m.fs.newOptsCall(m.normalizedID, "POST", "/invite")

req := &api.AddPermissionsRequest{
Recipients: fillRecipients(p),
Recipients: fillRecipients(p, m.fs.driveType),
RequireSignIn: m.fs.driveType != driveTypePersonal, // personal and business have conflicting requirements
Roles: p.Roles,
}

@@ -109,7 +109,8 @@ To update an existing permission, include both the Permission ID and the new
`roles` to be assigned. `roles` is the only property that can be changed.

To remove permissions, pass in a blob containing only the permissions you wish
to keep (which can be empty, to remove all.)
to keep (which can be empty, to remove all.) Note that the `owner` role will be
ignored, as it cannot be removed.

Note that both reading and writing permissions requires extra API calls, so if
you don't need to read or write permissions it is recommended to omit

@@ -40,7 +40,6 @@ import (
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)

const (
@@ -65,14 +64,21 @@ const (

// Globals
var (
authPath = "/common/oauth2/v2.0/authorize"
tokenPath = "/common/oauth2/v2.0/token"

// Define the paths used for token operations
commonPathPrefix = "/common" // prefix for the paths if tenant isn't known
authPath = "/oauth2/v2.0/authorize"
tokenPath = "/oauth2/v2.0/token"

scopeAccess = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}

// Description of how to auth for this app for a business account
oauthConfig = &oauth2.Config{
// When using client credential OAuth flow, scope of .default is required in order
// to use the permissions configured for the application within the tenant
scopeAccessClientCred = fs.SpaceSepList{".default"}

// Base config for how to auth
oauthConfig = &oauthutil.Config{
Scopes: scopeAccess,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -125,7 +131,7 @@ func init() {
Help: "Microsoft Cloud for US Government",
}, {
Value: regionDE,
Help: "Microsoft Cloud Germany",
Help: "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
}, {
Value: regionCN,
Help: "Azure and Office 365 operated by Vnet Group in China",
@@ -183,6 +189,14 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
},
},
}, {
Name: "tenant",
Help: `ID of the service principal's tenant. Also called its directory ID.

Set this if using
- Client Credential flow
`,
Sensitive: true,
}, {
Name: "disable_site_permission",
Help: `Disable the request for Sites.Read.All permission.
@@ -241,6 +255,18 @@ modification time and removes all but the last version.
this flag there.
`,
Advanced: true,
}, {
Name: "hard_delete",
Help: `Permanently delete files on removal.

Normally files will get sent to the recycle bin on deletion. Setting
this flag causes them to be permanently deleted. Use with care.

OneDrive personal accounts do not support the permanentDelete API,
it only applies to OneDrive for Business and SharePoint document libraries.
`,
Advanced: true,
Default: false,
}, {
Name: "link_scope",
Default: "anonymous",
@@ -515,28 +541,54 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
})
}

// Config the backend
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
region, graphURL := getRegionURL(m)
// Make the oauth config for the backend
func makeOauthConfig(ctx context.Context, opt *Options) (*oauthutil.Config, error) {
// Copy the default oauthConfig
oauthConfig := *oauthConfig

if config.State == "" {
var accessScopes fs.SpaceSepList
accessScopesString, _ := m.Get("access_scopes")
err := accessScopes.Set(accessScopesString)
// Set the scopes
oauthConfig.Scopes = opt.AccessScopes
if opt.DisableSitePermission {
oauthConfig.Scopes = scopeAccessWithoutSites
}

// Construct the auth URLs
prefix := commonPathPrefix
if opt.Tenant != "" {
prefix = "/" + opt.Tenant
}
oauthConfig.TokenURL = authEndpoint[opt.Region] + prefix + tokenPath
oauthConfig.AuthURL = authEndpoint[opt.Region] + prefix + authPath

// Check to see if we are using client credentials flow
if opt.ClientCredentials {
// Override scope to .default
oauthConfig.Scopes = scopeAccessClientCred
if opt.Tenant == "" {
return nil, fmt.Errorf("tenant parameter must be set when using %s", config.ConfigClientCredentials)
}
}

return &oauthConfig, nil
}

// Config the backend
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
_, graphURL := getRegionURL(m)

// Check to see if this is the start of the state machine execution
if conf.State == "" {
conf, err := makeOauthConfig(ctx, opt)
if err != nil {
return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
}
oauthConfig.Scopes = []string(accessScopes)
disableSitePermission, _ := m.Get("disable_site_permission")
if disableSitePermission == "true" {
oauthConfig.Scopes = scopeAccessWithoutSites
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
return nil, err
}
return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
OAuth2Config: oauthConfig,
OAuth2Config: conf,
})
}

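The URL assembly in makeOauthConfig reduces to prefixing the shared paths with either /common or the tenant ID. A standalone sketch; the endpoint constant below is an assumed value for the global region (the backend looks it up per region):

	package main

	import "fmt"

	// Assumed global-region endpoint.
	const authEndpoint = "https://login.microsoftonline.com"

	const (
		commonPathPrefix = "/common" // used when the tenant isn't known
		tokenPath        = "/oauth2/v2.0/token"
	)

	func tokenURL(tenant string) string {
		prefix := commonPathPrefix
		if tenant != "" {
			prefix = "/" + tenant // Client Credential flow needs the tenant here
		}
		return authEndpoint + prefix + tokenPath
	}

	func main() {
		fmt.Println(tokenURL(""))        // .../common/oauth2/v2.0/token
		fmt.Println(tokenURL("contoso")) // .../contoso/oauth2/v2.0/token
	}
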
@@ -544,9 +596,11 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
if err != nil {
return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
}

// Create a REST client, build on the OAuth client created above
srv := rest.NewClient(oAuthClient)

switch config.State {
switch conf.State {
case "choose_type":
return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
Value: "onedrive",
@@ -572,7 +626,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
}})
case "choose_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(config.Result)
return fs.ConfigGoto(conf.Result)
case "onedrive":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
opts: rest.Opts{
@@ -590,16 +644,22 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
},
})
case "driveid":
return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
out, err := fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
if err != nil {
return out, err
}
// Default the drive_id to the previous version in the config
out.Option.Default, _ = m.Get("drive_id")
return out, nil
case "driveid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
finalDriveID: config.Result,
finalDriveID: conf.Result,
})
case "siteid":
return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
case "siteid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: config.Result,
siteID: conf.Result,
})
case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL
@@ -610,7 +670,7 @@ Examples:
- "https://XXX.sharepoint.com/teams/ID"
`)
case "url_end":
siteURL := config.Result
siteURL := conf.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
@@ -625,12 +685,12 @@ Examples:
return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
case "path_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: config.Result,
relativePath: conf.Result,
})
case "search":
return fs.ConfigInput("search_end", "config_search_term", `Search term`)
case "search_end":
searchTerm := config.Result
searchTerm := conf.Result
opts := rest.Opts{
Method: "GET",
RootURL: graphURL,
@@ -652,10 +712,10 @@ Examples:
})
case "search_sites":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: config.Result,
siteID: conf.Result,
})
case "driveid_final":
finalDriveID := config.Result
finalDriveID := conf.Result

// Test the driveID and get drive type
opts := rest.Opts{
@@ -674,12 +734,12 @@ Examples:

return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
case "driveid_final_end":
if config.Result == "true" {
if conf.Result == "true" {
return nil, nil
}
return fs.ConfigGoto("choose_type")
}
return nil, fmt.Errorf("unknown state %q", config.State)
return nil, fmt.Errorf("unknown state %q", conf.State)
}

// Options defines the configuration for this backend
@@ -690,11 +750,14 @@ type Options struct {
DriveType string `config:"drive_type"`
RootFolderID string `config:"root_folder_id"`
DisableSitePermission bool `config:"disable_site_permission"`
ClientCredentials bool `config:"client_credentials"`
AccessScopes fs.SpaceSepList `config:"access_scopes"`
Tenant string `config:"tenant"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ListChunk int64 `config:"list_chunk"`
NoVersions bool `config:"no_versions"`
HardDelete bool `config:"hard_delete"`
LinkScope string `config:"link_scope"`
LinkType string `config:"link_type"`
LinkPassword string `config:"link_password"`
@@ -814,7 +877,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
retry = true
fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
}
case 429: // Too Many Requests.
case 429, 503: // Too Many Requests, Server Too Busy
// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
retryAfter, parseErr := strconv.Atoi(values[0])
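Both 429 and 503 now follow the same throttling path; per the linked SharePoint guidance, the Retry-After header, when present, supplies the wait in seconds. A sketch of that handling, separate from rclone's pacer plumbing:

	package main

	import (
		"fmt"
		"net/http"
		"strconv"
		"time"
	)

	// retryDelay reports whether a response is throttle-retryable and the
	// server-suggested delay, if any.
	func retryDelay(resp *http.Response) (time.Duration, bool) {
		if resp.StatusCode != 429 && resp.StatusCode != 503 {
			return 0, false
		}
		if v := resp.Header.Get("Retry-After"); v != "" {
			if secs, err := strconv.Atoi(v); err == nil {
				return time.Duration(secs) * time.Second, true
			}
		}
		return 0, true // retryable, but back off on our own schedule
	}

	func main() {
		resp := &http.Response{
			StatusCode: 429,
			Header:     http.Header{"Retry-After": {"5"}},
		}
		d, retry := retryDelay(resp)
		fmt.Println(d, retry) // 5s true
	}
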
@@ -929,7 +992,8 @@ func errorHandler(resp *http.Response) error {
// Decode error response
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
if err != nil {
// Redirects have no body so don't report an error
if err != nil && resp.Header.Get("Location") == "" {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
if errResponse.ErrorInfo.Code == "" {
@@ -976,13 +1040,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}

rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
oauthConfig.Scopes = opt.AccessScopes
if opt.DisableSitePermission {
oauthConfig.Scopes = scopeAccessWithoutSites
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[opt.Region] + authPath,
TokenURL: authEndpoint[opt.Region] + tokenPath,

oauthConfig, err := makeOauthConfig(ctx, opt)
if err != nil {
return nil, err
}

client := fshttp.NewClient(ctx)
@@ -1479,7 +1540,12 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
opts := f.newOptsCall(id, "DELETE", "")
var opts rest.Opts
if f.opt.HardDelete {
opts = f.newOptsCall(id, "POST", "/permanentDelete")
} else {
opts = f.newOptsCall(id, "DELETE", "")
}
opts.NoResponse = true

return f.pacer.Call(func() (bool, error) {
@@ -1526,9 +1592,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
if f.driveType == driveTypePersonal {
return time.Millisecond
}
// While this is true for some OneDrive personal accounts, it
// isn't true for all of them. See #8101 for details
//
// if f.driveType == driveTypePersonal {
// return time.Millisecond
// }
return time.Second
}

@@ -1587,7 +1656,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
@@ -1602,11 +1671,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantCopy
}

err := srcObj.readMetaData(ctx)
err = srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}

// Find and remove existing object
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)

// Check we aren't overwriting a file on the same remote
if srcObj.fs == f {
srcPath := srcObj.rootPath()
@@ -1909,7 +1985,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shareURL, nil
}

cnvFailMsg := "Don't know how to convert share link to direct link - returning the link as is"
const cnvFailMsg = "Don't know how to convert share link to direct link - returning the link as is"
directURL := ""
segments := strings.Split(shareURL, "/")
switch f.driveType {
@@ -2520,6 +2596,9 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.Obje
}
// Set the mod time now and read metadata
info, err = o.fs.fetchAndUpdateMetadata(ctx, src, options, o)
if err != nil {
return nil, fmt.Errorf("failed to fetch and update metadata: %w", err)
}
return info, o.setMetaData(info)
}

@@ -2531,8 +2610,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("can't upload content to a OneNote file")
}

o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
// Only start the renewer if we have a valid one
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
}

size := src.Size()

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"slices"
"testing"
"time"

@@ -16,7 +17,6 @@ import (
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
)

// go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v
@@ -50,7 +50,7 @@ func (f *Fs) TestWritePermissions(t *testing.T, r *fstest.Run) {
file1 := r.WriteFile(randomFilename(), content, t2)

// add a permission with "read" role
permissions := defaultPermissions()
permissions := defaultPermissions(f.driveType)
permissions[0].Roles[0] = api.ReadRole
expectedMeta, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
f.compareMeta(t, expectedMeta, actualMeta, false)
@@ -59,7 +59,7 @@ func (f *Fs) TestWritePermissions(t *testing.T, r *fstest.Run) {
found, num := false, 0
foundCount := 0
for i, p := range actualP {
for _, identity := range p.GrantedToIdentities {
for _, identity := range p.GetGrantedToIdentities(f.driveType) {
if identity.User.DisplayName == testUserID {
// note: expected will always be element 0 here, but actual may be variable based on org settings
assert.Equal(t, expectedP[0].Roles, p.Roles)
@@ -68,7 +68,7 @@ func (f *Fs) TestWritePermissions(t *testing.T, r *fstest.Run) {
}
}
if f.driveType == driveTypePersonal {
if p.GrantedTo != nil && p.GrantedTo.User != (api.Identity{}) && p.GrantedTo.User.ID == testUserID { // shows up in a different place on biz vs. personal
if p.GetGrantedTo(f.driveType) != nil && p.GetGrantedTo(f.driveType).User != (api.Identity{}) && p.GetGrantedTo(f.driveType).User.ID == testUserID { // shows up in a different place on biz vs. personal
assert.Equal(t, expectedP[0].Roles, p.Roles)
found, num = true, i
foundCount++
@@ -106,7 +106,7 @@ func (f *Fs) TestWritePermissions(t *testing.T, r *fstest.Run) {
found = false
var foundP *api.PermissionsType
for _, p := range actualP {
if p.GrantedTo == nil || p.GrantedTo.User == (api.Identity{}) || p.GrantedTo.User.ID != testUserID {
if p.GetGrantedTo(f.driveType) == nil || p.GetGrantedTo(f.driveType).User == (api.Identity{}) || p.GetGrantedTo(f.driveType).User.ID != testUserID {
continue
}
found = true
@@ -134,7 +134,7 @@ func (f *Fs) TestReadPermissions(t *testing.T, r *fstest.Run) {
// test that what we got before vs. after is the same
_ = f.opt.MetadataPermissions.Set("read")
_, expectedMeta := f.putWithMeta(ctx, t, &file1, []*api.PermissionsType{}) // return var intentionally switched here
permissions := defaultPermissions()
permissions := defaultPermissions(f.driveType)
_, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
if f.driveType == driveTypePersonal {
perms, ok := actualMeta["permissions"]
@@ -150,7 +150,7 @@ func (f *Fs) TestReadMetadata(t *testing.T, r *fstest.Run) {
ctx, ci := fs.AddConfig(ctx)
ci.Metadata = true
file1 := r.WriteFile(randomFilename(), "hello", t2)
permissions := defaultPermissions()
permissions := defaultPermissions(f.driveType)

_ = f.opt.MetadataPermissions.Set("read,write")
_, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
@@ -174,7 +174,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
ctx, ci := fs.AddConfig(ctx)
ci.Metadata = true
_ = f.opt.MetadataPermissions.Set("read,write")
permissions := defaultPermissions()
permissions := defaultPermissions(f.driveType)
permissions[0].Roles[0] = api.ReadRole

expectedMeta := fs.Metadata{
@@ -215,11 +215,11 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
compareDirMeta(expectedMeta, actualMeta, false)

// modtime
assert.Equal(t, t1.Truncate(f.Precision()), newDst.ModTime(ctx))
fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t1, newDst.ModTime(ctx), f.Precision())
// try changing it and re-check it
newDst, err = operations.SetDirModTime(ctx, f, newDst, "", t2)
assert.NoError(t, err)
assert.Equal(t, t2.Truncate(f.Precision()), newDst.ModTime(ctx))
fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t2, newDst.ModTime(ctx), f.Precision())
// ensure that f.DirSetModTime also works
err = f.DirSetModTime(ctx, "subdir", t3)
assert.NoError(t, err)
@@ -227,7 +227,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
assert.NoError(t, err)
entries.ForDir(func(dir fs.Directory) {
if dir.Remote() == "subdir" {
assert.True(t, t3.Truncate(f.Precision()).Equal(dir.ModTime(ctx)), fmt.Sprintf("got %v", dir.ModTime(ctx)))
fstest.AssertTimeEqualWithPrecision(t, dir.Remote(), t3, dir.ModTime(ctx), f.Precision())
}
})

@@ -288,7 +288,7 @@ func (f *Fs) TestServerSideCopyMove(t *testing.T, r *fstest.Run) {
file1 := r.WriteFile(randomFilename(), content, t2)

// add a permission with "read" role
permissions := defaultPermissions()
permissions := defaultPermissions(f.driveType)
permissions[0].Roles[0] = api.ReadRole
expectedMeta, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
f.compareMeta(t, expectedMeta, actualMeta, false)
@@ -331,7 +331,10 @@ func (f *Fs) TestMetadataMapper(t *testing.T, r *fstest.Run) {
_ = f.opt.MetadataPermissions.Set("read,write")
file1 := r.WriteFile(randomFilename(), content, t2)

const blob = `{"Metadata":{"permissions":"[{\"grantedToIdentities\":[{\"user\":{\"id\":\"ryan@contoso.com\"}}],\"roles\":[\"read\"]}]"}}`
blob := `{"Metadata":{"permissions":"[{\"grantedToIdentities\":[{\"user\":{\"id\":\"ryan@contoso.com\"}}],\"roles\":[\"read\"]}]"}}`
if f.driveType != driveTypePersonal {
blob = `{"Metadata":{"permissions":"[{\"grantedToIdentitiesV2\":[{\"user\":{\"id\":\"ryan@contoso.com\"}}],\"roles\":[\"read\"]}]"}}`
}

// Copy
ci.MetadataMapper = []string{"echo", blob}
@@ -347,7 +350,7 @@ func (f *Fs) TestMetadataMapper(t *testing.T, r *fstest.Run) {
found := false
foundCount := 0
for _, p := range actualP {
for _, identity := range p.GrantedToIdentities {
for _, identity := range p.GetGrantedToIdentities(f.driveType) {
if identity.User.DisplayName == testUserID {
assert.Equal(t, []api.Role{api.ReadRole}, p.Roles)
found = true
@@ -355,7 +358,7 @@ func (f *Fs) TestMetadataMapper(t *testing.T, r *fstest.Run) {
}
}
if f.driveType == driveTypePersonal {
if p.GrantedTo != nil && p.GrantedTo.User != (api.Identity{}) && p.GrantedTo.User.ID == testUserID { // shows up in a different place on biz vs. personal
if p.GetGrantedTo(f.driveType) != nil && p.GetGrantedTo(f.driveType).User != (api.Identity{}) && p.GetGrantedTo(f.driveType).User.ID == testUserID { // shows up in a different place on biz vs. personal
assert.Equal(t, []api.Role{api.ReadRole}, p.Roles)
found = true
foundCount++
@@ -376,7 +379,7 @@ func (f *Fs) putWithMeta(ctx context.Context, t *testing.T, file *fstest.Item, p
}

expectedMeta.Set("permissions", marshalPerms(t, perms))
obj := fstests.PutTestContentsMetadata(ctx, t, f, file, content, true, "plain/text", expectedMeta)
obj := fstests.PutTestContentsMetadata(ctx, t, f, file, false, content, true, "plain/text", expectedMeta)
do, ok := obj.(fs.Metadataer)
require.True(t, ok)
actualMeta, err := do.Metadata(ctx)
@@ -449,11 +452,18 @@ func indent(t *testing.T, s string) string {
return marshalPerms(t, p)
}

func defaultPermissions() []*api.PermissionsType {
func defaultPermissions(driveType string) []*api.PermissionsType {
if driveType == driveTypePersonal {
return []*api.PermissionsType{{
GrantedTo: &api.IdentitySet{User: api.Identity{}},
GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{ID: testUserID}}},
Roles: []api.Role{api.WriteRole},
}}
}
return []*api.PermissionsType{{
GrantedTo: &api.IdentitySet{User: api.Identity{}},
GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{ID: testUserID}}},
Roles: []api.Role{api.WriteRole},
GrantedToV2: &api.IdentitySet{User: api.Identity{}},
GrantedToIdentitiesV2: []*api.IdentitySet{{User: api.Identity{ID: testUserID}}},
Roles: []api.Role{api.WriteRole},
}}
}

@@ -26,7 +26,10 @@ package quickxorhash
 // OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-import "hash"
+import (
+	"crypto/subtle"
+	"hash"
+)
 
 const (
 	// BlockSize is the preferred size for hashing
@@ -48,6 +51,11 @@ func New() hash.Hash {
 	return &quickXorHash{}
 }
 
+// xor dst with src
+func xorBytes(dst, src []byte) int {
+	return subtle.XORBytes(dst, src, dst)
+}
+
 // Write (via the embedded io.Writer interface) adds more data to the running hash.
 // It never returns an error.
 //
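
A quick check of the semantics relied on here: subtle.XORBytes (Go 1.20+) writes x XOR y into dst up to the shorter of x and y and returns the number of bytes written, so XORBytes(dst, src, dst) xors src into dst in place — the same contract as the deleted fallback implementation below.

package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	dst := []byte{0xff, 0x00, 0xff}
	src := []byte{0x0f, 0xf0} // shorter than dst: only 2 bytes are touched
	n := subtle.XORBytes(dst, src, dst)
	fmt.Println(n, dst) // 2 [240 240 255]
}
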
@@ -1,20 +0,0 @@
-//go:build !go1.20
-
-package quickxorhash
-
-func xorBytes(dst, src []byte) int {
-	n := len(dst)
-	if len(src) < n {
-		n = len(src)
-	}
-	if n == 0 {
-		return 0
-	}
-	dst = dst[:n]
-	//src = src[:n]
-	src = src[:len(dst)] // remove bounds check in loop
-	for i := range dst {
-		dst[i] ^= src[i]
-	}
-	return n
-}
@@ -1,9 +0,0 @@
-//go:build go1.20
-
-package quickxorhash
-
-import "crypto/subtle"
-
-func xorBytes(dst, src []byte) int {
-	return subtle.XORBytes(dst, src, dst)
-}
@@ -92,6 +92,21 @@ Note that these chunks are buffered in memory so increasing them will
 increase memory use.`,
 			Default:  10 * fs.Mebi,
 			Advanced: true,
+		}, {
+			Name:     "access",
+			Help:     "Files and folders will be uploaded with this access permission (default private).",
+			Default:  "private",
+			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: "private",
+				Help:  "Access is granted only to selected users, who can view, read or write what is essential for them.",
+			}, {
+				Value: "public",
+				Help:  "The file or folder can be downloaded by anyone from a web browser. The link can be shared in any way.",
+			}, {
+				Value: "hidden",
+				Help:  "The file or folder has the same access restrictions as Public, but can only be reached by users who know the URL of the file or folder link.",
+			}},
 		}},
 	})
 }
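
For users, the new option surfaces as `access` in the OpenDrive section of rclone.conf (or as the auto-generated --opendrive-access flag). A hypothetical config illustrating it — the remote name and credentials are placeholders:

[opendrive]
type = opendrive
username = you@example.com
password = <obscured password>
access = public
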
@@ -102,6 +117,7 @@ type Options struct {
 	Password  string               `config:"password"`
 	Enc       encoder.MultiEncoder `config:"encoding"`
 	ChunkSize fs.SizeSuffix        `config:"chunk_size"`
+	Access    string               `config:"access"`
 }
 
 // Fs represents a remote server
@@ -404,6 +420,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return dstObj, nil
 }
 
+// About gets quota information
+func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
+	var uInfo usersInfoResponse
+	var resp *http.Response
+
+	err = f.pacer.Call(func() (bool, error) {
+		opts := rest.Opts{
+			Method: "GET",
+			Path:   "/users/info.json/" + f.session.SessionID,
+		}
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &uInfo)
+		return f.shouldRetry(ctx, resp, err)
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	usage = &fs.Usage{
+		Used:  fs.NewUsageValue(uInfo.StorageUsed),
+		Total: fs.NewUsageValue(uInfo.MaxStorage * 1024 * 1024), // MaxStorage appears to be in MB
+		Free:  fs.NewUsageValue(uInfo.MaxStorage*1024*1024 - uInfo.StorageUsed),
+	}
+	return usage, nil
+}
+
 // Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given.
@@ -709,6 +751,23 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
+// getAccessLevel is a helper function to determine access level integer
+func getAccessLevel(access string) int64 {
+	var accessLevel int64
+	switch access {
+	case "private":
+		accessLevel = 0
+	case "public":
+		accessLevel = 1
+	case "hidden":
+		accessLevel = 2
+	default:
+		accessLevel = 0
+		fs.Errorf(nil, "Invalid access: %s, defaulting to private", access)
+	}
+	return accessLevel
+}
+
 // DirCacher methods
 
 // CreateDir makes a directory with pathID as parent and name leaf
@@ -721,7 +780,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		SessionID:           f.session.SessionID,
 		FolderName:          f.opt.Enc.FromStandardName(leaf),
 		FolderSubParent:     pathID,
-		FolderIsPublic:      0,
+		FolderIsPublic:      getAccessLevel(f.opt.Access),
 		FolderPublicUpl:     0,
 		FolderPublicDisplay: 0,
 		FolderPublicDnl:     0,
@@ -1054,7 +1113,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	// Set permissions
 	err = o.fs.pacer.Call(func() (bool, error) {
-		update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: 0}
+		update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: getAccessLevel(o.fs.opt.Access)}
 		// fs.Debugf(nil, "Permissions : %#v", update)
 		opts := rest.Opts{
 			Method: "POST",
@@ -1147,6 +1206,7 @@ var (
 	_ fs.Mover           = (*Fs)(nil)
 	_ fs.DirMover        = (*Fs)(nil)
 	_ fs.DirCacheFlusher = (*Fs)(nil)
+	_ fs.Abouter         = (*Fs)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.IDer            = (*Object)(nil)
 	_ fs.ParentIDer      = (*Object)(nil)

@@ -231,3 +231,10 @@ type permissions struct {
 type uploadFileChunkReply struct {
 	TotalWritten int64 `json:"TotalWritten"`
 }
+
+// usersInfoResponse describes OpenDrive users/info.json response
+type usersInfoResponse struct {
+	// This response contains many other values but these are the only ones currently in use
+	StorageUsed int64 `json:"StorageUsed,string"`
+	MaxStorage  int64 `json:"MaxStorage,string"`
+}
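
The `,string` tags matter here: the users/info.json payload carries its numbers as JSON strings, and `,string` tells encoding/json to unquote them into int64. A small self-contained demonstration with an assumed payload shape (the field values are hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

type usersInfoResponse struct {
	StorageUsed int64 `json:"StorageUsed,string"`
	MaxStorage  int64 `json:"MaxStorage,string"` // in MB, per the About comment above
}

func main() {
	blob := []byte(`{"StorageUsed":"1048576","MaxStorage":"5120"}`) // hypothetical response
	var u usersInfoResponse
	if err := json.Unmarshal(blob, &u); err != nil {
		panic(err)
	}
	fmt.Println(u.StorageUsed, u.MaxStorage*1024*1024) // bytes used, total bytes
}
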
@@ -58,12 +58,10 @@ func populateSSECustomerKeys(opt *Options) error {
 	sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
 	if opt.SSECustomerKeySha256 == "" {
 		opt.SSECustomerKeySha256 = sha256Checksum
-	} else {
-		if opt.SSECustomerKeySha256 != sha256Checksum {
-			return fmt.Errorf("the computed SHA256 checksum "+
-				"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
-				sha256Checksum, opt.SSECustomerKeySha256)
-		}
+	} else if opt.SSECustomerKeySha256 != sha256Checksum {
+		return fmt.Errorf("the computed SHA256 checksum "+
+			"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
+			sha256Checksum, opt.SSECustomerKeySha256)
 	}
 	if opt.SSECustomerAlgorithm == "" {
 		opt.SSECustomerAlgorithm = sseDefaultAlgorithm
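
The flattened else-if keeps the behavior: derive the checksum from the decoded key, fill it in when the config left it blank, reject a mismatch otherwise. A minimal standalone sketch of the check itself, assuming the key is supplied base64-encoded (as sse_customer_key is):

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	key := base64.StdEncoding.EncodeToString(make([]byte, 32)) // hypothetical 256-bit key
	decoded, err := base64.StdEncoding.DecodeString(key)
	if err != nil {
		panic(err)
	}
	sum := sha256.Sum256(decoded)
	// This is the value expected in sse_customer_key_sha256.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
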
@@ -22,6 +22,7 @@ import (
 	"github.com/oracle/oci-go-sdk/v65/objectstorage"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/chunksize"
+	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 )
@@ -148,7 +149,7 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
 	}
 	md5sumBinary := m.Sum([]byte{})
 	w.addMd5(&md5sumBinary, int64(chunkNumber))
-	md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
+	md5sum := base64.StdEncoding.EncodeToString(md5sumBinary)
 
 	// Object storage requires 1 <= PartNumber <= 10000
 	ossPartNumber := chunkNumber + 1
@@ -183,6 +184,9 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
 		if ossPartNumber <= 8 {
 			return shouldRetry(ctx, resp.HTTPResponse(), err)
 		}
+		if fserrors.ContextError(ctx, &err) {
+			return false, err
+		}
 		// retry all chunks once have done the first few
 		return true, err
 	}
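
The added guard stops the "retry everything after the first few parts" policy from spinning on a dead context. A self-contained sketch of the pattern, with a hypothetical stand-in for fserrors.ContextError (assumed semantics: if the context is cancelled or timed out, replace err with the context's error and stop retrying):

package main

import (
	"context"
	"errors"
	"fmt"
)

// contextError is a hypothetical stand-in for fserrors.ContextError.
func contextError(ctx context.Context, perr *error) bool {
	if ctxErr := ctx.Err(); ctxErr != nil {
		*perr = ctxErr
		return true
	}
	return false
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate an aborted transfer
	err := errors.New("upload failed")
	if contextError(ctx, &err) {
		fmt.Println("not retrying:", err) // not retrying: context canceled
	}
}
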
@@ -279,7 +283,7 @@ func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
 	if extend := end - int64(len(w.md5s)); extend > 0 {
 		w.md5s = append(w.md5s, make([]byte, extend)...)
 	}
-	copy(w.md5s[start:end], (*md5binary)[:])
+	copy(w.md5s[start:end], (*md5binary))
 }
 
 func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
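
Why addMd5 packs each part's 16-byte digest into one contiguous slice at start:end: a composite multipart checksum is typically the md5 of the concatenated per-part md5s with the part count appended, in the style of S3/OCI multipart ETags. The exact aggregate the backend reports is not shown in this diff, so treat the following as an assumption-labeled sketch of that scheme rather than the backend's code:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	parts := [][]byte{[]byte("chunk-1"), []byte("chunk-2")} // hypothetical chunk payloads
	var md5s []byte
	for _, p := range parts {
		sum := md5.Sum(p)
		md5s = append(md5s, sum[:]...) // the contiguous layout addMd5 builds
	}
	agg := md5.Sum(md5s)
	fmt.Printf("%s-%d\n", base64.StdEncoding.EncodeToString(agg[:]), len(parts))
}
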
@@ -106,9 +106,9 @@ func newOptions() []fs.Option {
 		Sensitive: true,
 	}, {
 		Name: "compartment",
-		Help: "Object storage compartment OCID",
+		Help: "Specify the compartment OCID if you need to list buckets.\n\nListing objects works without a compartment OCID.",
 		Provider:  "!no_auth",
-		Required:  true,
+		Required:  false,
 		Sensitive: true,
 	}, {
 		Name: "region",