Mirror of https://github.com/rclone/rclone.git, synced 2025-12-06 00:03:32 +00:00.

Compare commits
589 commits (abbreviated SHA1s):

583eee635f ba836e729e a6db2e7320 31486aa7e3 d24e5bc7e4 077c1a0f57 3bb82b4dd5 1591592936 cc036884d4 f6b9fdf7c6
c9fe2f75a8 340a67c012 264b3f0c90 a7978cea56 bebd82c586 af02c3b2a7 77dfe5f1fd e9a95a78de 82ca5295f4 9d8a40b813
12d80c5219 038a87c569 3ef97993ad 04bba67cd5 29dd29b9f3 532248352b ab803942de f933e80258 1c6f0101a5 c6f161de90
bdcf7fe28c 776dc47eb8 167046e21a 98d50d545a 48242c5357 e437e6c209 b813a01718 77f7bb08af a5a61f4874 e8879f3e77
2a6675cffd fa4d171f62 d4d530bd8e f4b011e4e4 d80890bf32 39392d70dd 643386f026 ed755bf04f 071c3f28e5 7453b7d5f3
c9350149d8 08789a5815 4037af9c1a 628ff8e524 578c75cb1e 63ab250817 39f910a65d 0fb36562dd 8c25a15a40 f5ee16e201
2bcbed30bd 5026a9171d b750c50bfd 535acd0483 db37b3ef9e 257607ab3d 3ea1c5c4d2 bd23ea028e c58d4fe939 ddc7059a73
2677c43f26 48ab67f090 089df7d977 4fbe0652c9 47665dad07 ad724463a5 6afd7088d3 b33140ddeb b1c0ae5e7d 40bcc7a90b
be17f1523a bb58040d9c 2db0e23584 a7337b0a95 7821cb884d 85c29e3629 b7ec75aab6 38309f2df2 7487d34c33 e45cb4fc75
21008b4cd5 cffe85e6c5 d12a92eac9 11eeaaf792 a603efeaf4 0bd0a992a4 82c8d78a44 a83fec756b e953598987 feaa20d885
967fc6d7f4 b95bda1e92 9c14562850 f992742404 f2467d07aa d69cdb79f7 df5d92d709 1b5b36523b 2f424ceecf bc986b44b2
f4b1a51af6 25703ad20e ab803d1278 0427177857 3dfcfc2caa d4cff1ae19 f5753369e4 4c76fac594 0d0bcdac31 f3bd02f0ef
e6fde67491 b4e3332e02 0dea83a4aa e8f3f98aa0 d61328e459 9844704567 94a320f23c 7fc573db27 af95616122 72f9f1e9c0
91b8152321 552b6c47ff 01a155fb00 50d0597d56 123a030441 28ceb323ee c624dd5c3a a56c11753a 4341d472aa b6e7148daf
45458f2cdb de147b6e54 11de137660 156c372cd7 c979cde002 03aab1a123 dc803b572c 4d19042a61 923989d1d7 cf65e36cf3
cf5457c2cd ea4aa696a5 34195fd3e8 40b8167ab4 e365f237f5 7d449572bd 181fecaec3 7701d1d33d 6dd736fbdc f36ca0cd25
9b3b1c7067 0dd0d6a13e e5bde42303 f01a50eb47 5ca61ab705 4ac4ce6afd 40a874a0d8 f4dd86238d 4f1eafb044 20c9e0cab6
9c09cf9cf6 3a3af00180 281e0c2d62 25b81b8789 90fdd97a7b 3c58e0efe0 db744f64f6 480220a84a d0362171cf 45887d11f6
c4bad5c1bc 40de89df73 27f5297e8d de185de215 d362db2e08 db2a49e384 d63fcc6e44 4444037f5c a555513c26 039c260216
4577c08e05 c9ed691919 9f96c0d4ea 91d095f468 bff702a6f1 a1d6bbd31f fb6a9dfbf3 3f3c5f3ff4 89196cb353 9284506b86
88c72d1f4d 5e3bf50b2e 982f76b4df 347812d1d3 f4449440f8 e66675d346 45228e2f18 b866850fdd 5b63b9534f 10449c86a4
26a9a9fed2 602e42d334 4c5a21703e f2ee949eff 3ad255172c 29b1751d0e 363da9aa82 6c8148ef39 3ed4a2e963 aaadb48d48
52e25c43b9 9a66563fc6 6ca670d66a 809653055d 61325ce507 c3989d1906 a79887171c f29e284c90 9a66086fa0 1845c261c6
70cbcef624 9169b2b5ab 0957c8fb74 bb0cd76a5f 08240c8cf5 014acc902d 33fec9c835 3a5ffc7839 8a6bf35481 f7d27f4bf2
378a2d21ee 3404eb0444 13e5701f2a 432d5d1e20 cc05159518 119ccb2b95 0ef0e908ca 0063d14dbb 0d34efb10f 415f4b2b93
07cf5f1d25 7d31956169 473d443874 e294b76121 8f3c583870 d0d41fe847 297f15a3e3 d5f0affd4b 0598aafbfd 528e22f139
f1a8420814 e250f1afcd ebf24c9872 b4c7b240d8 22a14a8c98 07133b892d a8ca18165e 8c4e71fc84 351e2db2ef 2234feb23d
fb5125ecee e8cbc54a06 00512e1303 fcfbd3153b 9a8075b682 996037bee9 e90537b2e9 42c211c6b2 3d4f127b33 ff966b37af
3b6effa81a 8308d5d640 14024936a8 9065e921c1 99788b605e d4cc3760e6 a6acbd1844 389565f5e2 4b4198522d f7665300c0
73beae147f 92f8e476b7 5849148d51 37853ec412 ae7ff28714 9873f4bc74 1b200bf69a e3fa6fe3cc 9e1b3861e7 e9a753f678
708391a5bf 1cfed18aa7 7751d5a00b 8274712c2c 625a564ba3 2dd2072cdb 998d1d1727 fcb912a664 5f938fb9ed 72b79504ea
3e2a606adb 95a6e3e338 d06bb55f3f 9f3694cea3 2c50f26c36 22d6c8d30d 96fb75c5a7 acd67edf9a b26db8e640 da955e5d4f
4f8dab8bce 000ddc4951 3faa84b47c e1162ec440 30cccc7101 1f5a29209e 45255bccb3 055206c4ee f3070b82bc 7e2deffc62
ae3ff50580 6486ba6344 7842000f8a 1f9c962183 279d9ecc56 31773ecfbf 666e34cf69 5a84a08b3f 51a468b2ba fc798d800c
3115ede1d8 7a5491ba7b a6cf4989b6 f489b54fa0 6244d1729b e97c2a2832 56bf9b4a10 ceb9406c2f 1f887f7ba0 7db26b6b34
37a3309438 97be9015a4 487e4f09b3 09a408664d 43fa256d56 6859c04772 38a0539096 2cd85813b4 e6e6069ecf fcf47a8393
46a323ae14 72be80ddca a9e7e7bcc2 925c4382e2 08c60c3091 5c594fea90 cc01223535 aaacfa51a0 c18c66f167 d6667d34e7
e649cf4d50 f080ec437c 4023eaebe0 baf16a65f0 70fe2ac852 41cf7faea4 f226f2dfb1 31caa019fa 0468375054 6001f05a12
f7b87a8049 d379641021 84281c9089 8e2dc069d2 61d6f538b3 65b2e378e0 dea6bdf3df 27eb8c7f45 1607344613 5f138dd822
2520c05c4b f7f5e87632 a7e6806f26 d0eb884262 ae6874170f f5bab284c3 c75dfa6436 56eb82bdfc 066e00b470 e0c445d36e
74652bf318 b6a95c70e9 aca7d0fd22 12761b3058 3567a47258 6b670bd439 335ca6d572 c4a9e480c9 232d304c13 44ac79e357
0487e465ee bb6cfe109d 864eb89a67 4471e6f258 e82db0b7d5 72e624c5e4 6092fa57c3 3e15a594b7 db8c007983 5836da14c2
8ed07d11a0 1f2ee44c20 32798dca25 075f98551f 963ab220f6 281a007b1a 589b7b4873 04d2781fda 5b95fd9588 a42643101e
bcca67efd5 7771aaacf6 fda06fc17d 2faa4758e4 9a9ef040e3 ca403dc90e 451f4c2a8f 5f6b105c3e d98837b7e6 99dd748fec
bdfe213c47 52fbb10b47 6cb584f455 ec8bbb8d30 fcdffab480 aeb568c494 b07f575d07 ebae647dfa 6fd5b469bc 78e822dd79
a79db20bcd d67ef19f6e 037a6bd1b0 09b884aade 243bcc9d07 64cf9ac911 15a3ec8fa1 2b8af4d23f 5755e31ef0 f4c787ab74
4d7b6e14b8 9ea7d143dd 927e721a25 bd46f01eb4 5f4d7154c0 bad8a01850 d808c3848a 3f0bec2ee9 8fb9eb2fee 01fa15a7d9
6aaa5d7a75 b4d3411637 01ddc8ca6c 16c1e7149e 0374ea2c79 2e2451f8ec bd1e3448b3 20909fa294 c502e00c87 9172c9b3dd
78deab05f9 6c9d377bbb 62ddc9b7f9 448ae49fa4 5f3c276d0a 9cea493f58 400d1a4468 851ce0f4fe cc885bd39a a1a8c21c70
6ef4bd8c45 fb316123ec 270af61665 155f4f2e21 eaf593884b 930574c6e9 c1586a9866 432eb74814 92fb644fb6 bb92af693a
eb5fd07131 b2ce7c9aa6 d6b46e41dd 254c6ef1dd 547f943851 8611c9f6f7 f6576237a4 207b64865e 9ee1b21ec2 55a12bd639
3b4a57dab9 afe158f878 722a3f32cc 9183618082 18ebca3979 e84d2c9e5f e98b61ceeb 19f9fca2f6 7dbf1ab66f bfe272bf67
cce8936802 043bf3567d 1b2f2c0d69 4b376514a6 c27e6a89b0 76c6e3b15c 48ec00cc1a 866600a73b d8f4cd4d5f d0810b602a
d5afcf9e34 07c4d95f38 fd83071b6b e042d9089f cdfa0beafb ddb3b17e96 32f71c97ea 53853116fb a887856998 0df7466d2b
23579e3b99 3affba6fa6 542677d807 d481aa8613 15e633fa8b 732c24c624 75dfdbf211 5f07113a4b 6a380bcc67 97276ce765
a23a7a807f c6a4caaf7e 5574733dcb 49c21d0b6e 0ea2ce3674 3ddf824251 68fdff3c27 c003485ae3 99d5080191 2ad217eedd
a3eb7f1142 6d620b6d88 9f8357ada7 e5a1bcb1ce 46484022b0 ab746ef891 6241c1ae43 0f8d3fe6a3 07afb9e700 3165093feb
4af0c1d902 82f9554474 d8d53b7aa0 8c9048259a 0361acbde4 f5bf0a48f3 cec843dd8c 54a9488e59 29fe0177bd 0e134364ac
0d8350d95d 497e373e31 ed8fea4aa5 4d7f75dd76 53e757aea9 f578896745 13be03cb86 864e02409e fccc779a15
.github/FUNDING.yml (vendored): 4 lines removed (file deleted)

@@ -1,4 +0,0 @@
-github: [ncw]
-patreon: njcw
-liberapay: ncw
-custom: ["https://rclone.org/donate/"]
.github/dependabot.yml (vendored): 16 changed lines

@@ -1,10 +1,6 @@
 version: 2
 updates:
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
       interval: "daily"
-  - package-ecosystem: "gomod"
-    directory: "/"
-    schedule:
-      interval: "daily"
.github/workflows/build.yml (vendored): 55 changed lines

@@ -8,9 +8,9 @@ name: build
 on:
   push:
     branches:
-      - '*'
+      - '**'
     tags:
-      - '*'
+      - '**'
   pull_request:
   workflow_dispatch:
     inputs:
@@ -27,12 +27,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.21'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -43,14 +43,14 @@ jobs:
 
           - job_name: linux_386
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.21'
             goarch: 386
             gotags: cmount
             quicktest: true
 
           - job_name: mac_amd64
             os: macos-11
-            go: '1.20'
+            go: '1.21'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -59,14 +59,14 @@ jobs:
 
           - job_name: mac_arm64
             os: macos-11
-            go: '1.20'
+            go: '1.21'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
 
           - job_name: windows
             os: windows-latest
-            go: '1.20'
+            go: '1.21'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,35 +76,35 @@ jobs:
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.21'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.18
-            os: ubuntu-latest
-            go: '1.18'
-            quicktest: true
-            racequicktest: true
-
           - job_name: go1.19
             os: ubuntu-latest
             go: '1.19'
             quicktest: true
             racequicktest: true
 
+          - job_name: go1.20
+            os: ubuntu-latest
+            go: '1.20'
+            quicktest: true
+            racequicktest: true
+
     name: ${{ matrix.job_name }}
 
     runs-on: ${{ matrix.os }}
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
          fetch-depth: 0
 
       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: ${{ matrix.go }}
           check-latest: true
@@ -130,6 +130,11 @@ jobs:
       - name: Install Libraries on macOS
         shell: bash
         run: |
+          # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
+          # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
+          unset HOMEBREW_NO_INSTALL_FROM_API
+          brew untap --force homebrew/core
+          brew untap --force homebrew/cask
           brew update
           brew install --cask macfuse
         if: matrix.os == 'macos-11'
@@ -217,7 +222,7 @@ jobs:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
         # working-directory: '$(modulePath)'
         # Deploy binaries if enabled in config && not a PR && not a fork
-        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
+        if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
 
   lint:
     if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
@@ -227,7 +232,7 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Code quality test
         uses: golangci/golangci-lint-action@v3
@@ -237,9 +242,9 @@ jobs:
 
       # Run govulncheck on the latest go version, the one we build binaries with
       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          go-version: '1.20'
+          go-version: '1.21'
           check-latest: true
 
       - name: Install govulncheck
@@ -256,15 +261,15 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
       # Upgrade together with NDK version
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          go-version: '1.20'
+          go-version: '1.21'
 
       - name: Go module cache
         uses: actions/cache@v3
@@ -352,4 +357,4 @@ jobs:
       env:
         RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
       # Upload artifacts if not a PR && not a fork
-      if: github.head_ref == '' && github.repository == 'rclone/rclone'
+      if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
.github/workflows/build_publish_beta_docker_image.yml (vendored, new file): 77 lines added

@@ -0,0 +1,77 @@
+name: Docker beta build
+
+on:
+  push:
+    branches:
+      - master
+jobs:
+  build:
+    if: github.repository == 'rclone/rclone'
+    runs-on: ubuntu-latest
+    name: Build image job
+    steps:
+      - name: Free some space
+        shell: bash
+        run: |
+          df -h .
+          # Remove android SDK
+          sudo rm -rf /usr/local/lib/android || true
+          # Remove .net runtime
+          sudo rm -rf /usr/share/dotnet || true
+          df -h .
+      - name: Checkout master
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ghcr.io/${{ github.repository }}
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          # This is the user that triggered the Workflow. In this case, it will
+          # either be the user whom created the Release or manually triggered
+          # the workflow_dispatch.
+          username: ${{ github.actor }}
+          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
+          # GitHub Actions at the start of a workflow run to identify the job.
+          # This is used to authenticate against GitHub Container Registry.
+          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
+          # for more detailed information.
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Show disk usage
+        shell: bash
+        run: |
+          df -h .
+      - name: Build and publish image
+        uses: docker/build-push-action@v5
+        with:
+          file: Dockerfile
+          context: .
+          push: true # push the image to ghcr
+          tags: |
+            ghcr.io/rclone/rclone:beta
+            rclone/rclone:beta
+          labels: ${{ steps.meta.outputs.labels }}
+          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          cache-from: type=gha, scope=${{ github.workflow }}
+          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
+          provenance: false
+          # Eventually cache will need to be cleared if builds more frequent than once a week
+          # https://github.com/docker/build-push-action/issues/252
+      - name: Show disk usage
+        shell: bash
+        run: |
+          df -h .
.github/workflows/build_publish_docker_image.yml (vendored): 26 lines removed (file deleted)

@@ -1,26 +0,0 @@
-name: Docker beta build
-
-on:
-  push:
-    branches:
-      - master
-
-jobs:
-  build:
-    if: github.repository == 'rclone/rclone'
-    runs-on: ubuntu-latest
-    name: Build image job
-    steps:
-      - name: Checkout master
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-      - name: Build and publish image
-        uses: ilteoood/docker_buildx@1.1.0
-        with:
-          tag: beta
-          imageName: rclone/rclone
-          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          publish: true
-          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
-          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
A further Docker workflow (file name not preserved):

@@ -11,7 +11,7 @@ jobs:
     name: Build image job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
     name: Build docker plugin job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Build and publish docker plugin
.github/workflows/winget.yml (vendored): 2 changed lines

@@ -5,7 +5,7 @@ on:
 
 jobs:
   publish:
-    runs-on: windows-latest # Action can only run on Windows
+    runs-on: ubuntu-latest
     steps:
       - uses: vedantmgoyal2009/winget-releaser@v2
         with:
.gitignore (vendored): 2 changed lines

@@ -8,10 +8,10 @@ rclone.iml
 .idea
 .history
 *.test
-*.log
 *.iml
+fuzz-build.zip
 *.orig
 *.rej
 Thumbs.db
 __pycache__
 .DS_Store
@@ -2,15 +2,17 @@
 linters:
   enable:
-    - deadcode
     - errcheck
     - goimports
     - revive
     - ineffassign
-    - structcheck
-    - varcheck
     - govet
     - unconvert
     - staticcheck
     - gosimple
+    - stylecheck
     - unused
     - misspell
     #- prealloc
     #- maligned
   disable-all: true
@@ -25,6 +27,74 @@ issues:
   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0
 
+  exclude-rules:
+
+    - linters:
+        - staticcheck
+      text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
+
+  # don't disable the revive messages about comments on exported functions
+  include:
+    - EXC0012
+    - EXC0013
+    - EXC0014
+    - EXC0015
+
 run:
   # timeout for analysis, e.g. 30s, 5m, default is 1m
   timeout: 10m
+
+linters-settings:
+  revive:
+    # setting rules seems to disable all the rules, so re-enable them here
+    rules:
+      - name: blank-imports
+        disabled: false
+      - name: context-as-argument
+        disabled: false
+      - name: context-keys-type
+        disabled: false
+      - name: dot-imports
+        disabled: false
+      - name: empty-block
+        disabled: true
+      - name: error-naming
+        disabled: false
+      - name: error-return
+        disabled: false
+      - name: error-strings
+        disabled: false
+      - name: errorf
+        disabled: false
+      - name: exported
+        disabled: false
+      - name: increment-decrement
+        disabled: true
+      - name: indent-error-flow
+        disabled: false
+      - name: package-comments
+        disabled: false
+      - name: range
+        disabled: false
+      - name: receiver-naming
+        disabled: false
+      - name: redefines-builtin-id
+        disabled: true
+      - name: superfluous-else
+        disabled: true
+      - name: time-naming
+        disabled: false
+      - name: unexported-return
+        disabled: false
+      - name: unreachable-code
+        disabled: true
+      - name: unused-parameter
+        disabled: true
+      - name: var-declaration
+        disabled: false
+      - name: var-naming
+        disabled: false
+  stylecheck:
+    # Only enable the checks performed by the staticcheck stand-alone tool,
+    # as documented here: https://staticcheck.io/docs/configuration/options/#checks
+    checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
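For illustration, here is what the `package-comments` and `exported` revive rules enabled above require of Go code; the package and function below are hypothetical, not part of rclone:

```go
// Package example shows the comment style the package-comments rule
// requires: every package gets a doc comment starting with "Package <name>".
package example

// Greet returns a greeting for name.
//
// The exported rule requires a doc comment like this one, starting with
// the identifier's name, on every exported declaration.
func Greet(name string) string {
	return "Hello, " + name
}
```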
@@ -419,7 +419,7 @@ remote or an fs.
 
 Research
 
-* Look at the interfaces defined in `fs/fs.go`
+* Look at the interfaces defined in `fs/types.go`
 * Study one or more of the existing remotes
 
 Getting going
@@ -428,14 +428,19 @@ Getting going
 * box is a good one to start from if you have a directory-based remote
 * b2 is a good one to start from if you have a bucket-based remote
 * Add your remote to the imports in `backend/all/all.go`
-* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
+* HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good go SDK then use that instead.
 * Try to implement as many optional methods as possible as it makes the remote more usable.
-* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
+* Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
   * `rclone purge -v TestRemote:rclone-info`
   * `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
   * `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
   * open `remote.csv` in a spreadsheet and examine
 
+Important:
+
+* Please use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend. It makes maintenance much easier.
+* If your backend is HTTP based then please use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
+
 Unit tests
 
 * Create a config entry called `TestRemote` for the unit tests to use
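As a sketch of how the `lib/rest` and `fs/fshttp` advice in the hunk above fits together for an HTTP based backend: `fshttp.NewClient` and `rest.NewClient(...).SetRoot(...)` are the real entry points, while the package name, function and endpoint parameter here are illustrative assumptions:

```go
package mybackend

import (
	"context"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// newSrv builds a REST client on top of rclone's HTTP client, so the
// backend gets --dump bodies, --tpslimit and --user-agent support
// without any extra code.
func newSrv(ctx context.Context, endpoint string) *rest.Client {
	client := fshttp.NewClient(ctx)                 // honours rclone's global HTTP flags
	return rest.NewClient(client).SetRoot(endpoint) // JSON/XML helper for REST calls
}
```

Using `fshttp.NewClient` rather than `http.DefaultClient` is what makes the debugging and rate-limiting flags work for free.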
@@ -469,7 +474,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
 * `README.md` - main GitHub page
 * `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
   * make sure this has the `autogenerated options` comments in (see your reference backend docs)
-  * update them with `make backenddocs` - revert any changes in other backends
+  * update them in your backend with `bin/make_backend_docs.py remote`
 * `docs/content/overview.md` - overview docs
 * `docs/content/docs.md` - list of remotes in config section
 * `docs/content/_index.md` - front page of rclone.org
@@ -16,6 +16,11 @@ Current active maintainers of rclone are:
 | Max Sum | @Max-Sum | union backend |
 | Fred | @creativeprojects | seafile backend |
 | Caleb Case | @calebcase | storj backend |
+| wiserain | @wiserain | pikpak backend |
+| albertony | @albertony | |
+| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
+| Hideo Aoyama | @boukendesho | snap packaging |
+| nielash | @nielash | bisync |
**This is a work in progress Draft**
MANUAL.html (generated): 33126 changed lines. File diff suppressed because it is too large.

MANUAL.txt (generated): 34157 changed lines. File diff suppressed because it is too large.
Makefile: 2 changed lines

@@ -96,7 +96,7 @@ build_dep:
 
 # Get the release dependencies we only install on linux
 release_dep_linux:
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
+	go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
 
 # Get the release dependencies we only install on Windows
 release_dep_windows:
README.md: 10 changed lines

@@ -25,18 +25,19 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
+* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
 * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
 * Box [:page_facing_up:](https://rclone.org/box/)
 * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
 * China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
 * Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
-* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
 * Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
 * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 * Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
 * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
 * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
+* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
 * FTP [:page_facing_up:](https://rclone.org/ftp/)
 * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
 * Google Drive [:page_facing_up:](https://rclone.org/drive/)
@@ -50,6 +51,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
 * IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
 * Koofr [:page_facing_up:](https://rclone.org/koofr/)
+* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
 * Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
 * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
@@ -60,16 +62,21 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Minio [:page_facing_up:](https://rclone.org/s3/#minio)
 * Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
 * OVH [:page_facing_up:](https://rclone.org/swift/)
+* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
 * OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
 * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
 * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
 * Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
 * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
 * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
+* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
+* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
 * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
 * put.io [:page_facing_up:](https://rclone.org/putio/)
+* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
 * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
 * Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
+* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
@@ -80,6 +87,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 * Storj [:page_facing_up:](https://rclone.org/storj/)
 * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
+* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
 * Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
 * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
 * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
RELEASE.md: 22 changed lines

@@ -90,6 +90,28 @@ Now
 * git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
 * git push
 
+## Update the website between releases
+
+Create an update website branch based off the last release
+
+    git co -b update-website
+
+If the branch already exists, double check there are no commits that need saving.
+
+Now reset the branch to the last release
+
+    git reset --hard v1.64.0
+
+Create the changes, check them in, test with `make serve` then
+
+    make upload_test_website
+
+Check out https://test.rclone.org and when happy
+
+    make upload_website
+
+Cherry pick any changes back to master and the stable branch if it is active.
+
 ## Making a manual build of docker
 
 The rclone docker image should autobuild on via GitHub actions. If it doesn't
@@ -36,9 +36,12 @@ import (
 	_ "github.com/rclone/rclone/backend/opendrive"
 	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
 	_ "github.com/rclone/rclone/backend/pcloud"
+	_ "github.com/rclone/rclone/backend/pikpak"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
+	_ "github.com/rclone/rclone/backend/protondrive"
 	_ "github.com/rclone/rclone/backend/putio"
 	_ "github.com/rclone/rclone/backend/qingstor"
+	_ "github.com/rclone/rclone/backend/quatrix"
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"
File diff suppressed because it is too large.
@@ -1,5 +1,5 @@
-//go:build !plan9 && !solaris && !js && go1.18
-// +build !plan9,!solaris,!js,go1.18
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js
 
 package azureblob
@@ -20,17 +20,18 @@ func (f *Fs) InternalTest(t *testing.T) {
 
 func TestIncrement(t *testing.T) {
 	for _, test := range []struct {
-		in   []byte
-		want []byte
+		in   [8]byte
+		want [8]byte
 	}{
-		{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
-		{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
-		{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
-		{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
-		{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
-		{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
+		{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
+		{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
+		{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
+		{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
+		{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
+		{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
+		{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
 	} {
-		increment(test.in)
+		increment(&test.in)
 		assert.Equal(t, test.want, test.in)
 	}
 }
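For reference, an `increment` implementation consistent with the test cases above treats the array as a little-endian counter and propagates the carry; this is a sketch reconstructed from the expected values, not necessarily the backend's exact code:

```go
// increment adds 1 to the little-endian integer in xs, carrying into the
// next byte on overflow; incrementing an all-0xFF counter wraps to zero.
func increment(xs *[8]byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			// no carry, so we are done
			break
		}
	}
}
```

The pointer in `increment(&test.in)` is the point of the change: a Go array is a value, so passing `[8]byte` directly would mutate a copy, whereas the old `[]byte` version worked only because slices share their backing array.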
@@ -1,7 +1,7 @@
 // Test AzureBlob filesystem interface
 
-//go:build !plan9 && !solaris && !js && go1.18
-// +build !plan9,!solaris,!js,go1.18
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js
 
 package azureblob
@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
 	"github.com/stretchr/testify/assert"
 )
@@ -25,6 +26,25 @@ func TestIntegration(t *testing.T) {
 	})
 }
 
+// TestIntegration2 runs integration tests against the remote
+func TestIntegration2(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("Skipping as -remote set")
+	}
+	name := "TestAzureBlob"
+	fstests.Run(t, &fstests.Opt{
+		RemoteName:  name + ":",
+		NilObject:   (*Object)(nil),
+		TiersToTest: []string{"Hot", "Cool"},
+		ChunkedUpload: fstests.ChunkedUploadConfig{
+			MinChunkSize: defaultChunkSize,
+		},
+		ExtraConfig: []fstests.ExtraConfigItem{
+			{Name: name, Key: "directory_markers", Value: "true"},
+		},
+	})
+}
+
 func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadChunkSize(cs)
 }
@@ -1,7 +1,7 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-//go:build plan9 || solaris || js || !go1.18
-// +build plan9 solaris js !go1.18
+//go:build plan9 || solaris || js
+// +build plan9 solaris js
 
 package azureblob
backend/b2/b2.go: 168 changed lines

@@ -32,6 +32,7 @@ import (
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/encoder"
+	"github.com/rclone/rclone/lib/multipart"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/pool"
 	"github.com/rclone/rclone/lib/rest"
@@ -57,9 +58,7 @@ const (
 	minChunkSize        = 5 * fs.Mebi
 	defaultChunkSize    = 96 * fs.Mebi
 	defaultUploadCutoff = 200 * fs.Mebi
-	largeFileCopyCutoff = 4 * fs.Gibi              // 5E9 is the max
-	memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
-	memoryPoolUseMmap   = false
+	largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
 )
 
 // Globals
@@ -75,13 +74,15 @@ func init() {
 		Description: "Backblaze B2",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
-			Name:     "account",
-			Help:     "Account ID or Application Key ID.",
-			Required: true,
+			Name:      "account",
+			Help:      "Account ID or Application Key ID.",
+			Required:  true,
+			Sensitive: true,
 		}, {
-			Name:     "key",
-			Help:     "Application Key.",
-			Required: true,
+			Name:      "key",
+			Help:      "Application Key.",
+			Required:  true,
+			Sensitive: true,
 		}, {
 			Name: "endpoint",
 			Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -147,6 +148,18 @@ might a maximum of "--transfers" chunks in progress at once.
 5,000,000 Bytes is the minimum size.`,
 			Default:  defaultChunkSize,
 			Advanced: true,
+		}, {
+			Name: "upload_concurrency",
+			Help: `Concurrency for multipart uploads.
+
+This is the number of chunks of the same file that are uploaded
+concurrently.
+
+Note that chunks are stored in memory and there may be up to
+"--transfers" * "--b2-upload-concurrency" chunks stored at once
+in memory.`,
+			Default:  4,
+			Advanced: true,
 		}, {
 			Name: "disable_checksum",
 			Help: `Disable checksums for large (> upload cutoff) files.
@@ -186,16 +199,16 @@ The minimum value is 1 second. The maximum value is one week.`,
 			Advanced: true,
 		}, {
 			Name:     "memory_pool_flush_time",
-			Default:  memoryPoolFlushTime,
+			Default:  fs.Duration(time.Minute),
 			Advanced: true,
-			Help: `How often internal memory buffer pools will be flushed.
-Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
-This option controls how often unused buffers will be removed from the pool.`,
+			Hide:     fs.OptionHideBoth,
+			Help:     `How often internal memory buffer pools will be flushed. (no longer used)`,
 		}, {
 			Name:     "memory_pool_use_mmap",
-			Default:  memoryPoolUseMmap,
+			Default:  false,
 			Advanced: true,
-			Help: `Whether to use mmap buffers in internal memory pool.`,
+			Hide:     fs.OptionHideBoth,
+			Help:     `Whether to use mmap buffers in internal memory pool. (no longer used)`,
 		}, {
 			Name:    config.ConfigEncoding,
 			Help:    config.ConfigEncodingHelp,
@@ -222,11 +235,10 @@ type Options struct {
 	UploadCutoff                  fs.SizeSuffix        `config:"upload_cutoff"`
 	CopyCutoff                    fs.SizeSuffix        `config:"copy_cutoff"`
 	ChunkSize                     fs.SizeSuffix        `config:"chunk_size"`
+	UploadConcurrency             int                  `config:"upload_concurrency"`
 	DisableCheckSum               bool                 `config:"disable_checksum"`
 	DownloadURL                   string               `config:"download_url"`
 	DownloadAuthorizationDuration fs.Duration          `config:"download_auth_duration"`
-	MemoryPoolFlushTime           fs.Duration          `config:"memory_pool_flush_time"`
-	MemoryPoolUseMmap             bool                 `config:"memory_pool_use_mmap"`
 	Enc                           encoder.MultiEncoder `config:"encoding"`
 }
@@ -251,7 +263,6 @@ type Fs struct {
 	authMu      sync.Mutex            // lock for authorizing the account
 	pacer       *fs.Pacer             // To pace and retry the API calls
 	uploadToken *pacer.TokenDispenser // control concurrency
-	pool        *pool.Pool            // memory pool
 }
 
 // Object describes a b2 object
@@ -456,12 +467,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		uploads:     make(map[string][]*api.GetUploadURLResponse),
 		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
-		pool: pool.New(
-			time.Duration(opt.MemoryPoolFlushTime),
-			int(opt.ChunkSize),
-			ci.Transfers,
-			opt.MemoryPoolUseMmap,
-		),
 	}
 	f.setRoot(root)
 	f.features = (&fs.Features{
@@ -595,23 +600,24 @@ func (f *Fs) clearUploadURL(bucketID string) {
 	f.uploadMu.Unlock()
 }
 
-// getBuf gets a buffer of f.opt.ChunkSize and an upload token
+// getRW gets a RW buffer and an upload token
 //
 // If noBuf is set then it just gets an upload token
-func (f *Fs) getBuf(noBuf bool) (buf []byte) {
+func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
 	f.uploadToken.Get()
 	if !noBuf {
-		buf = f.pool.Get()
+		rw = multipart.NewRW()
 	}
-	return buf
+	return rw
 }
 
-// putBuf returns a buffer to the memory pool and an upload token
+// putRW returns a RW buffer to the memory pool and returns an upload
+// token
 //
-// If noBuf is set then it just returns the upload token
-func (f *Fs) putBuf(buf []byte, noBuf bool) {
-	if !noBuf {
-		f.pool.Put(buf)
+// If buf is nil then it just returns the upload token
+func (f *Fs) putRW(rw *pool.RW) {
+	if rw != nil {
+		_ = rw.Close()
 	}
 	f.uploadToken.Put()
 }
@@ -1291,7 +1297,11 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
 		if err != nil {
 			return err
 		}
-		return up.Upload(ctx)
+		err = up.Copy(ctx)
+		if err != nil {
+			return err
+		}
+		return dstObj.decodeMetaDataFileInfo(up.info)
 	}
 
 	dstBucket, dstPath := dstObj.split()
@@ -1420,7 +1430,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	if err != nil {
 		return "", err
 	}
-	absPath := "/" + bucketPath
+	absPath := "/" + urlEncode(bucketPath)
 	link = RootURL + "/file/" + urlEncode(bucket) + absPath
 	bucketType, err := f.getbucketType(ctx, bucket)
 	if err != nil {
@@ -1859,11 +1869,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err != nil {
 		return err
 	}
-	if size == -1 {
+	if size < 0 {
 		// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
-		buf := o.fs.getBuf(false)
+		rw := o.fs.getRW(false)
 
-		n, err := io.ReadFull(in, buf)
+		n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
 		if err == nil {
 			bufReader := bufio.NewReader(in)
 			in = bufReader
@@ -1874,26 +1884,34 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			fs.Debugf(o, "File is big enough for chunked streaming")
 			up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
 			if err != nil {
-				o.fs.putBuf(buf, false)
+				o.fs.putRW(rw)
 				return err
 			}
 			// NB Stream returns the buffer and token
-			return up.Stream(ctx, buf)
-		} else if err == io.EOF || err == io.ErrUnexpectedEOF {
+			err = up.Stream(ctx, rw)
+			if err != nil {
+				return err
+			}
+			return o.decodeMetaDataFileInfo(up.info)
+		} else if err == io.EOF {
 			fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
-			defer o.fs.putBuf(buf, false)
-			size = int64(n)
-			in = bytes.NewReader(buf[:n])
+			defer o.fs.putRW(rw)
+			size = n
+			in = rw
 		} else {
-			o.fs.putBuf(buf, false)
+			o.fs.putRW(rw)
 			return err
 		}
 	} else if size > int64(o.fs.opt.UploadCutoff) {
-		up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
+		chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
+			Open:        o.fs,
+			OpenOptions: options,
+		})
 		if err != nil {
 			return err
 		}
-		return up.Upload(ctx)
+		up := chunkWriter.(*largeUpload)
+		return o.decodeMetaDataFileInfo(up.info)
 	}
 
 	modTime := src.ModTime(ctx)
@@ -2001,6 +2019,41 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	return o.decodeMetaDataFileInfo(&response)
 }
 
+// OpenChunkWriter returns the chunk size and a ChunkWriter
+//
+// Pass in the remote and the src object
+// You can also use options to hint at the desired chunk size
+func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
+	// FIXME what if file is smaller than 1 chunk?
+	if f.opt.Versions {
+		return info, nil, errNotWithVersions
+	}
+	if f.opt.VersionAt.IsSet() {
+		return info, nil, errNotWithVersionAt
+	}
+	//size := src.Size()
+
+	// Temporary Object under construction
+	o := &Object{
+		fs:     f,
+		remote: src.Remote(),
+	}
+
+	bucket, _ := o.split()
+	err = f.makeBucket(ctx, bucket)
+	if err != nil {
+		return info, nil, err
+	}
+
+	info = fs.ChunkWriterInfo{
+		ChunkSize:   int64(f.opt.ChunkSize),
+		Concurrency: o.fs.opt.UploadConcurrency,
+		//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
+	}
+	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
+	return info, up, err
+}
+
 // Remove an object
 func (o *Object) Remove(ctx context.Context) error {
 	bucket, bucketPath := o.split()
@@ -2028,14 +2081,15 @@ func (o *Object) ID() string {
 
 // Check the interfaces are satisfied
 var (
-	_ fs.Fs           = &Fs{}
-	_ fs.Purger       = &Fs{}
-	_ fs.Copier       = &Fs{}
-	_ fs.PutStreamer  = &Fs{}
-	_ fs.CleanUpper   = &Fs{}
-	_ fs.ListRer      = &Fs{}
-	_ fs.PublicLinker = &Fs{}
-	_ fs.Object       = &Object{}
-	_ fs.MimeTyper    = &Object{}
-	_ fs.IDer         = &Object{}
+	_ fs.Fs              = &Fs{}
+	_ fs.Purger          = &Fs{}
+	_ fs.Copier          = &Fs{}
+	_ fs.PutStreamer     = &Fs{}
+	_ fs.CleanUpper      = &Fs{}
+	_ fs.ListRer         = &Fs{}
+	_ fs.PublicLinker    = &Fs{}
+	_ fs.OpenChunkWriter = &Fs{}
+	_ fs.Object          = &Object{}
+	_ fs.MimeTyper       = &Object{}
+	_ fs.IDer            = &Object{}
 )
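The new `OpenChunkWriter` above plugs b2 into rclone's generic multipart machinery. As an illustration of the `fs.ChunkWriter` contract it returns, here is a sequential sketch; it is not rclone's `lib/multipart` code, which additionally uploads chunks concurrently and retries failures:

```go
import (
	"bytes"
	"context"
	"io"

	"github.com/rclone/rclone/fs"
)

// driveChunkWriter drives an fs.ChunkWriter by hand: WriteChunk is called
// once per chunk (numbered from 0), Close commits the upload and Abort
// abandons it on error.
func driveChunkWriter(ctx context.Context, info fs.ChunkWriterInfo, w fs.ChunkWriter, in io.Reader) error {
	buf := make([]byte, info.ChunkSize)
	for chunk := 0; ; chunk++ {
		n, err := io.ReadFull(in, buf)
		if n > 0 {
			// WriteChunk takes an io.ReadSeeker so the body can be rewound and retried
			if _, werr := w.WriteChunk(ctx, chunk, bytes.NewReader(buf[:n])); werr != nil {
				_ = w.Abort(ctx)
				return werr
			}
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			break // short read: input exhausted
		} else if err != nil {
			_ = w.Abort(ctx)
			return err
		}
	}
	return w.Close(ctx)
}
```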
@@ -1,10 +1,19 @@
 package b2
 
 import (
+	"bytes"
+	"context"
+	"fmt"
 	"testing"
 	"time"
 
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/object"
+	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/rclone/rclone/lib/random"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
@@ -168,3 +177,100 @@ func TestParseTimeString(t *testing.T) {
 	}
 
 }
 
+// The integration tests do a reasonable job of testing the normal
+// copy but don't test the chunked copy.
+func (f *Fs) InternalTestChunkedCopy(t *testing.T) {
+	ctx := context.Background()
+
+	contents := random.String(8 * 1024 * 1024)
+	item := fstest.NewItem("chunked-copy", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+	src := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+	defer func() {
+		assert.NoError(t, src.Remove(ctx))
+	}()
+
+	var itemCopy = item
+	itemCopy.Path += ".copy"
+
+	// Set copy cutoff to mininum value so we make chunks
+	origCutoff := f.opt.CopyCutoff
+	f.opt.CopyCutoff = minChunkSize
+	defer func() {
+		f.opt.CopyCutoff = origCutoff
+	}()
+
+	// Do the copy
+	dst, err := f.Copy(ctx, src, itemCopy.Path)
+	require.NoError(t, err)
+	defer func() {
+		assert.NoError(t, dst.Remove(ctx))
+	}()
+
+	// Check size
+	assert.Equal(t, src.Size(), dst.Size())
+
+	// Check modtime
+	srcModTime := src.ModTime(ctx)
+	dstModTime := dst.ModTime(ctx)
+	assert.True(t, srcModTime.Equal(dstModTime))
+
+	// Make sure contents are correct
+	gotContents := fstests.ReadObject(ctx, t, dst, -1)
+	assert.Equal(t, contents, gotContents)
+}
+
+// The integration tests do a reasonable job of testing the normal
+// streaming upload but don't test the chunked streaming upload.
+func (f *Fs) InternalTestChunkedStreamingUpload(t *testing.T, size int) {
+	ctx := context.Background()
+	contents := random.String(size)
+	item := fstest.NewItem(fmt.Sprintf("chunked-streaming-upload-%d", size), contents, fstest.Time("2001-05-06T04:05:06.499Z"))
+
+	// Set chunk size to mininum value so we make chunks
+	origOpt := f.opt
+	f.opt.ChunkSize = minChunkSize
+	f.opt.UploadCutoff = 0
+	defer func() {
+		f.opt = origOpt
+	}()
+
+	// Do the streaming upload
+	src := object.NewStaticObjectInfo(item.Path, item.ModTime, -1, true, item.Hashes, f)
+	in := bytes.NewBufferString(contents)
+	dst, err := f.PutStream(ctx, in, src)
+	require.NoError(t, err)
+	defer func() {
+		assert.NoError(t, dst.Remove(ctx))
+	}()
+
+	// Check size
+	assert.Equal(t, int64(size), dst.Size())
+
+	// Check modtime
+	srcModTime := src.ModTime(ctx)
+	dstModTime := dst.ModTime(ctx)
+	assert.Equal(t, srcModTime, dstModTime)
+
+	// Make sure contents are correct
+	gotContents := fstests.ReadObject(ctx, t, dst, -1)
+	assert.Equal(t, contents, gotContents, "Contents incorrect")
+}
+
+// -run TestIntegration/FsMkdir/FsPutFiles/Internal
+func (f *Fs) InternalTest(t *testing.T) {
+	t.Run("ChunkedCopy", f.InternalTestChunkedCopy)
+	for _, size := range []fs.SizeSuffix{
+		minChunkSize - 1,
+		minChunkSize,
+		minChunkSize + 1,
+		(3 * minChunkSize) / 2,
+		(5 * minChunkSize) / 2,
+	} {
+		t.Run(fmt.Sprintf("ChunkedStreamingUpload/%d", size), func(t *testing.T) {
+			f.InternalTestChunkedStreamingUpload(t, int(size))
+		})
+	}
+}
+
+var _ fstests.InternalTester = (*Fs)(nil)
@@ -5,7 +5,6 @@
|
||||
package b2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
@@ -14,7 +13,6 @@ import (
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/b2/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -80,12 +78,14 @@ type largeUpload struct {
|
||||
wrap accounting.WrapFn // account parts being transferred
|
||||
id string // ID of the file being uploaded
|
||||
size int64 // total size
|
||||
parts int64 // calculated number of parts, if known
|
||||
parts int // calculated number of parts, if known
|
||||
sha1smu sync.Mutex // mutex to protect sha1s
|
||||
sha1s []string // slice of SHA1s for each part
|
||||
uploadMu sync.Mutex // lock for upload variable
|
||||
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
|
||||
chunkSize int64 // chunk size to use
|
||||
src *Object // if copying, object we are reading from
|
||||
info *api.FileInfo // final response with info about the object
|
||||
}
|
||||
|
||||
// newLargeUpload starts an upload of object o from in with metadata in src
|
||||
@@ -93,18 +93,16 @@ type largeUpload struct {
|
||||
// If newInfo is set then metadata from that will be used instead of reading it from src
|
||||
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
|
||||
size := src.Size()
|
||||
parts := int64(0)
|
||||
sha1SliceSize := int64(maxParts)
|
||||
parts := 0
|
||||
chunkSize := defaultChunkSize
|
||||
if size == -1 {
|
||||
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
|
||||
} else {
|
||||
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
|
||||
parts = size / int64(chunkSize)
|
||||
parts = int(size / int64(chunkSize))
|
||||
if size%int64(chunkSize) != 0 {
|
||||
parts++
|
||||
}
|
||||
sha1SliceSize = parts
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
@@ -152,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
||||
id: response.ID,
|
||||
size: size,
|
||||
parts: parts,
|
||||
sha1s: make([]string, sha1SliceSize),
|
||||
sha1s: make([]string, 0, 16),
|
||||
chunkSize: int64(chunkSize),
|
||||
}
|
||||
// unwrap the accounting from the input, we use wrap to put it
|
||||
@@ -171,24 +169,26 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
	up.uploadMu.Lock()
	defer up.uploadMu.Unlock()
	if len(up.uploads) == 0 {
		opts := rest.Opts{
			Method: "POST",
			Path:   "/b2_get_upload_part_url",
		}
		var request = api.GetUploadPartURLRequest{
			ID: up.id,
		}
		err := up.f.pacer.Call(func() (bool, error) {
			resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
			return up.f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return nil, fmt.Errorf("failed to get upload URL: %w", err)
		}
	} else {
	if len(up.uploads) > 0 {
		upload, up.uploads = up.uploads[0], up.uploads[1:]
		up.uploadMu.Unlock()
		return upload, nil
	}
	up.uploadMu.Unlock()

	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_get_upload_part_url",
	}
	var request = api.GetUploadPartURLRequest{
		ID: up.id,
	}
	err = up.f.pacer.Call(func() (bool, error) {
		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
		return up.f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed to get upload URL: %w", err)
	}
	return upload, nil
}
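The rewritten getUploadURL pops a cached URL under the lock but releases the mutex before the network call, so one slow request no longer blocks every other uploader. A stripped-down sketch of that pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type urlPool struct {
	mu   sync.Mutex
	free []string // URLs handed back by previous callers
}

// get pops a cached URL under the lock, or drops the lock and fetches
// a fresh one so the slow call does not serialize other callers.
func (p *urlPool) get(fetch func() (string, error)) (string, error) {
	p.mu.Lock()
	if len(p.free) > 0 {
		u := p.free[0]
		p.free = p.free[1:]
		p.mu.Unlock()
		return u, nil
	}
	p.mu.Unlock()
	return fetch()
}

// put hands a URL back for reuse.
func (p *urlPool) put(u string) {
	p.mu.Lock()
	p.free = append(p.free, u)
	p.mu.Unlock()
}

func main() {
	p := &urlPool{}
	u, _ := p.get(func() (string, error) { return "https://pod.example/upload", nil })
	p.put(u)
	u2, _ := p.get(nil) // served from the cache, fetch never called
	fmt.Println(u == u2)
}
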
@@ -203,10 +203,39 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
	up.uploadMu.Unlock()
}

// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
	err := up.f.pacer.Call(func() (bool, error) {
		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Add a sha1 to the sha1s being built up
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
	up.sha1smu.Lock()
	defer up.sha1smu.Unlock()
	if len(up.sha1s) < chunkNumber+1 {
		up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
	}
	up.sha1s[chunkNumber] = sha1
}

// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
	// Only account after the checksum reads have been done
	if do, ok := reader.(pool.DelayAccountinger); ok {
		// To figure out this number, do a transfer and if the accounted size is 0 or a
		// multiple of what it should be, increase or decrease this number.
		do.DelayAccounting(1)
	}

	err = up.f.pacer.Call(func() (bool, error) {
		// Discover the size by seeking to the end
		size, err = reader.Seek(0, io.SeekEnd)
		if err != nil {
			return false, err
		}

		// rewind the reader on retry and after reading size
		_, err = reader.Seek(0, io.SeekStart)
		if err != nil {
			return false, err
		}

		fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)

		// Get upload URL
		upload, err := up.getUploadURL(ctx)
@@ -214,8 +243,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
			return false, err
		}

		in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
		size := int64(len(body)) + int64(in.AdditionalLength())
		in := newHashAppendingReader(reader, sha1.New())
		sizeWithHash := size + int64(in.AdditionalLength())

		// Authorization
		//
@@ -245,10 +274,10 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
			Body: up.wrap(in),
			ExtraHeaders: map[string]string{
				"Authorization":    upload.AuthorizationToken,
				"X-Bz-Part-Number": fmt.Sprintf("%d", part),
				"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
				sha1Header:         "hex_digits_at_end",
			},
			ContentLength: &size,
			ContentLength: &sizeWithHash,
		}

		var response api.UploadPartResponse
@@ -256,7 +285,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
		resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
		retry, err := up.f.shouldRetry(ctx, resp, err)
		if err != nil {
			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
		}
		// On retryable error clear PartUploadURL
		if retry {
@@ -264,30 +293,30 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
			upload = nil
		}
		up.returnUploadURL(upload)
		up.sha1s[part-1] = in.HexSum()
		up.addSha1(chunkNumber, in.HexSum())
		return retry, err
	})
	if err != nil {
		fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
		fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
	} else {
		fs.Debugf(up.o, "Done sending chunk %d", part)
		fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
	}
	return err
	return size, err
}

// Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
	err := up.f.pacer.Call(func() (bool, error) {
		fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
		opts := rest.Opts{
			Method: "POST",
			Path:   "/b2_copy_part",
		}
		offset := (part - 1) * up.chunkSize // where we are in the source file
		offset := int64(part) * up.chunkSize // where we are in the source file
		var request = api.CopyPartRequest{
			SourceID:    up.src.id,
			LargeFileID: up.id,
			PartNumber:  part,
			PartNumber:  int64(part + 1),
			Range:       fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
		}
		var response api.UploadPartResponse
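The switch from 1-based `part` to 0-based `chunkNumber` moves the off-by-one bookkeeping to the API boundary: B2's b2_upload_part and b2_copy_part number parts from 1, while the source byte offset is a plain 0-based multiple of the chunk size. A sketch of the translation, assuming nothing beyond the arithmetic above:

package main

import "fmt"

// b2PartNumber converts a 0-based chunk index to B2's 1-based part number.
func b2PartNumber(chunkNumber int) int64 { return int64(chunkNumber) + 1 }

// b2Offset is the 0-based byte offset of a chunk in the source file.
func b2Offset(chunkNumber int, chunkSize int64) int64 { return int64(chunkNumber) * chunkSize }

func main() {
	fmt.Println(b2PartNumber(0), b2Offset(0, 100<<20)) // 1 0
	fmt.Println(b2PartNumber(2), b2Offset(2, 100<<20)) // 3 209715200
}
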
@@ -296,7 +325,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64
		if err != nil {
			fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
		}
		up.sha1s[part-1] = response.SHA1
		up.addSha1(part, response.SHA1)
		return retry, err
	})
	if err != nil {
@@ -307,8 +336,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64
	return err
}

// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
// Close closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error {
	fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
	opts := rest.Opts{
		Method: "POST",
@@ -326,11 +355,12 @@ func (up *largeUpload) finish(ctx context.Context) error {
	if err != nil {
		return err
	}
	return up.o.decodeMetaDataFileInfo(&response)
	up.info = &response
	return nil
}

// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
// Abort aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error {
	fs.Debugf(up.o, "Cancelling large file %s", up.what)
	opts := rest.Opts{
		Method: "POST",
@@ -355,157 +385,99 @@ func (up *largeUpload) cancel(ctx context.Context) error {
// reaches EOF.
//
// Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
	defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
	var (
		g, gCtx      = errgroup.WithContext(ctx)
		hasMoreParts = true
	)
	up.size = int64(len(initialUploadBlock))
	g.Go(func() error {
		for part := int64(1); hasMoreParts; part++ {
			// Get a block of memory from the pool and token which limits concurrency.
			var buf []byte
			if part == 1 {
				buf = initialUploadBlock
			} else {
				buf = up.f.getBuf(false)
			}

			// Fail fast, in case an errgroup managed function returns an error
			// gCtx is cancelled. There is no point in uploading all the other parts.
			if gCtx.Err() != nil {
				up.f.putBuf(buf, false)
				return nil
			}

			// Read the chunk
			var n int
			if part == 1 {
				n = len(buf)
			} else {
				n, err = io.ReadFull(up.in, buf)
				if err == io.ErrUnexpectedEOF {
					fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
					buf = buf[:n]
					hasMoreParts = false
				} else if err == io.EOF {
					fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
					up.f.putBuf(buf, false)
					return nil
				} else if err != nil {
					// other kinds of errors indicate failure
					up.f.putBuf(buf, false)
					return err
				}
			}

			// Keep stats up to date
			up.parts = part
			up.size += int64(n)
			if part > maxParts {
				up.f.putBuf(buf, false)
				return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
			}

			part := part // for the closure
			g.Go(func() (err error) {
				defer up.f.putBuf(buf, false)
				return up.transferChunk(gCtx, part, buf)
			})
	up.size = initialUploadBlock.Size()
	up.parts = 0
	for part := 0; hasMoreParts; part++ {
		// Get a block of memory from the pool and token which limits concurrency.
		var rw *pool.RW
		if part == 0 {
			rw = initialUploadBlock
		} else {
			rw = up.f.getRW(false)
		}
		return nil
	})

		// Fail fast, in case an errgroup managed function returns an error
		// gCtx is cancelled. There is no point in uploading all the other parts.
		if gCtx.Err() != nil {
			up.f.putRW(rw)
			break
		}

		// Read the chunk
		var n int64
		if part == 0 {
			n = rw.Size()
		} else {
			n, err = io.CopyN(rw, up.in, up.chunkSize)
			if err == io.EOF {
				fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
				hasMoreParts = false
			} else if err != nil {
				// other kinds of errors indicate failure
				up.f.putRW(rw)
				return err
			}
		}

		// Keep stats up to date
		up.parts += 1
		up.size += n
		if part > maxParts {
			up.f.putRW(rw)
			return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
		}

		part := part // for the closure
		g.Go(func() (err error) {
			defer up.f.putRW(rw)
			_, err = up.WriteChunk(gCtx, part, rw)
			return err
		})
	}
	err = g.Wait()
	if err != nil {
		return err
	}
	up.sha1s = up.sha1s[:up.parts]
	return up.finish(ctx)
	return up.Close(ctx)
}
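A condensed sketch of the streaming shape above, under hypothetical names: the producer loop reads chunks sequentially, each upload runs inside the errgroup, and a cancelled group context stops new work early.

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"

	"golang.org/x/sync/errgroup"
)

// streamChunks reads chunkSize pieces sequentially and uploads them
// concurrently via send, stopping early if any upload fails.
func streamChunks(ctx context.Context, in io.Reader, chunkSize int64, send func(part int, data []byte) error) error {
	g, gCtx := errgroup.WithContext(ctx)
	for part := 0; ; part++ {
		if gCtx.Err() != nil { // a previous part failed - stop producing
			break
		}
		var buf bytes.Buffer
		n, err := io.CopyN(&buf, in, chunkSize)
		if err != nil && err != io.EOF {
			_ = g.Wait()
			return err
		}
		if n > 0 {
			part := part // capture for the closure
			g.Go(func() error { return send(part, buf.Bytes()) })
		}
		if err == io.EOF { // short read means this was the last chunk
			break
		}
	}
	return g.Wait()
}

func main() {
	in := bytes.NewReader(make([]byte, 250))
	err := streamChunks(context.Background(), in, 100, func(part int, data []byte) error {
		fmt.Printf("part %d: %d bytes\n", part, len(data))
		return nil
	})
	fmt.Println("err:", err)
}
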
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) (err error) {
	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
// Copy the chunks from the source to the destination
func (up *largeUpload) Copy(ctx context.Context) (err error) {
	defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
	fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
	var (
		g, gCtx    = errgroup.WithContext(ctx)
		remaining  = up.size
		uploadPool *pool.Pool
		ci         = fs.GetConfig(ctx)
		g, gCtx   = errgroup.WithContext(ctx)
		remaining = up.size
	)
	// If using large chunk size then make a temporary pool
	if up.chunkSize <= int64(up.f.opt.ChunkSize) {
		uploadPool = up.f.pool
	} else {
		uploadPool = pool.New(
			time.Duration(up.f.opt.MemoryPoolFlushTime),
			int(up.chunkSize),
			ci.Transfers,
			up.f.opt.MemoryPoolUseMmap,
		)
		defer uploadPool.Flush()
	}
	// Get an upload token and a buffer
	getBuf := func() (buf []byte) {
		up.f.getBuf(true)
		if !up.doCopy {
			buf = uploadPool.Get()
	g.SetLimit(up.f.opt.UploadConcurrency)
	for part := 0; part < up.parts; part++ {
		// Fail fast, in case an errgroup managed function returns an error
		// gCtx is cancelled. There is no point in copying all the other parts.
		if gCtx.Err() != nil {
			break
		}
		return buf
	}
	// Put an upload token and a buffer
	putBuf := func(buf []byte) {
		if !up.doCopy {
			uploadPool.Put(buf)

		reqSize := remaining
		if reqSize >= up.chunkSize {
			reqSize = up.chunkSize
		}
		up.f.putBuf(nil, true)

		part := part // for the closure
		g.Go(func() (err error) {
			return up.copyChunk(gCtx, part, reqSize)
		})
		remaining -= reqSize
	}
	g.Go(func() error {
		for part := int64(1); part <= up.parts; part++ {
			// Get a block of memory from the pool and token which limits concurrency.
			buf := getBuf()

			// Fail fast, in case an errgroup managed function returns an error
			// gCtx is cancelled. There is no point in uploading all the other parts.
			if gCtx.Err() != nil {
				putBuf(buf)
				return nil
			}

			reqSize := remaining
			if reqSize >= up.chunkSize {
				reqSize = up.chunkSize
			}

			if !up.doCopy {
				// Read the chunk
				buf = buf[:reqSize]
				_, err = io.ReadFull(up.in, buf)
				if err != nil {
					putBuf(buf)
					return err
				}
			}

			part := part // for the closure
			g.Go(func() (err error) {
				defer putBuf(buf)
				if !up.doCopy {
					err = up.transferChunk(gCtx, part, buf)
				} else {
					err = up.copyChunk(gCtx, part, reqSize)
				}
				return err
			})
			remaining -= reqSize
		}
		return nil
	})
	err = g.Wait()
	if err != nil {
		return err
	}
	return up.finish(ctx)
	return up.Close(ctx)
}

@@ -52,7 +52,7 @@ func (e *Error) Error() string {
		out += ": " + e.Message
	}
	if e.ContextInfo != nil {
		out += fmt.Sprintf(" (%+v)", e.ContextInfo)
		out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
	}
	return out
}
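The change swaps %+v for a string conversion: ContextInfo holds raw JSON bytes, so %+v printed the byte values while %s prints the JSON text. A runnable illustration, assuming ContextInfo is a json.RawMessage:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := json.RawMessage(`{"conflicts":1}`)
	fmt.Printf(" (%+v)\n", raw)        // prints the underlying byte values
	fmt.Printf(" (%s)\n", string(raw)) // prints the JSON text itself
}
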
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"

// Types of things in Item
// Types of things in Item/ItemMini
const (
	ItemTypeFolder = "folder"
	ItemTypeFile   = "file"
@@ -72,20 +72,31 @@ const (
	ItemStatusDeleted = "deleted"
)

// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
	Type       string `json:"type"`
	ID         string `json:"id"`
	SequenceID int64  `json:"sequence_id,string"`
	Etag       string `json:"etag"`
	SHA1       string `json:"sha1"`
	Name       string `json:"name"`
}

// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
	Type              string  `json:"type"`
	ID                string  `json:"id"`
	SequenceID        string  `json:"sequence_id"`
	Etag              string  `json:"etag"`
	SHA1              string  `json:"sha1"`
	Name              string  `json:"name"`
	Size              float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
	CreatedAt         Time    `json:"created_at"`
	ModifiedAt        Time    `json:"modified_at"`
	ContentCreatedAt  Time    `json:"content_created_at"`
	ContentModifiedAt Time    `json:"content_modified_at"`
	ItemStatus        string  `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
	Type              string   `json:"type"`
	ID                string   `json:"id"`
	SequenceID        int64    `json:"sequence_id,string"`
	Etag              string   `json:"etag"`
	SHA1              string   `json:"sha1"`
	Name              string   `json:"name"`
	Size              float64  `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
	CreatedAt         Time     `json:"created_at"`
	ModifiedAt        Time     `json:"modified_at"`
	ContentCreatedAt  Time     `json:"content_created_at"`
	ContentModifiedAt Time     `json:"content_modified_at"`
	ItemStatus        string   `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
	Parent            ItemMini `json:"parent"`
	SharedLink        struct {
		URL    string `json:"url,omitempty"`
		Access string `json:"access,omitempty"`
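SequenceID changes from string to int64 with a `,string` tag: Box sends the value as a quoted number, and the tag tells encoding/json to parse the quoted form into a real integer so SequenceIDs can be compared numerically. A minimal demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

type mini struct {
	SequenceID int64 `json:"sequence_id,string"`
}

func main() {
	var m mini
	// Box sends `"42"`, not 42; the `,string` option handles the quotes.
	_ = json.Unmarshal([]byte(`{"sequence_id":"42"}`), &m)
	fmt.Println(m.SequenceID + 1) // 43, usable as a number
}
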
@@ -281,3 +292,30 @@ type User struct {
	Address   string `json:"address"`
	AvatarURL string `json:"avatar_url"`
}

// FileTreeChangeEventTypes are the events that can require cache invalidation
var FileTreeChangeEventTypes = map[string]struct{}{
	"ITEM_COPY":                 {},
	"ITEM_CREATE":               {},
	"ITEM_MAKE_CURRENT_VERSION": {},
	"ITEM_MODIFY":               {},
	"ITEM_MOVE":                 {},
	"ITEM_RENAME":               {},
	"ITEM_TRASH":                {},
	"ITEM_UNDELETE_VIA_TRASH":   {},
	"ITEM_UPLOAD":               {},
}

// Event is an array element in the response returned from /events
type Event struct {
	EventType string `json:"event_type"`
	EventID   string `json:"event_id"`
	Source    Item   `json:"source"`
}

// Events is returned from /events
type Events struct {
	ChunkSize          int64   `json:"chunk_size"`
	Entries            []Event `json:"entries"`
	NextStreamPosition int64   `json:"next_stream_position"`
}

@@ -27,6 +27,7 @@ import (
	"sync/atomic"
	"time"

	"github.com/golang-jwt/jwt/v4"
	"github.com/rclone/rclone/backend/box/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
@@ -45,7 +46,6 @@ import (
	"github.com/rclone/rclone/lib/rest"
	"github.com/youmark/pkcs8"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/jws"
)

const (
@@ -76,6 +76,11 @@ var (
	}
)

type boxCustomClaims struct {
	jwt.StandardClaims
	BoxSubType string `json:"box_sub_type,omitempty"`
}

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
@@ -102,16 +107,18 @@ func init() {
			return nil, nil
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name:     "root_folder_id",
			Help:     "Fill in for rclone to use a non root folder as its starting point.",
			Default:  "0",
			Advanced: true,
			Name:      "root_folder_id",
			Help:      "Fill in for rclone to use a non root folder as its starting point.",
			Default:   "0",
			Advanced:  true,
			Sensitive: true,
		}, {
			Name: "box_config_file",
			Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
		}, {
			Name: "access_token",
			Help: "Box App Primary Access Token\n\nLeave blank normally.",
			Name:      "access_token",
			Help:      "Box App Primary Access Token\n\nLeave blank normally.",
			Sensitive: true,
		}, {
			Name:    "box_sub_type",
			Default: "user",
@@ -142,6 +149,23 @@ func init() {
			Default:  "",
			Help:     "Only show items owned by the login (email address) passed in.",
			Advanced: true,
		}, {
			Name:    "impersonate",
			Default: "",
			Help: `Impersonate this user ID when using a service account.

Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.

The user ID is the Box identifier for a user. User IDs can be found for
any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.

See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
			Advanced:  true,
			Sensitive: true,
		}, {
			Name: config.ConfigEncoding,
			Help: config.ConfigEncodingHelp,
@@ -178,7 +202,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
	signingHeaders := getSigningHeaders(boxConfig)
	queryParams := getQueryParams(boxConfig)
	client := fshttp.NewClient(ctx)
	err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
	err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
	return err
}

@@ -194,34 +218,31 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
	return boxConfig, nil
}

func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
	val, err := jwtutil.RandomHex(20)
	if err != nil {
		return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
	}

	claims = &jws.ClaimSet{
		Iss: boxConfig.BoxAppSettings.ClientID,
		Sub: boxConfig.EnterpriseID,
		Aud: tokenURL,
		Exp: time.Now().Add(time.Second * 45).Unix(),
		PrivateClaims: map[string]interface{}{
			"box_sub_type": boxSubType,
			"aud":          tokenURL,
			"jti":          val,
	claims = &boxCustomClaims{
		//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
		//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
		StandardClaims: jwt.StandardClaims{
			Id:        val,
			Issuer:    boxConfig.BoxAppSettings.ClientID,
			Subject:   boxConfig.EnterpriseID,
			Audience:  tokenURL,
			ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
		},
		BoxSubType: boxSubType,
	}

	return claims, nil
}

func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
	signingHeaders := &jws.Header{
		Algorithm: "RS256",
		Typ:       "JWT",
		KeyID:     boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
	signingHeaders := map[string]interface{}{
		"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
	}

	return signingHeaders
}
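The move from golang.org/x/oauth2/jws to github.com/golang-jwt/jwt/v4 replaces the ad-hoc PrivateClaims map with a typed claims struct. A minimal sketch of the same embedding trick, signed with HS256 and a throwaway key purely to keep it runnable (Box itself signs with RS256):

package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

// customClaims embeds the standard claim set and adds one private claim,
// the same shape boxCustomClaims uses above.
type customClaims struct {
	jwt.StandardClaims        // deprecated in v4 but still usable, as the lint comments note
	BoxSubType         string `json:"box_sub_type,omitempty"`
}

func main() {
	claims := customClaims{
		StandardClaims: jwt.StandardClaims{
			Id:        "random-jti",
			Issuer:    "client-id",
			Subject:   "enterprise-id",
			Audience:  "https://api.box.com/oauth2/token",
			ExpiresAt: time.Now().Add(45 * time.Second).Unix(),
		},
		BoxSubType: "user",
	}
	token, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte("demo-key"))
	fmt.Println(len(token) > 0, err)
}
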
@@ -258,19 +279,29 @@ type Options struct {
	AccessToken string `config:"access_token"`
	ListChunk   int    `config:"list_chunk"`
	OwnedBy     string `config:"owned_by"`
	Impersonate string `config:"impersonate"`
}

// ItemMeta defines metadata we cache for each Item ID
type ItemMeta struct {
	SequenceID int64  // the most recent event processed for this item
	ParentID   string // ID of the parent directory of this item
	Name       string // leaf name of this item
}

// Fs represents a remote box
type Fs struct {
	name         string                // name of this remote
	root         string                // the path we are working on
	opt          Options               // parsed options
	features     *fs.Features          // optional features
	srv          *rest.Client          // the connection to the server
	dirCache     *dircache.DirCache    // Map of directory path to directory id
	pacer        *fs.Pacer             // pacer for API calls
	tokenRenewer *oauthutil.Renew      // renew the token on expiry
	uploadToken  *pacer.TokenDispenser // control concurrency
	name            string                // name of this remote
	root            string                // the path we are working on
	opt             Options               // parsed options
	features        *fs.Features          // optional features
	srv             *rest.Client          // the connection to the server
	dirCache        *dircache.DirCache    // Map of directory path to directory id
	pacer           *fs.Pacer             // pacer for API calls
	tokenRenewer    *oauthutil.Renew      // renew the token on expiry
	uploadToken     *pacer.TokenDispenser // control concurrency
	itemMetaCacheMu *sync.Mutex           // protects itemMetaCache
	itemMetaCache   map[string]ItemMeta   // map of Item ID to selected metadata
}

// Object describes a box object
@@ -418,12 +449,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:        name,
		root:        root,
		opt:         *opt,
		srv:         rest.NewClient(client).SetRoot(rootURL),
		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
		name:            name,
		root:            root,
		opt:             *opt,
		srv:             rest.NewClient(client).SetRoot(rootURL),
		pacer:           fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		uploadToken:     pacer.NewTokenDispenser(ci.Transfers),
		itemMetaCacheMu: new(sync.Mutex),
		itemMetaCache:   make(map[string]ItemMeta),
	}
	f.features = (&fs.Features{
		CaseInsensitive: true,
@@ -436,6 +469,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
	}

	// If using impersonate set an as-user header
	if f.opt.Impersonate != "" {
		f.srv.SetHeader("as-user", f.opt.Impersonate)
	}

	jsonFile, ok := m.Get("box_config_file")
	boxSubType, boxSubTypeOk := m.Get("box_sub_type")

@@ -678,6 +716,17 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
			}
			entries = append(entries, o)
		}

		// Cache some metadata for this Item to help us process events later
		// on. In particular, the box event API does not provide the old path
		// of the Item when it is renamed/deleted/moved/etc.
		f.itemMetaCacheMu.Lock()
		cachedItemMeta, found := f.itemMetaCache[info.ID]
		if !found || cachedItemMeta.SequenceID < info.SequenceID {
			f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
		}
		f.itemMetaCacheMu.Unlock()

		return false
	})
	if err != nil {
@@ -1117,7 +1166,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
	var (
		deleteErrors       = int64(0)
		deleteErrors       atomic.Uint64
		concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
		wg                 sync.WaitGroup
	)
@@ -1133,7 +1182,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
			err := f.deletePermanently(ctx, item.Type, item.ID)
			if err != nil {
				fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
				atomic.AddInt64(&deleteErrors, 1)
				deleteErrors.Add(1)
			}
		}()
	} else {
@@ -1142,12 +1191,250 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
		return false
	})
	wg.Wait()
	if deleteErrors != 0 {
		return fmt.Errorf("failed to delete %d trash items", deleteErrors)
	if deleteErrors.Load() != 0 {
		return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
	}
	return err
}

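deleteErrors moves from a plain int64 mutated with atomic.AddInt64 to the atomic.Uint64 type added in Go 1.19, which makes an unsynchronized access a compile error rather than a code-review concern. A minimal illustration:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var errCount atomic.Uint64 // zero value is ready to use
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			errCount.Add(1) // safe from any goroutine
		}()
	}
	wg.Wait()
	fmt.Println(errCount.Load()) // 10
}
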
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	go func() {
		// get the `stream_position` early so all changes from now on get processed
		streamPosition, err := f.changeNotifyStreamPosition(ctx)
		if err != nil {
			fs.Infof(f, "Failed to get StreamPosition: %s", err)
		}

		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case pollInterval, ok := <-pollIntervalChan:
				if !ok {
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				if ticker != nil {
					ticker.Stop()
					ticker, tickerC = nil, nil
				}
				if pollInterval != 0 {
					ticker = time.NewTicker(pollInterval)
					tickerC = ticker.C
				}
			case <-tickerC:
				if streamPosition == "" {
					streamPosition, err = f.changeNotifyStreamPosition(ctx)
					if err != nil {
						fs.Infof(f, "Failed to get StreamPosition: %s", err)
						continue
					}
				}
				streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition)
				if err != nil {
					fs.Infof(f, "Change notify listener failure: %s", err)
				}
			}
		}
	}()
}

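The select loop above is a resettable poll timer: receiving on a nil channel blocks forever, so polling is effectively off until a non-zero interval arrives. A stripped-down sketch of that pattern with hypothetical names:

package main

import (
	"fmt"
	"time"
)

// pollLoop runs work on every tick and rebuilds the ticker whenever a
// new interval arrives; closing intervals stops the loop.
func pollLoop(intervals <-chan time.Duration, work func()) {
	var ticker *time.Ticker
	var tickerC <-chan time.Time // nil channel: its select case never fires
	for {
		select {
		case d, ok := <-intervals:
			if ticker != nil {
				ticker.Stop()
				ticker, tickerC = nil, nil
			}
			if !ok {
				return
			}
			if d != 0 {
				ticker = time.NewTicker(d)
				tickerC = ticker.C
			}
		case <-tickerC:
			work()
		}
	}
}

func main() {
	intervals := make(chan time.Duration)
	done := make(chan struct{})
	go func() { pollLoop(intervals, func() { fmt.Println("poll") }); close(done) }()
	intervals <- 10 * time.Millisecond
	time.Sleep(35 * time.Millisecond)
	close(intervals)
	<-done
}
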
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/events",
		Parameters: fieldsValue(),
	}
	opts.Parameters.Set("stream_position", "now")
	opts.Parameters.Set("stream_type", "changes")

	var result api.Events
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", err
	}

	return strconv.FormatInt(result.NextStreamPosition, 10), nil
}

// Attempts to construct the full path for an object, given the ID of its
// parent directory and the name of the object.
//
// Can return "" if the parentID is not currently in the directory cache.
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
	fullPath = ""
	name := f.opt.Enc.ToStandardName(childName)
	if parentID != "" {
		if parentDir, ok := f.dirCache.GetInv(parentID); ok {
			if len(parentDir) > 0 {
				fullPath = parentDir + "/" + name
			} else {
				fullPath = name
			}
		}
	} else {
		// No parent, this object is at the root
		fullPath = name
	}
	return fullPath
}

func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string) (nextStreamPosition string, err error) {
	nextStreamPosition = streamPosition

	// box can send duplicate Event IDs; filter any in a single notify run
	processedEventIDs := make(map[string]bool)

	for {
		limit := f.opt.ListChunk

		// box only allows a max of 500 events
		if limit > 500 {
			limit = 500
		}

		opts := rest.Opts{
			Method:     "GET",
			Path:       "/events",
			Parameters: fieldsValue(),
		}
		opts.Parameters.Set("stream_position", nextStreamPosition)
		opts.Parameters.Set("stream_type", "changes")
		opts.Parameters.Set("limit", strconv.Itoa(limit))

		var result api.Events
		var resp *http.Response
		fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return "", err
		}

		if result.ChunkSize != int64(len(result.Entries)) {
			return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
		}

		nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
		if result.ChunkSize == 0 {
			return nextStreamPosition, nil
		}

		type pathToClear struct {
			path      string
			entryType fs.EntryType
		}
		var pathsToClear []pathToClear
		newEventIDs := 0
		for _, entry := range result.Entries {
			if entry.EventID == "" || processedEventIDs[entry.EventID] { // missing Event ID, or already saw this one
				continue
			}
			processedEventIDs[entry.EventID] = true
			newEventIDs++

			if entry.Source.ID == "" { // missing File or Folder ID
				continue
			}
			if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
				continue
			}

			// Only interested in event types that result in a file tree change
			if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
				continue
			}

			f.itemMetaCacheMu.Lock()
			itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
			if cachedItemMetaFound {
				if itemMeta.SequenceID >= entry.Source.SequenceID {
					// Item in the cache has the same or newer SequenceID than
					// this event. Ignore this event, it must be old.
					f.itemMetaCacheMu.Unlock()
					continue
				}

				// This event is newer. Delete its entry from the cache,
				// we'll notify about its change below, then it's up to a
				// future list operation to repopulate the cache.
				delete(f.itemMetaCache, entry.Source.ID)
			}
			f.itemMetaCacheMu.Unlock()

			entryType := fs.EntryDirectory
			if entry.Source.Type == api.ItemTypeFile {
				entryType = fs.EntryObject
			}

			// The box event only includes the new path for the object (e.g.
			// the path after the object was moved). If there was an old path
			// saved in our cache, it must be cleared.
			if cachedItemMetaFound {
				path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
				if path != "" {
					pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
				}

				// If this is a directory, also delete it from the dir cache.
				// This will effectively invalidate the item metadata cache
				// entries for all descendants of this directory, since we
				// will no longer be able to construct a full path for them.
				// This is exactly what we want, since we don't want to notify
				// on the paths of these descendants if one of their ancestors
				// has been renamed/deleted.
				if entry.Source.Type == api.ItemTypeFolder {
					f.dirCache.FlushDir(path)
				}
			}

			// If the item is "active", then it is not trashed or deleted, so
			// it potentially has a valid parent.
			//
			// Construct the new path of the object, based on the Parent ID
			// and its name. If we get an empty result, it means we don't
			// currently know about this object so notification is unnecessary.
			if entry.Source.ItemStatus == api.ItemStatusActive {
				path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
				if path != "" {
					pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
				}
			}
		}

		// box can sometimes repeatedly return the same Event IDs within a
		// short period of time. If it stops giving us new ones, treat it
		// the same as if it returned us none at all.
		if newEventIDs == 0 {
			return nextStreamPosition, nil
		}

		notifiedPaths := make(map[string]bool)
		for _, p := range pathsToClear {
			if _, ok := notifiedPaths[p.path]; ok {
				continue
			}
			notifiedPaths[p.path] = true
			notifyFunc(p.path, p.entryType)
		}
		fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
	}
}

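changeNotifyRunner guards against Box's duplicate and stale events in two layers: a per-run set of seen Event IDs, plus the cached SequenceID comparison. A small sketch of the ID-dedup layer on its own:

package main

import "fmt"

// dedupEvents keeps only the first occurrence of each non-empty ID,
// mirroring the processedEventIDs map above.
func dedupEvents(ids []string) (fresh []string) {
	seen := make(map[string]bool)
	for _, id := range ids {
		if id == "" || seen[id] {
			continue
		}
		seen[id] = true
		fresh = append(fresh, id)
	}
	return fresh
}

func main() {
	fmt.Println(dedupEvents([]string{"a", "b", "a", "", "c", "b"})) // [a b c]
}
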
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {

backend/cache/cache.go (vendored)
@@ -76,17 +76,19 @@ func init() {
			Name: "plex_url",
			Help: "The URL of the Plex server.",
		}, {
			Name: "plex_username",
			Help: "The username of the Plex user.",
			Name:      "plex_username",
			Help:      "The username of the Plex user.",
			Sensitive: true,
		}, {
			Name:       "plex_password",
			Help:       "The password of the Plex user.",
			IsPassword: true,
		}, {
			Name:     "plex_token",
			Help:     "The plex token for authentication - auto set normally.",
			Hide:     fs.OptionHideBoth,
			Advanced: true,
			Name:      "plex_token",
			Help:      "The plex token for authentication - auto set normally.",
			Hide:      fs.OptionHideBoth,
			Advanced:  true,
			Sensitive: true,
		}, {
			Name: "plex_insecure",
			Help: "Skip all certificate verification when connecting to the Plex server.",
@@ -1787,7 +1789,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
	}
}

// StopBackgroundRunners will signall all the runners to stop their work
// StopBackgroundRunners will signal all the runners to stop their work
// can be triggered from a terminate signal or from testing between runs
func (f *Fs) StopBackgroundRunners() {
	f.cleanupChan <- false

backend/cache/cache_internal_test.go (vendored)
@@ -1098,27 +1098,6 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
	return l, err
}

func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer func() {
		_ = in.Close()
	}()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer func() {
		_ = out.Close()
	}()

	_, err = io.Copy(out, in)
	return err
}

func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
	var err error

backend/cache/cache_test.go (vendored)
@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   "TestCache:",
		NilObject:                    (*cache.Object)(nil),
		UnimplementableFsMethods:     []string{"PublicLink", "OpenWriterAt"},
		UnimplementableFsMethods:     []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
		SkipInvalidUTF8:              true, // invalid UTF-8 confuses the cache
	})

backend/cache/cache_upload_test.go (vendored)
@@ -160,11 +160,11 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
	minSize := 5242880
	maxSize := 10485760
	totalFiles := 10
	rand.Seed(time.Now().Unix())
	randInstance := rand.New(rand.NewSource(time.Now().Unix()))

	lastFile := ""
	for i := 0; i < totalFiles; i++ {
		size := int64(rand.Intn(maxSize-minSize) + minSize)
		size := int64(randInstance.Intn(maxSize-minSize) + minSize)
		testReader := runInstance.randomReader(t, size)
		remote := "test/" + strconv.Itoa(i) + ".bin"
		runInstance.writeRemoteReader(t, rootFs, remote, testReader)

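Replacing the global rand.Seed with a private rand.New(rand.NewSource(...)) instance avoids reseeding shared package state (rand.Seed is deprecated from Go 1.20) and keeps the test's randomness isolated. For illustration:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// A local generator: no global state touched, safe to seed per test.
	r := rand.New(rand.NewSource(time.Now().Unix()))
	minSize, maxSize := 5242880, 10485760
	size := int64(r.Intn(maxSize-minSize) + minSize)
	fmt.Println(size >= int64(minSize) && size < int64(maxSize)) // true
}
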
@@ -40,6 +40,7 @@ func TestIntegration(t *testing.T) {
		UnimplementableFsMethods: []string{
			"PublicLink",
			"OpenWriterAt",
			"OpenChunkWriter",
			"MergeDirs",
			"DirCacheFlush",
			"UserInfo",

@@ -1,4 +1,4 @@
// Package combine implents a backend to combine multiple remotes in a directory tree
// Package combine implements a backend to combine multiple remotes in a directory tree
package combine

/*
@@ -233,6 +233,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
		ReadMetadata:   true,
		WriteMetadata:  true,
		UserMetadata:   true,
		PartialUploads: true,
	}).Fill(ctx, f)
	canMove := true
	for _, u := range f.upstreams {
@@ -289,6 +290,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
		}
	}

	// Enable CleanUp when any upstreams support it
	if features.CleanUp == nil {
		for _, u := range f.upstreams {
			if u.f.Features().CleanUp != nil {
				features.CleanUp = f.CleanUp
				break
			}
		}
	}

	// Enable ChangeNotify when any upstreams support it
	if features.ChangeNotify == nil {
		for _, u := range f.upstreams {
@@ -299,6 +310,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
		}
	}

	// show that we wrap other backends
	features.Overlay = true

	f.features = features

	// Get common intersection of hashes
@@ -351,7 +365,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
	return g.Wait()
}

// join the elements together but unline path.Join return empty string
// join the elements together but unlike path.Join return empty string
func join(elem ...string) string {
	result := path.Join(elem...)
	if result == "." {
@@ -887,6 +901,100 @@ func (f *Fs) Shutdown(ctx context.Context) error {
	})
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	u, uRemote, err := f.findUpstream(remote)
	if err != nil {
		return "", err
	}
	do := u.f.Features().PublicLink
	if do == nil {
		return "", fs.ErrorNotImplemented
	}
	return do(ctx, uRemote, expire, unlink)
}

// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	srcPath := src.Remote()
	u, uRemote, err := f.findUpstream(srcPath)
	if err != nil {
		return nil, err
	}
	do := u.f.Features().PutUnchecked
	if do == nil {
		return nil, fs.ErrorNotImplemented
	}
	uSrc := fs.NewOverrideRemote(src, uRemote)
	return do(ctx, in, uSrc, options...)
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if len(dirs) == 0 {
		return nil
	}
	var (
		u     *upstream
		uDirs []fs.Directory
	)
	for _, dir := range dirs {
		uNew, uDir, err := f.findUpstream(dir.Remote())
		if err != nil {
			return err
		}
		if u == nil {
			u = uNew
		} else if u != uNew {
			return fmt.Errorf("can't merge directories from different upstreams")
		}
		uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
	}
	do := u.f.Features().MergeDirs
	if do == nil {
		return fs.ErrorNotImplemented
	}
	return do(ctx, uDirs)
}

// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
	return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
		if do := u.f.Features().CleanUp; do != nil {
			return do(ctx)
		}
		return nil
	})
}

// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	u, uRemote, err := f.findUpstream(remote)
	if err != nil {
		return nil, err
	}
	do := u.f.Features().OpenWriterAt
	if do == nil {
		return nil, fs.ErrorNotImplemented
	}
	return do(ctx, uRemote, size)
}

// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
@@ -916,7 +1024,7 @@ func (o *Object) String() string {
func (o *Object) Remote() string {
	newPath, err := o.u.pathAdjustment.do(o.Object.String())
	if err != nil {
		fs.Errorf(o, "Bad object: %v", err)
		fs.Errorf(o.Object, "Bad object: %v", err)
		return err.Error()
	}
	return newPath
@@ -988,5 +1096,10 @@ var (
	_ fs.Abouter        = (*Fs)(nil)
	_ fs.ListRer        = (*Fs)(nil)
	_ fs.Shutdowner     = (*Fs)(nil)
	_ fs.PublicLinker   = (*Fs)(nil)
	_ fs.PutUncheckeder = (*Fs)(nil)
	_ fs.MergeDirser    = (*Fs)(nil)
	_ fs.CleanUpper     = (*Fs)(nil)
	_ fs.OpenWriterAter = (*Fs)(nil)
	_ fs.FullObject     = (*Object)(nil)
)

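All the new combine methods above follow one delegation recipe: resolve the upstream for the path, look up the optional feature on the wrapped Fs, and return fs.ErrorNotImplemented when it is absent. A generic, standalone sketch of that shape (hypothetical names, not rclone's API):

package main

import (
	"errors"
	"fmt"
)

var errNotImplemented = errors.New("optional feature not implemented")

// callOptional mirrors the delegation shape above: if the resolved
// backend exposes the optional feature, call it; otherwise report
// not-implemented so callers can fall back.
func callOptional(feature func(remote string) (string, error), remote string) (string, error) {
	if feature == nil {
		return "", errNotImplemented
	}
	return feature(remote)
}

func main() {
	link, err := callOptional(func(r string) (string, error) { return "https://example.com/" + r, nil }, "file.txt")
	fmt.Println(link, err)
	_, err = callOptional(nil, "file.txt")
	fmt.Println(err)
}
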
@@ -10,6 +10,11 @@ import (
	"github.com/rclone/rclone/fstest/fstests"
)

var (
	unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
	unimplementableObjectMethods = []string{}
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
@@ -17,8 +22,8 @@ func TestIntegration(t *testing.T) {
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

@@ -35,7 +40,9 @@ func TestLocal(t *testing.T) {
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
		QuickTestOK: true,
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

@@ -51,7 +58,9 @@ func TestMemory(t *testing.T) {
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
		QuickTestOK: true,
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

@@ -68,6 +77,8 @@ func TestMixed(t *testing.T) {
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

@@ -186,6 +186,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
		ReadMetadata:   true,
		WriteMetadata:  true,
		UserMetadata:   true,
		PartialUploads: true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
	// We support reading MIME types no matter the wrapped fs
	f.features.ReadMimeType = true
@@ -256,6 +257,16 @@ func isMetadataFile(filename string) bool {
	return strings.HasSuffix(filename, metaFileExt)
}

// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
	if !isMetadataFile(filename) {
		return "", false
	}
	return filename[:len(filename)-len(metaFileExt)], true
}

// makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) {
	if mode != Uncompressed {
@@ -978,7 +989,8 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
		fs.Logf(f, "path %q entryType %d", path, entryType)
		var (
			wrappedPath string
			wrappedPath    string
			isMetadataFile bool
		)
		switch entryType {
		case fs.EntryDirectory:
@@ -986,7 +998,10 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
		case fs.EntryObject:
			// Note: All we really need to do to monitor the object is to check whether the metadata changed,
			// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
			wrappedPath = makeMetadataName(path)
			wrappedPath, isMetadataFile = unwrapMetadataFile(path)
			if !isMetadataFile {
				return
			}
		default:
			fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
			return

@@ -14,23 +14,26 @@ import (
	"github.com/rclone/rclone/fstest/fstests"
)

var defaultOpt = fstests.Opt{
	RemoteName: "TestCompress:",
	NilObject:  (*Object)(nil),
	UnimplementableFsMethods: []string{
		"OpenWriterAt",
		"OpenChunkWriter",
		"MergeDirs",
		"DirCacheFlush",
		"PutUnchecked",
		"PutStream",
		"UserInfo",
		"Disconnect",
	},
	TiersToTest:                  []string{"STANDARD", "STANDARD_IA"},
	UnimplementableObjectMethods: []string{},
}

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	opt := fstests.Opt{
		RemoteName: *fstest.RemoteName,
		NilObject:  (*Object)(nil),
		UnimplementableFsMethods: []string{
			"OpenWriterAt",
			"MergeDirs",
			"DirCacheFlush",
			"PutUnchecked",
			"PutStream",
			"UserInfo",
			"Disconnect",
		},
		TiersToTest:                  []string{"STANDARD", "STANDARD_IA"},
		UnimplementableObjectMethods: []string{}}
	fstests.Run(t, &opt)
	fstests.Run(t, &defaultOpt)
}

// TestRemoteGzip tests GZIP compression
@@ -40,27 +43,13 @@ func TestRemoteGzip(t *testing.T) {
	}
	tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
	name := "TestCompressGzip"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		NilObject:  (*Object)(nil),
		UnimplementableFsMethods: []string{
			"OpenWriterAt",
			"MergeDirs",
			"DirCacheFlush",
			"PutUnchecked",
			"PutStream",
			"UserInfo",
			"Disconnect",
		},
		UnimplementableObjectMethods: []string{
			"GetTier",
			"SetTier",
		},
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "compress"},
			{Name: name, Key: "remote", Value: tempdir},
			{Name: name, Key: "compression_mode", Value: "gzip"},
		},
		QuickTestOK: true,
	})
	opt := defaultOpt
	opt.RemoteName = name + ":"
	opt.ExtraConfig = []fstests.ExtraConfigItem{
		{Name: name, Key: "type", Value: "compress"},
		{Name: name, Key: "remote", Value: tempdir},
		{Name: name, Key: "compression_mode", Value: "gzip"},
	}
	opt.QuickTestOK = true
	fstests.Run(t, &opt)
}

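Because fstests.Opt is assigned by value, `opt := defaultOpt` above gives each test its own copy to mutate without touching the shared default. A two-line illustration of the value-copy semantics:

package main

import "fmt"

type opt struct{ RemoteName string }

func main() {
	defaultOpt := opt{RemoteName: "TestCompress:"}
	o := defaultOpt // struct copy, not a reference
	o.RemoteName = "TestCompressGzip:"
	fmt.Println(defaultOpt.RemoteName, o.RemoteName) // default untouched
}

One caveat of this pattern: slice fields in a copied struct still share their backing arrays, which the tests above sidestep by assigning whole new slices rather than mutating elements.
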
@@ -21,6 +21,7 @@ import (
	"github.com/rclone/rclone/backend/crypt/pkcs7"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/lib/readers"
	"github.com/rclone/rclone/lib/version"
	"github.com/rfjakob/eme"
	"golang.org/x/crypto/nacl/secretbox"
@@ -37,7 +38,6 @@ const (
	blockHeaderSize = secretbox.Overhead
	blockDataSize   = 64 * 1024
	blockSize       = blockHeaderSize + blockDataSize
	encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
)

// Errors returned by cipher
@@ -53,8 +53,9 @@ var (
	ErrorEncryptedBadBlock  = errors.New("failed to authenticate decrypted block - bad password?")
	ErrorBadBase32Encoding  = errors.New("bad base32 filename encoding")
	ErrorFileClosed         = errors.New("file already closed")
	ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
	ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
	ErrorBadSeek            = errors.New("Seek beyond end of file")
	ErrorSuffixMissingDot   = errors.New("suffix config setting should include a '.'")
	defaultSalt             = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
	obfuscQuoteRune         = '!'
)
@@ -169,27 +170,30 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {

// Cipher defines an encoding and decoding cipher for the crypt backend
|
||||
type Cipher struct {
|
||||
dataKey [32]byte // Key for secretbox
|
||||
nameKey [32]byte // 16,24 or 32 bytes
|
||||
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
|
||||
block gocipher.Block
|
||||
mode NameEncryptionMode
|
||||
fileNameEnc fileNameEncoding
|
||||
buffers sync.Pool // encrypt/decrypt buffers
|
||||
cryptoRand io.Reader // read crypto random numbers from here
|
||||
dirNameEncrypt bool
|
||||
dataKey [32]byte // Key for secretbox
|
||||
nameKey [32]byte // 16,24 or 32 bytes
|
||||
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
|
||||
block gocipher.Block
|
||||
mode NameEncryptionMode
|
||||
fileNameEnc fileNameEncoding
|
||||
buffers sync.Pool // encrypt/decrypt buffers
|
||||
cryptoRand io.Reader // read crypto random numbers from here
|
||||
dirNameEncrypt bool
|
||||
passBadBlocks bool // if set passed bad blocks as zeroed blocks
|
||||
encryptedSuffix string
|
||||
}
|
||||
|
||||
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
|
||||
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
|
||||
c := &Cipher{
|
||||
mode: mode,
|
||||
fileNameEnc: enc,
|
||||
cryptoRand: rand.Reader,
|
||||
dirNameEncrypt: dirNameEncrypt,
|
||||
mode: mode,
|
||||
fileNameEnc: enc,
|
||||
cryptoRand: rand.Reader,
|
||||
dirNameEncrypt: dirNameEncrypt,
|
||||
encryptedSuffix: ".bin",
|
||||
}
|
||||
c.buffers.New = func() interface{} {
|
||||
return make([]byte, blockSize)
|
||||
return new([blockSize]byte)
|
||||
}
|
||||
err := c.Key(password, salt)
|
||||
if err != nil {
|
||||
@@ -198,11 +202,29 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// setEncryptedSuffix set suffix, or an empty string
|
||||
func (c *Cipher) setEncryptedSuffix(suffix string) {
|
||||
if strings.EqualFold(suffix, "none") {
|
||||
c.encryptedSuffix = ""
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(suffix, ".") {
|
||||
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
|
||||
suffix = "." + suffix
|
||||
}
|
||||
c.encryptedSuffix = suffix
|
||||
}
|
||||
|
||||
// Call to set bad block pass through
|
||||
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
|
||||
c.passBadBlocks = passBadBlocks
|
||||
}
|
||||
|
||||
// Key creates all the internal keys from the password passed in using
|
||||
// scrypt.
|
||||
//
|
||||
// If salt is "" we use a fixed salt just to make attackers lives
|
||||
// slighty harder than using no salt.
|
||||
// slightly harder than using no salt.
|
||||
//
|
||||
// Note that empty password makes all 0x00 keys which is used in the
|
||||
// tests.
|
||||
@@ -230,15 +252,12 @@ func (c *Cipher) Key(password, salt string) (err error) {
|
||||
}
|
||||
|
||||
// getBlock gets a block from the pool of size blockSize
|
||||
func (c *Cipher) getBlock() []byte {
|
||||
return c.buffers.Get().([]byte)
|
||||
func (c *Cipher) getBlock() *[blockSize]byte {
|
||||
return c.buffers.Get().(*[blockSize]byte)
|
||||
}
|
||||
|
||||
// putBlock returns a block to the pool of size blockSize
|
||||
func (c *Cipher) putBlock(buf []byte) {
|
||||
if len(buf) != blockSize {
|
||||
panic("bad blocksize returned to pool")
|
||||
}
|
||||
func (c *Cipher) putBlock(buf *[blockSize]byte) {
|
||||
c.buffers.Put(buf)
|
||||
}
|
||||
|
||||
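The pool change here is the standard Go idiom for sync.Pool buffers (the pattern staticcheck flags as SA6002): putting a []byte into a pool boxes the slice header and allocates on every Put, while a *[blockSize]byte converts to interface{} without allocating, and the array type fixes the size so the old length-check panic in putBlock becomes unnecessary. A minimal standalone sketch of the pattern, not the rclone code itself:

package main

import (
	"fmt"
	"sync"
)

const blockSize = 16 + 64*1024 // secretbox overhead + data, as in the diff

// pool hands out *[blockSize]byte: storing a pointer in the
// pool avoids the allocation that boxing a []byte would cause.
var pool = sync.Pool{
	New: func() interface{} { return new([blockSize]byte) },
}

func main() {
	buf := pool.Get().(*[blockSize]byte)
	b := (*buf)[:] // take a slice view of the array for I/O
	fmt.Println(len(b))
	pool.Put(buf) // no per-Put allocation, and the size is guaranteed by the type
}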
@@ -508,7 +527,7 @@ func (c *Cipher) encryptFileName(in string) string {
 // EncryptFileName encrypts a file path
 func (c *Cipher) EncryptFileName(in string) string {
 if c.mode == NameEncryptionOff {
-return in + encryptedSuffix
+return in + c.encryptedSuffix
 }
 return c.encryptFileName(in)
 }
@@ -568,8 +587,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
 // DecryptFileName decrypts a file path
 func (c *Cipher) DecryptFileName(in string) (string, error) {
 if c.mode == NameEncryptionOff {
-remainingLength := len(in) - len(encryptedSuffix)
-if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
+remainingLength := len(in) - len(c.encryptedSuffix)
+if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
 return "", ErrorNotAnEncryptedFile
 }
 decrypted := in[:remainingLength]
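When filename encryption is off, the suffix handling above reduces to appending and stripping the configurable c.encryptedSuffix. A standalone sketch of the round-trip, with illustrative helper names rather than the actual rclone API:

package main

import (
	"errors"
	"fmt"
	"strings"
)

var errNotEncrypted = errors.New("not an encrypted file - does not match suffix")

// encryptName appends the configured suffix (empty when set to "none").
func encryptName(in, suffix string) string { return in + suffix }

// decryptName strips the suffix, rejecting names that don't carry it
// or that would decode to the empty string.
func decryptName(in, suffix string) (string, error) {
	remaining := len(in) - len(suffix)
	if remaining == 0 || !strings.HasSuffix(in, suffix) {
		return "", errNotEncrypted
	}
	return in[:remaining], nil
}

func main() {
	fmt.Println(encryptName("1/12/123", ".jpg"))      // 1/12/123.jpg
	fmt.Println(decryptName("1/12/123.jpg", ".jpg"))  // 1/12/123 <nil>
	fmt.Println(decryptName("1/12/123.bix", ".bin"))  // error
}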
@@ -609,7 +628,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
 // fromReader fills the nonce from an io.Reader - normally the OSes
 // crypto random number generator
 func (n *nonce) fromReader(in io.Reader) error {
-read, err := io.ReadFull(in, (*n)[:])
+read, err := readers.ReadFill(in, (*n)[:])
 if read != fileNonceSize {
 return fmt.Errorf("short read of nonce: %w", err)
 }
@@ -664,8 +683,8 @@ type encrypter struct {
 in io.Reader
 c *Cipher
 nonce nonce
-buf []byte
-readBuf []byte
+buf *[blockSize]byte
+readBuf *[blockSize]byte
 bufIndex int
 bufSize int
 err error
@@ -690,9 +709,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
 }
 }
 // Copy magic into buffer
-copy(fh.buf, fileMagicBytes)
+copy((*fh.buf)[:], fileMagicBytes)
 // Copy nonce into buffer
-copy(fh.buf[fileMagicSize:], fh.nonce[:])
+copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
 return fh, nil
 }
@@ -707,22 +726,20 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
 if fh.bufIndex >= fh.bufSize {
 // Read data
 // FIXME should overlap the reads with a go-routine and 2 buffers?
-readBuf := fh.readBuf[:blockDataSize]
-n, err = io.ReadFull(fh.in, readBuf)
+readBuf := (*fh.readBuf)[:blockDataSize]
+n, err = readers.ReadFill(fh.in, readBuf)
 if n == 0 {
-// err can't be nil since:
-// n == len(buf) if and only if err == nil.
 return fh.finish(err)
 }
 // possibly err != nil here, but we will process the
-// data and the next call to ReadFull will return 0, err
+// data and the next call to ReadFill will return 0, err
 // Encrypt the block using the nonce
-secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
 fh.bufIndex = 0
 fh.bufSize = blockHeaderSize + n
 fh.nonce.increment()
 }
-n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
+n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
 fh.bufIndex += n
 return n, nil
 }
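The io.ReadFull to readers.ReadFill switch changes short-read semantics: ReadFull turns a partial read into io.ErrUnexpectedEOF, while a fill-style helper returns the bytes it did get along with the pending error, so the encrypter can seal a final short block and surface the error on the next call. A sketch of the semantics assumed here (rclone's lib/readers.ReadFill behaves along these lines):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readFill reads into buf until it is full or the reader errors.
// Unlike io.ReadFull it does not convert a short read into
// io.ErrUnexpectedEOF: callers get the data plus the pending error.
func readFill(r io.Reader, buf []byte) (n int, err error) {
	for n < len(buf) && err == nil {
		var nn int
		nn, err = r.Read(buf[n:])
		n += nn
	}
	return n, err
}

func main() {
	buf := make([]byte, 8)
	n, err := readFill(bytes.NewBufferString("abc"), buf)
	fmt.Println(n, err) // 3 EOF - the 3 bytes are still usable
}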
@@ -763,8 +780,8 @@ type decrypter struct {
 nonce nonce
 initialNonce nonce
 c *Cipher
-buf []byte
-readBuf []byte
+buf *[blockSize]byte
+readBuf *[blockSize]byte
 bufIndex int
 bufSize int
 err error
@@ -782,12 +799,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 limit: -1,
 }
 // Read file header (magic + nonce)
-readBuf := fh.readBuf[:fileHeaderSize]
-_, err := io.ReadFull(fh.rc, readBuf)
-if err == io.EOF || err == io.ErrUnexpectedEOF {
+readBuf := (*fh.readBuf)[:fileHeaderSize]
+n, err := readers.ReadFill(fh.rc, readBuf)
+if n < fileHeaderSize && err == io.EOF {
 // This read from 0..fileHeaderSize-1 bytes
 return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
-} else if err != nil {
+} else if err != io.EOF && err != nil {
 return nil, fh.finishAndClose(err)
 }
 // check the magic
@@ -845,10 +862,8 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
 func (fh *decrypter) fillBuffer() (err error) {
 // FIXME should overlap the reads with a go-routine and 2 buffers?
 readBuf := fh.readBuf
-n, err := io.ReadFull(fh.rc, readBuf)
+n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
 if n == 0 {
-// err can't be nil since:
-// n == len(buf) if and only if err == nil.
 return err
 }
 // possibly err != nil here, but we will process the data and
@@ -856,18 +871,25 @@

 // Check header + 1 byte exists
 if n <= blockHeaderSize {
-if err != nil {
+if err != nil && err != io.EOF {
 return err // return pending error as it is likely more accurate
 }
 return ErrorEncryptedFileBadHeader
 }
 // Decrypt the block using the nonce
-_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
 if !ok {
-if err != nil {
+if err != nil && err != io.EOF {
 return err // return pending error as it is likely more accurate
 }
-return ErrorEncryptedBadBlock
+if !fh.c.passBadBlocks {
+return ErrorEncryptedBadBlock
+}
+fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
+// Zero out the bad block and continue
+for i := range (*fh.buf)[:n] {
+(*fh.buf)[i] = 0
+}
 }
 fh.bufIndex = 0
 fh.bufSize = n - blockHeaderSize
@@ -893,7 +915,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
 if fh.limit >= 0 && fh.limit < int64(toCopy) {
 toCopy = int(fh.limit)
 }
-n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
+n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
 fh.bufIndex += n
 if fh.limit >= 0 {
 fh.limit -= int64(n)
@@ -904,9 +926,8 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
 return n, nil
 }

-// calculateUnderlying converts an (offset, limit) in a crypted file
-// into an (underlyingOffset, underlyingLimit) for the underlying
-// file.
+// calculateUnderlying converts an (offset, limit) in an encrypted file
+// into an (underlyingOffset, underlyingLimit) for the underlying file.
 //
 // It also returns number of bytes to discard after reading the first
 // block and number of blocks this is from the start so the nonce can
@@ -27,14 +27,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
 {"off", NameEncryptionOff, ""},
 {"standard", NameEncryptionStandard, ""},
 {"obfuscate", NameEncryptionObfuscated, ""},
-{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
+{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
 } {
 actual, actualErr := NewNameEncryptionMode(test.in)
 assert.Equal(t, actual, test.expected)
 if test.expectedErr == "" {
 assert.NoError(t, actualErr)
 } else {
-assert.Error(t, actualErr, test.expectedErr)
+assert.EqualError(t, actualErr, test.expectedErr)
 }
 }
 }
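Many of the test edits in this series swap assert.Error for assert.EqualError, and the distinction is easy to miss: in testify, the extra arguments to assert.Error are only a failure message, so it passes for any non-nil error without comparing text, whereas assert.EqualError compares err.Error() against the expected string. A small illustration:

package main

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestErrorText(t *testing.T) {
	err := errors.New("potato")

	// Passes for ANY non-nil error: "banana" is just a message
	// printed if the assertion fails, not an expected value.
	assert.Error(t, err, "banana")

	// Fails unless err.Error() == "potato".
	assert.EqualError(t, err, "potato")
}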
@@ -405,6 +405,13 @@ func TestNonStandardEncryptFileName(t *testing.T) {
 // Off mode
 c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
 assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
+// Off mode with custom suffix
+c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
+c.setEncryptedSuffix(".jpg")
+assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
+// Off mode with empty suffix
+c.setEncryptedSuffix("none")
+assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
 // Obfuscation mode
 c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
 assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
@@ -483,21 +490,27 @@ func TestNonStandardDecryptFileName(t *testing.T) {
 in string
 expected string
 expectedErr error
+customSuffix string
 }{
-{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
-{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
-{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
-{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
-{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
-{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
-{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
-{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
-{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
-{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
-{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
-{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
+{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""},
+{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""},
+{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""},
+{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""},
+{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""},
+{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""},
+{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"},
+{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"},
+{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""},
+{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""},
+{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""},
+{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""},
+{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
+{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
 } {
 c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
+if test.customSuffix != "" {
+c.setEncryptedSuffix(test.customSuffix)
+}
 actual, actualErr := c.DecryptFileName(test.in)
 what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
 assert.Equal(t, test.expected, actual, what)
@@ -726,7 +739,7 @@ func TestNonceFromReader(t *testing.T) {
 assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
 buf = bytes.NewBufferString("123456789abcdefghijklmn")
 err = x.fromReader(buf)
-assert.Error(t, err, "short read of nonce")
+assert.EqualError(t, err, "short read of nonce: EOF")
 }

 func TestNonceFromBuf(t *testing.T) {
@@ -1050,7 +1063,7 @@ func TestRandomSource(t *testing.T) {
 _, _ = source.Read(buf)
 sink = newRandomSource(1e8)
 _, err = io.Copy(sink, source)
-assert.Error(t, err, "Error in stream")
+assert.EqualError(t, err, "Error in stream at 1")
 }

 type zeroes struct{}
@@ -1167,13 +1180,13 @@ func TestNewEncrypter(t *testing.T) {
 fh, err := c.newEncrypter(z, nil)
 assert.NoError(t, err)
 assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
-assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
+assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])

 // Test error path
 c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
 fh, err = c.newEncrypter(z, nil)
 assert.Nil(t, fh)
-assert.Error(t, err, "short read of nonce")
+assert.EqualError(t, err, "short read of nonce: EOF")
 }

 // Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1224,7 +1237,7 @@ func TestNewDecrypter(t *testing.T) {
 cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
 fh, err = c.newDecrypter(cd)
 assert.Nil(t, fh)
-assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
+assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
 assert.Equal(t, 1, cd.closed)
 }

@@ -1232,7 +1245,7 @@ func TestNewDecrypter(t *testing.T) {
 cd = newCloseDetector(er)
 fh, err = c.newDecrypter(cd)
 assert.Nil(t, fh)
-assert.Error(t, err, "potato")
+assert.EqualError(t, err, "potato")
 assert.Equal(t, 1, cd.closed)

 // bad magic
@@ -1243,7 +1256,7 @@ func TestNewDecrypter(t *testing.T) {
 cd := newCloseDetector(bytes.NewBuffer(file0copy))
 fh, err := c.newDecrypter(cd)
 assert.Nil(t, fh)
-assert.Error(t, err, ErrorEncryptedBadMagic.Error())
+assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
 file0copy[i] ^= 0x1
 assert.Equal(t, 1, cd.closed)
 }
@@ -1495,8 +1508,10 @@ func TestDecrypterRead(t *testing.T) {
 case i == fileHeaderSize:
 // This would normally produce an error *except* on the first block
 expectedErr = nil
+case i <= fileHeaderSize+blockHeaderSize:
+expectedErr = ErrorEncryptedFileBadHeader
 default:
-expectedErr = io.ErrUnexpectedEOF
+expectedErr = ErrorEncryptedBadBlock
 }
 if expectedErr != nil {
 assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1514,7 +1529,7 @@ func TestDecrypterRead(t *testing.T) {
 fh, err := c.newDecrypter(cd)
 assert.NoError(t, err)
 _, err = io.ReadAll(fh)
-assert.Error(t, err, "potato")
+assert.EqualError(t, err, "potato")
 assert.Equal(t, 0, cd.closed)

 // Test corrupting the input
@@ -1525,15 +1540,26 @@ func TestDecrypterRead(t *testing.T) {
 file16copy[i] ^= 0xFF
 fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
 if i < fileMagicSize {
-assert.Error(t, err, ErrorEncryptedBadMagic.Error())
+assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
 assert.Nil(t, fh)
 } else {
 assert.NoError(t, err)
 _, err = io.ReadAll(fh)
-assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
+assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
 }
 file16copy[i] ^= 0xFF
 }
+
+// Test that we can corrupt a byte and read zeroes if
+// passBadBlocks is set
+copy(file16copy, file16)
+file16copy[len(file16copy)-1] ^= 0xFF
+c.passBadBlocks = true
+fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
+assert.NoError(t, err)
+buf, err := io.ReadAll(fh)
+assert.NoError(t, err)
+assert.Equal(t, make([]byte, 16), buf)
 }

 func TestDecrypterClose(t *testing.T) {
@@ -1554,7 +1580,7 @@ func TestDecrypterClose(t *testing.T) {

 // double close
 err = fh.Close()
-assert.Error(t, err, ErrorFileClosed.Error())
+assert.EqualError(t, err, ErrorFileClosed.Error())
 assert.Equal(t, 1, cd.closed)

 // try again reading the file this time
@@ -1581,8 +1607,6 @@ func TestPutGetBlock(t *testing.T) {
 block := c.getBlock()
 c.putBlock(block)
 c.putBlock(block)
-
-assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
 }

 func TestKey(t *testing.T) {
@@ -48,7 +48,7 @@ func init() {
 Help: "Very simple filename obfuscation.",
 }, {
 Value: "off",
-Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
+Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
 },
 },
 }, {
@@ -79,7 +79,9 @@ NB If filename_encryption is "off" then this option will do nothing.`,
 }, {
 Name: "server_side_across_configs",
 Default: false,
-Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
+Help: `Deprecated: use --server-side-across-configs instead.
+
+Allow server-side operations (e.g. copy) to work across different crypt configs.

 Normally this option is not what you want, but if you have two crypts
 pointing to the same backend you can use it.
@@ -119,6 +121,15 @@ names, or for debugging purposes.`,
 Help: "Encrypt file data.",
 },
 },
+}, {
+Name: "pass_bad_blocks",
+Help: `If set this will pass bad blocks through as all 0.
+
+This should not be set in normal operation, it should only be set if
+trying to recover an encrypted file with errors and it is desired to
+recover as much of the file as possible.`,
+Default: false,
+Advanced: true,
 }, {
 Name: "filename_encoding",
 Help: `How to encode the encrypted filename to text string.
@@ -138,10 +149,18 @@ length and if it's case sensitive.`,
 },
 {
 Value: "base32768",
-Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
+Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
 },
 },
 Advanced: true,
+}, {
+Name: "suffix",
+Help: `If this is set it will override the default suffix of ".bin".
+
+Setting suffix to "none" will result in an empty suffix. This may be useful
+when the path length is critical.`,
+Default: ".bin",
+Advanced: true,
 }},
 })
 }
@@ -174,6 +193,8 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
 if err != nil {
 return nil, fmt.Errorf("failed to make cipher: %w", err)
 }
+cipher.setEncryptedSuffix(opt.Suffix)
+cipher.setPassBadBlocks(opt.PassBadBlocks)
 return cipher, nil
 }

@@ -247,6 +268,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 ReadMetadata: true,
 WriteMetadata: true,
 UserMetadata: true,
+PartialUploads: true,
 }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

 return f, err
@@ -262,7 +284,9 @@ type Options struct {
 Password2 string `config:"password2"`
 ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
 ShowMapping bool `config:"show_mapping"`
+PassBadBlocks bool `config:"pass_bad_blocks"`
 FilenameEncoding string `config:"filename_encoding"`
+Suffix string `config:"suffix"`
 }

 // Fs represents a wrapped fs.Fs
@@ -454,7 +478,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 if err != nil {
 fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 }
-return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
+return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
 }
 fs.Debugf(src, "%v = %s OK", ht, srcHash)
 }
@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
 fstests.Run(t, &fstests.Opt{
 RemoteName: *fstest.RemoteName,
 NilObject: (*crypt.Object)(nil),
-UnimplementableFsMethods: []string{"OpenWriterAt"},
+UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
 UnimplementableObjectMethods: []string{"MimeType"},
 })
 }
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
 {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
 {Name: name, Key: "filename_encryption", Value: "standard"},
 },
-UnimplementableFsMethods: []string{"OpenWriterAt"},
+UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
 UnimplementableObjectMethods: []string{"MimeType"},
 QuickTestOK: true,
 })
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
 {Name: name, Key: "filename_encryption", Value: "standard"},
 {Name: name, Key: "filename_encoding", Value: "base64"},
 },
-UnimplementableFsMethods: []string{"OpenWriterAt"},
+UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
 UnimplementableObjectMethods: []string{"MimeType"},
 QuickTestOK: true,
 })
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
 {Name: name, Key: "filename_encryption", Value: "standard"},
 {Name: name, Key: "filename_encoding", Value: "base32768"},
 },
-UnimplementableFsMethods: []string{"OpenWriterAt"},
+UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
 UnimplementableObjectMethods: []string{"MimeType"},
 QuickTestOK: true,
 })
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
 {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
 {Name: name, Key: "filename_encryption", Value: "off"},
 },
-UnimplementableFsMethods: []string{"OpenWriterAt"},
+UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
 UnimplementableObjectMethods: []string{"MimeType"},
 QuickTestOK: true,
 })
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
 {Name: name, Key: "filename_encryption", Value: "obfuscate"},
 },
 SkipBadWindowsCharacters: true,
-UnimplementableFsMethods: []string{"OpenWriterAt"},
+UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
 UnimplementableObjectMethods: []string{"MimeType"},
 QuickTestOK: true,
 })
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
 {Name: name, Key: "no_data_encryption", Value: "true"},
 },
 SkipBadWindowsCharacters: true,
-UnimplementableFsMethods: []string{"OpenWriterAt"},
+UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
 UnimplementableObjectMethods: []string{"MimeType"},
 QuickTestOK: true,
 })
@@ -202,7 +202,7 @@ func init() {
 m.Set("root_folder_id", "appDataFolder")
 }

-if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
+if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
 return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
 OAuth2Config: driveConfig,
 })
@@ -277,20 +277,23 @@ Leave blank normally.

 Fill in to access "Computers" folders (see docs), or for rclone to use
 a non root folder as its starting point.
 `,
-Advanced: true,
+Advanced: true,
+Sensitive: true,
 }, {
 Name: "service_account_file",
 Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
 }, {
-Name: "service_account_credentials",
-Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
-Hide: fs.OptionHideConfigurator,
-Advanced: true,
+Name: "service_account_credentials",
+Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
+Hide: fs.OptionHideConfigurator,
+Advanced: true,
+Sensitive: true,
 }, {
-Name: "team_drive",
-Help: "ID of the Shared Drive (Team Drive).",
-Hide: fs.OptionHideConfigurator,
-Advanced: true,
+Name: "team_drive",
+Help: "ID of the Shared Drive (Team Drive).",
+Hide: fs.OptionHideConfigurator,
+Advanced: true,
+Sensitive: true,
 }, {
 Name: "auth_owner_only",
 Default: false,
@@ -416,10 +419,11 @@ date is used.`,
 Help: "Size of listing chunk 100-1000, 0 to disable.",
 Advanced: true,
 }, {
-Name: "impersonate",
-Default: "",
-Help: `Impersonate this user when using a service account.`,
-Advanced: true,
+Name: "impersonate",
+Default: "",
+Help: `Impersonate this user when using a service account.`,
+Advanced: true,
+Sensitive: true,
 }, {
 Name: "alternate_export",
 Default: false,
@@ -499,7 +503,9 @@ need to use --ignore size also.`,
 }, {
 Name: "server_side_across_configs",
 Default: false,
-Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
+Help: `Deprecated: use --server-side-across-configs instead.
+
+Allow server-side operations (e.g. copy) to work across different drive configs.

 This can be useful if you wish to do a server-side copy between two
 different Google drives. Note that this isn't enabled by default
@@ -588,9 +594,32 @@ This resource key requirement only applies to a subset of old files.

 Note also that opening the folder once in the web interface (with the
 user you've authenticated rclone with) seems to be enough so that the
-resource key is no needed.
+resource key is not needed.
 `,
 Advanced: true,
+Sensitive: true,
+}, {
+Name: "fast_list_bug_fix",
+Help: `Work around a bug in Google Drive listing.
+
+Normally rclone will work around a bug in Google Drive when using
+--fast-list (ListR) where the search "(A in parents) or (B in
+parents)" returns nothing sometimes. See #3114, #4289 and
+https://issuetracker.google.com/issues/149522397
+
+Rclone detects this by finding no items in more than one directory
+when listing and retries them as lists of individual directories.
+
+This means that if you have a lot of empty directories rclone will end
+up listing them all individually and this can take many more API
+calls.
+
+This flag allows the work-around to be disabled. This is **not**
+recommended in normal use - only if you have a particular case you are
+having trouble with like many empty directories.
+`,
+Advanced: true,
+Default: true,
 }, {
 Name: config.ConfigEncoding,
 Help: config.ConfigEncodingHelp,
@@ -598,6 +627,18 @@ resource key is no needed.
 // Encode invalid UTF-8 bytes as json doesn't handle them properly.
 // Don't encode / as it's a valid name character in drive.
 Default: encoder.EncodeInvalidUtf8,
+}, {
+Name: "env_auth",
+Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
+Default: false,
+Advanced: true,
+Examples: []fs.OptionExample{{
+Value: "false",
+Help: "Enter credentials in the next step.",
+}, {
+Value: "true",
+Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
+}},
 }}...),
 })
@@ -653,7 +694,9 @@ type Options struct {
 SkipShortcuts bool `config:"skip_shortcuts"`
 SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
 ResourceKey string `config:"resource_key"`
+FastListBugFix bool `config:"fast_list_bug_fix"`
 Enc encoder.MultiEncoder `config:"encoding"`
+EnvAuth bool `config:"env_auth"`
 }

 // Fs represents a remote drive server
@@ -1122,6 +1165,12 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
 if err != nil {
 return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
 }
+} else if opt.EnvAuth {
+scopes := driveScopes(opt.Scope)
+oAuthClient, err = google.DefaultClient(ctx, scopes...)
+if err != nil {
+return nil, fmt.Errorf("failed to create client from environment: %w", err)
+}
 } else {
 oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
 if err != nil {
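The env_auth branch above defers to Google's Application Default Credentials chain (environment variable, well-known credentials file, then instance metadata). A minimal sketch of the same call outside rclone; the scope string is illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// DefaultClient walks the Application Default Credentials chain:
	// GOOGLE_APPLICATION_CREDENTIALS, the well-known credentials
	// file, then instance metadata on GCP.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/drive")
	if err != nil {
		log.Fatalf("failed to create client from environment: %v", err)
	}
	fmt.Println(client != nil)
}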
@@ -1493,6 +1542,9 @@ func (f *Fs) newObjectWithExportInfo(
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+if strings.HasSuffix(remote, "/") {
+return nil, fs.ErrorIsDir
+}
 info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
 if err != nil {
 return nil, err
@@ -1862,7 +1914,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
 // drive where (A in parents) or (B in parents) returns nothing
 // sometimes. See #3114, #4289 and
 // https://issuetracker.google.com/issues/149522397
-if len(dirs) > 1 && !foundItems {
+if f.opt.FastListBugFix && len(dirs) > 1 && !foundItems {
 if atomic.SwapInt32(&f.grouping, 1) != 1 {
 fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
 }
@@ -2880,6 +2932,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 if f.rootFolderID == "appDataFolder" {
 changesCall.Spaces("appDataFolder")
 }
+changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
 changeList, err = changesCall.Context(ctx).Do()
 return f.shouldRetry(ctx, err)
 })
@@ -3861,7 +3914,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 if err != nil {
 return err
 }
-newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
+newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
 if err != nil {
 return err
 }
@@ -13,7 +13,6 @@ import (
 "sync"
 "time"

-"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
 "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/fserrors"
@@ -140,55 +139,12 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
 return complete, nil
 }

-// finishBatchJobStatus waits for the batch to complete returning completed entries
-func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
-if launchBatchStatus.AsyncJobId == "" {
-return nil, errors.New("wait for batch completion: empty job ID")
-}
-var batchStatus *files.UploadSessionFinishBatchJobStatus
-sleepTime := 100 * time.Millisecond
-const maxSleepTime = 1 * time.Second
-startTime := time.Now()
-try := 1
-for {
-remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
-if remaining < 0 {
-break
-}
-err = b.f.pacer.Call(func() (bool, error) {
-batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
-AsyncJobId: launchBatchStatus.AsyncJobId,
-})
-return shouldRetry(ctx, err)
-})
-if err != nil {
-fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
-} else {
-if batchStatus.Tag == "complete" {
-fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
-return batchStatus.Complete, nil
-}
-fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
-}
-time.Sleep(sleepTime)
-sleepTime *= 2
-if sleepTime > maxSleepTime {
-sleepTime = maxSleepTime
-}
-try++
-}
-if err == nil {
-err = errors.New("batch didn't complete")
-}
-return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
-}
-
 // commit a batch
 func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
 // If commit fails then signal clients if sync
 var signalled = b.async
 defer func() {
-if err != nil && signalled {
+if err != nil && !signalled {
 // Signal to clients that there was an error
 for _, result := range results {
 result <- batcherResponse{err: err}
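The one-character commitBatch fix above deserves a note: signalled starts true for async batches (no caller is waiting) and false for sync ones, so the deferred handler must fan the error out when callers have not yet been signalled. The old err != nil && signalled condition therefore silently dropped commit errors for sync batches. A reduced model of the pattern, not the rclone implementation:

package main

import (
	"errors"
	"fmt"
)

// commit is a reduced model of the batcher flow: results are channels
// of callers waiting synchronously for this batch to finish.
func commit(async bool, results []chan error, do func() error) (err error) {
	signalled := async // async callers never wait: treat as already signalled
	defer func() {
		if err != nil && !signalled {
			// Failed before anyone was told - fan the error out,
			// otherwise sync callers would block forever.
			for _, result := range results {
				result <- err
			}
		}
	}()
	return do()
}

func main() {
	ch := make(chan error, 1)
	_ = commit(false, []chan error{ch}, func() error { return errors.New("boom") })
	fmt.Println(<-ch) // boom
}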
@@ -58,7 +58,7 @@ import (
 const (
 rcloneClientID = "5jcck7diasz0rqy"
 rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
-minSleep = 10 * time.Millisecond
+defaultMinSleep = fs.Duration(10 * time.Millisecond)
 maxSleep = 2 * time.Second
 decayConstant = 2 // bigger for slower decay, exponential
 // Upload chunk size - setting too small makes uploads slow.
@@ -182,8 +182,9 @@ client_secret) to use this option as currently rclone's default set of
 permissions doesn't include "members.read". This can be added once
 v1.55 or later is in use everywhere.
 `,
-Default: "",
-Advanced: true,
+Default: "",
+Advanced: true,
+Sensitive: true,
 }, {
 Name: "shared_files",
 Help: `Instructs rclone to work on individual shared files.
@@ -260,8 +261,8 @@ uploaded.
 The default for this is 0 which means rclone will choose a sensible
 default based on the batch_mode in use.

-- batch_mode: async - default batch_timeout is 500ms
-- batch_mode: sync - default batch_timeout is 10s
+- batch_mode: async - default batch_timeout is 10s
+- batch_mode: sync - default batch_timeout is 500ms
 - batch_mode: off - not in use
 `,
 Default: fs.Duration(0),
@@ -271,6 +272,11 @@ default based on the batch_mode in use.
 Help: `Max time to wait for a batch to finish committing`,
 Default: fs.Duration(10 * time.Minute),
 Advanced: true,
+}, {
+Name: "pacer_min_sleep",
+Default: defaultMinSleep,
+Help: "Minimum time to sleep between API calls.",
+Advanced: true,
 }, {
 Name: config.ConfigEncoding,
 Help: config.ConfigEncodingHelp,
@@ -299,6 +305,7 @@ type Options struct {
 BatchTimeout fs.Duration `config:"batch_timeout"`
 BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
 AsyncBatch bool `config:"async_batch"`
+PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
 Enc encoder.MultiEncoder `config:"encoding"`
 }

@@ -442,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 name: name,
 opt: *opt,
 ci: ci,
-pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 }
 f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
 if err != nil {
@@ -536,7 +543,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 default:
 return nil, err
 }
-// if the moint failed we have to abort here
+// if the mount failed we have to abort here
 }
 // if the mount succeeded it's now a normal folder in the users root namespace
 // we disable shared folder mode and proceed normally
@@ -719,7 +726,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
 }
 for _, entry := range res.Entries {
 leaf := f.opt.Enc.ToStandardName(entry.Name)
-d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
+d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
 entries = append(entries, d)
 if err != nil {
 return nil, err
@@ -906,7 +913,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
 remote := path.Join(dir, leaf)
 if folderInfo != nil {
-d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
+d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
 entries = append(entries, d)
 } else if fileInfo != nil {
 o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
 509, // Bandwidth Limit Exceeded
 }

-var errorRegex = regexp.MustCompile(`#\d{1,3}`)
+var errorRegex = regexp.MustCompile(`#(\d{1,3})`)

 func parseFichierError(err error) int {
 matches := errorRegex.FindStringSubmatch(err.Error())
 if len(matches) == 0 {
 return 0
 }
-code, err := strconv.Atoi(matches[0])
+code, err := strconv.Atoi(matches[1])
 if err != nil {
 fs.Debugf(nil, "failed parsing fichier error: %v", err)
 return 0
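The errorRegex change above is a genuine bug fix, not a cleanup: without a capture group, FindStringSubmatch returns only the whole match including the leading '#', so strconv.Atoi always failed and every error parsed as code 0. With the group, matches[1] holds just the digits:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var errorRegex = regexp.MustCompile(`#(\d{1,3})`)

func parseCode(msg string) int {
	matches := errorRegex.FindStringSubmatch(msg)
	if len(matches) == 0 {
		return 0
	}
	// matches[0] == "#404" (Atoi would fail), matches[1] == "404".
	code, err := strconv.Atoi(matches[1])
	if err != nil {
		return 0
	}
	return code
}

func main() {
	fmt.Println(parseCode("error #404 not found")) // 404
}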
@@ -118,6 +118,9 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
 Single: 1,
 Pass: f.opt.FilePassword,
 }
+if f.opt.CDN {
+request.CDN = 1
+}
 opts := rest.Opts{
 Method: "POST",
 Path: "/download/get_token.cgi",
@@ -405,6 +408,32 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
 return response, nil
 }

+func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
+request := &MoveDirRequest{
+FolderID: folderID,
+DestinationFolderID: destinationFolderID,
+Rename: newLeaf,
+// DestinationUser: destinationUser,
+}
+
+opts := rest.Opts{
+Method: "POST",
+Path: "/folder/mv.cgi",
+}
+
+response = &MoveDirResponse{}
+err = f.pacer.Call(func() (bool, error) {
+resp, err := f.rest.CallJSON(ctx, &opts, request, response)
+return shouldRetry(ctx, resp, err)
+})
+
+if err != nil {
+return nil, fmt.Errorf("couldn't move dir: %w", err)
+}
+
+return response, nil
+}
+
 func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
 request := &CopyFileRequest{
 URLs: []string{url},
@@ -473,7 +502,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, fmt.Errorf("didnt got an upload node: %w", err)
+return nil, fmt.Errorf("didn't get an upload node: %w", err)
 }

 // fs.Debugf(f, "Got Upload node")
@@ -38,8 +38,9 @@ func init() {
 Description: "1Fichier",
 NewFs: NewFs,
 Options: []fs.Option{{
-Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
-Name: "api_key",
+Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
+Name: "api_key",
+Sensitive: true,
 }, {
 Help: "If you want to download a shared folder, add this parameter.",
 Name: "shared_folder",
@@ -54,6 +55,11 @@ func init() {
 Name: "folder_password",
 Advanced: true,
 IsPassword: true,
+}, {
+Help: "Set if you wish to use CDN download links.",
+Name: "cdn",
+Default: false,
+Advanced: true,
 }, {
 Name: config.ConfigEncoding,
 Help: config.ConfigEncodingHelp,
@@ -89,6 +95,7 @@ type Options struct {
 SharedFolder string `config:"shared_folder"`
 FilePassword string `config:"file_password"`
 FolderPassword string `config:"folder_password"`
+CDN bool `config:"cdn"`
 Enc encoder.MultiEncoder `config:"encoding"`
 }

@@ -333,7 +340,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 // checking to see if there is one already - use Put() for that.
 func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
 if size > int64(300e9) {
-return nil, errors.New("File too big, cant upload")
+return nil, errors.New("File too big, can't upload")
 } else if size == 0 {
 return nil, fs.ErrorCantUploadEmptyFiles
 }
@@ -481,6 +488,51 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 return dstObj, nil
 }

+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server-side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove.
+//
+// If destination exists then return fs.ErrorDirExists.
+//
+// This is complicated by the fact that we can't use moveDir to move
+// to a different directory AND rename at the same time as it can
+// overwrite files in the source directory.
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+srcFs, ok := src.(*Fs)
+if !ok {
+fs.Debugf(srcFs, "Can't move directory - not same remote type")
+return fs.ErrorCantDirMove
+}
+
+srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
+if err != nil {
+return err
+}
+srcIDnumeric, err := strconv.Atoi(srcID)
+if err != nil {
+return err
+}
+dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
+if err != nil {
+return err
+}
+
+var resp *MoveDirResponse
+resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
+if err != nil {
+return fmt.Errorf("couldn't rename leaf: %w", err)
+}
+if resp.Status != "OK" {
+return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
+}
+
+srcFs.dirCache.FlushDir(srcRemote)
+return nil
+}
+
 // Copy src to this remote using server side move operations.
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 srcObj, ok := src.(*Object)
@@ -554,6 +606,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 var (
 _ fs.Fs = (*Fs)(nil)
 _ fs.Mover = (*Fs)(nil)
+_ fs.DirMover = (*Fs)(nil)
 _ fs.Copier = (*Fs)(nil)
 _ fs.PublicLinker = (*Fs)(nil)
 _ fs.PutUncheckeder = (*Fs)(nil)
@@ -20,6 +20,7 @@ type DownloadRequest struct {
 URL string `json:"url"`
 Single int `json:"single"`
 Pass string `json:"pass,omitempty"`
+CDN int `json:"cdn,omitempty"`
 }

 // RemoveFolderRequest is the request structure of the corresponding request
@@ -69,6 +70,22 @@ type MoveFileResponse struct {
 URLs []string `json:"urls"`
 }

+// MoveDirRequest is the request structure of the corresponding request
+type MoveDirRequest struct {
+FolderID int `json:"folder_id"`
+DestinationFolderID int `json:"destination_folder_id,omitempty"`
+DestinationUser string `json:"destination_user"`
+Rename string `json:"rename,omitempty"`
+}
+
+// MoveDirResponse is the response structure of the corresponding request
+type MoveDirResponse struct {
+Status string `json:"status"`
+Message string `json:"message"`
+OldName string `json:"old_name"`
+NewName string `json:"new_name"`
+}
+
 // CopyFileRequest is the request structure of the corresponding request
 type CopyFileRequest struct {
 URLs []string `json:"urls"`
@@ -84,6 +84,7 @@ Leave blank normally.

 Fill in to make rclone start with directory of a given ID.
 `,
+Sensitive: true,
 }, {
 Name: "permanent_token",
 Help: `Permanent Authentication Token.
@@ -97,6 +98,7 @@ These tokens are normally valid for several years.

 For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
 `,
+Sensitive: true,
 }, {
 Name: "token",
 Help: `Session Token.
@@ -106,7 +108,8 @@ usually valid for 1 hour.

 Don't set this value - rclone will set it automatically.
 `,
-Advanced: true,
+Advanced: true,
+Sensitive: true,
 }, {
 Name: "token_expiry",
 Help: `Token expiry time.
@@ -155,9 +158,9 @@ type Fs struct {
 tokenMu sync.Mutex // hold when reading the token
 token string // current access token
 tokenExpiry time.Time // time the current token expires
-tokenExpired int32 // read and written with atomic
-canCopyWithName bool // set if detected that can use fi_name in copy
-precision time.Duration // precision reported
+tokenExpired atomic.Int32
+canCopyWithName bool // set if detected that can use fi_name in copy
+precision time.Duration // precision reported
 }

 // Object describes a filefabric object
@@ -240,7 +243,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
 err = status // return the error from the RPC
 code := status.GetCode()
 if code == "login_token_expired" {
-atomic.AddInt32(&f.tokenExpired, 1)
+f.tokenExpired.Add(1)
 } else {
 for _, retryCode := range retryStatusCodes {
 if code == retryCode.code {
@@ -320,12 +323,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
 var refreshed = false
 defer func() {
 if refreshed {
-atomic.StoreInt32(&f.tokenExpired, 0)
+f.tokenExpired.Store(0)
 }
 f.tokenMu.Unlock()
 }()

-expired := atomic.LoadInt32(&f.tokenExpired) != 0
+expired := f.tokenExpired.Load() != 0
 if expired {
 fs.Debugf(f, "Token invalid - refreshing")
 }
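The filefabric changes above migrate from the function-style atomics on a plain int32 to the atomic.Int32 type introduced in Go 1.19, which cannot be read without going through its atomic methods and removes the & plumbing. A side-by-side sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Old style: a bare int32 that every reader must remember
	// to access through the atomic functions.
	var expiredOld int32
	atomic.AddInt32(&expiredOld, 1)
	fmt.Println(atomic.LoadInt32(&expiredOld) != 0)

	// New style (Go 1.19+): the type enforces atomic access.
	var expired atomic.Int32
	expired.Add(1)
	fmt.Println(expired.Load() != 0)
	expired.Store(0)
}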
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/proxy"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
)
|
||||
|
||||
@@ -48,13 +49,15 @@ func init() {
|
||||
Description: "FTP",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
||||
Required: true,
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "FTP username.",
|
||||
Default: currentUser,
|
||||
Name: "user",
|
||||
Help: "FTP username.",
|
||||
Default: currentUser,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "FTP port number.",
|
||||
@@ -172,6 +175,18 @@ Enabled by default. Use 0 to disable.`,
|
||||
If this is set and no password is supplied then rclone will ask for a password
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "socks_proxy",
|
||||
Default: "",
|
||||
Help: `Socks 5 proxy host.
|
||||
|
||||
Supports the format user:pass@host:port, user@host:port, host:port.
|
||||
|
||||
Example:
|
||||
|
||||
myUser:myPass@localhost:9005
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -216,6 +231,7 @@ type Options struct {
|
||||
ShutTimeout fs.Duration `config:"shut_timeout"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
SocksProxy string `config:"socks_proxy"`
|
||||
}
|
||||
|
||||
// Fs represents a remote FTP server
|
||||
@@ -233,7 +249,6 @@ type Fs struct {
|
||||
pool []*ftp.ServerConn
|
||||
drain *time.Timer // used to drain the pool when we stop using the connections
|
||||
tokens *pacer.TokenDispenser
|
||||
tlsConf *tls.Config
|
||||
pacer *fs.Pacer // pacer for FTP connections
|
||||
fGetTime bool // true if the ftp library accepts GetTime
|
||||
fSetTime bool // true if the ftp library accepts SetTime
|
||||
@@ -315,10 +330,17 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Return a *textproto.Error if err contains one or nil otherwise
|
||||
func textprotoError(err error) (errX *textproto.Error) {
|
||||
if errors.As(err, &errX) {
|
||||
return errX
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// returns true if this FTP error should be retried
|
||||
func isRetriableFtpError(err error) bool {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
|
||||
return true
|
||||
@@ -339,10 +361,36 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
// Get a TLS config with a unique session cache.
|
||||
//
|
||||
// We can't share session caches between connections.
|
||||
//
|
||||
// See: https://github.com/rclone/rclone/issues/7234
|
||||
func (f *Fs) tlsConfig() *tls.Config {
|
||||
var tlsConfig *tls.Config
|
||||
if f.opt.TLS || f.opt.ExplicitTLS {
|
||||
tlsConfig = &tls.Config{
|
||||
ServerName: f.opt.Host,
|
||||
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
|
||||
}
|
||||
if f.opt.TLSCacheSize > 0 {
|
||||
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
|
||||
}
|
||||
if f.opt.DisableTLS13 {
|
||||
tlsConfig.MaxVersion = tls.VersionTLS12
|
||||
}
|
||||
}
|
||||
return tlsConfig
|
||||
}
|
||||
|
||||
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")

// tls.Config for this connection only. Will be used for data
// and control connections.
tlsConfig := f.tlsConfig()

// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
@@ -350,12 +398,17 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
}
if err != nil {
return nil, err
}
// Connect using cleartext only for non TLS
if f.tlsConf == nil {
if tlsConfig == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
@@ -364,7 +417,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
tlsConn := tls.Client(conn, tlsConfig)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
@@ -386,9 +439,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PSBZ and PROT options to server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -471,8 +524,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
if tpErr := textprotoError(err); tpErr != nil {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
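The recurring change through this file replaces the `switch err.(type)` pattern with a small helper. A standalone sketch of what such a textprotoError helper plausibly looks like — the real one lives in the rclone FTP backend; this version assumes only the standard library and unwraps wrapped errors via errors.As:

```go
package main

import (
	"errors"
	"fmt"
	"net/textproto"
)

// textprotoError returns the *textproto.Error inside err, looking
// through any wrapping, or nil if there is none.
func textprotoError(err error) (errX *textproto.Error) {
	if errors.As(err, &errX) {
		return errX
	}
	return nil
}

func main() {
	err := fmt.Errorf("wrapped: %w", &textproto.Error{Code: 550, Msg: "not found"})
	if errX := textprotoError(err); errX != nil {
		fmt.Println(errX.Code) // 550, found even through wrapping
	}
}
```

Compared with a type switch, errors.As keeps working when callers wrap the FTP error with fmt.Errorf("%w"), which is likely the motivation for centralising it.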
@@ -544,19 +596,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{
@@ -569,11 +608,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
@@ -621,8 +660,7 @@ func (f *Fs) Shutdown(ctx context.Context) error {

// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorObjectNotFound
@@ -633,8 +671,7 @@ func translateErrorFile(err error) error {

// translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error {
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorDirNotFound
@@ -688,6 +725,12 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
if err == fs.ErrorObjectNotFound {
return nil, nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusBadArguments:
err = nil
}
}
return nil, err
}
if entry != nil {
@@ -925,8 +968,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
@@ -1095,7 +1137,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.fSetTime {
fs.Errorf(o.fs, "SetModTime is not supported")
fs.Debugf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
@@ -1167,8 +1209,7 @@ func (f *ftpReadCloser) Close() error {
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil
@@ -1246,13 +1287,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
if err != nil {

@@ -91,15 +91,21 @@ func init() {
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Sensitive: true,
}, {
Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.",
Sensitive: true,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -298,6 +304,15 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created

Empty folders are unsupported for bucket based remotes, this option creates an empty
object ending with "/", to persist the folder.
`,
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.
@@ -349,6 +364,7 @@ can't check the size and hash but the file contents will be decompressed.
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
UserProject string `config:"user_project"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"`
@@ -362,6 +378,7 @@ type Options struct {
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
}

// Fs represents a remote storage server
@@ -457,7 +474,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

@@ -543,6 +560,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(ctx, f)
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}

// Create a new authorized Drive client.
f.client = oAuthClient
@@ -559,7 +579,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
_, err = get.Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -619,9 +643,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
if !recurse {
list = list.Delimiter("/")
}
foundItems := 0
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
@@ -637,6 +665,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
return err
}
if !recurse {
foundItems += len(objects.Prefixes)
var object storage.Object
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
@@ -657,22 +686,29 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
}
foundItems += len(objects.Items)
for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
// is this a directory marker?
if isDirectory {
// Don't insert the root directory
if remote == directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if isDirectory {
continue // skip directory marker
}
err = fn(remote, object, false)

err = fn(remote, object, isDirectory)
if err != nil {
return err
}
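A simplified standalone sketch of the behaviour change in this listing loop: names ending in "/" (directory markers) are now kept and reported as directories instead of being skipped. The helper below compresses the real control flow (prefix slicing and root filtering are interleaved differently above); inputs are illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

// classify mimics the new behaviour: a trailing "/" marks a
// directory marker, which is kept as a directory entry with the
// slash trimmed rather than being dropped from the listing.
func classify(remote string) (name string, isDir bool) {
	isDir = remote == "" || strings.HasSuffix(remote, "/")
	if isDir {
		remote = strings.TrimRight(remote, "/")
	}
	return remote, isDir
}

func main() {
	for _, r := range []string{"a.txt", "sub/", ""} {
		name, isDir := classify(r)
		fmt.Printf("%q -> name=%q isDir=%v\n", r, name, isDir)
	}
}
```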
@@ -682,6 +718,17 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
list.PageToken(objects.NextPageToken)
}
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
// Determine whether the directory exists or not by whether it has a marker
_, err := f.readObjectInfo(ctx, bucket, directory)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}

return nil
}

@@ -725,6 +772,9 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, errors.New("can't list buckets without project number")
}
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
if f.opt.UserProject != "" {
listBuckets = listBuckets.UserProject(f.opt.UserProject)
}
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
@@ -842,10 +892,69 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.Put(ctx, in, src, options...)
}

// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
if !f.opt.DirectoryMarkers || bucket == "" {
return nil
}

// Object to be uploaded
o := &Object{
fs: f,
modTime: time.Now(),
}

for {
_, bucketPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if bucketPath == "" {
break
}
o.remote = dir + "/"

// Check to see if object already exists
_, err := o.readObjectInfo(ctx)
if err == nil {
return nil
}

// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}

// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}

return nil
}

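The walk in createDirectoryMarker above uploads one zero-length "<dir>/" object per level until it reaches the bucket root. A standalone sketch of that walk under stated assumptions: exists and upload are stand-ins for the real readObjectInfo and Update calls, and the paths are illustrative.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// ensureDirMarkers emits a zero-length marker named "<dir>/" for
// dir and each of its parents, stopping at the bucket root.
func ensureDirMarkers(dir string, exists func(string) bool, upload func(string) error) error {
	for dir != "/" && dir != "." && dir != "" {
		marker := dir + "/"
		if !exists(marker) {
			if err := upload(marker); err != nil {
				return fmt.Errorf("creating directory marker failed: %w", err)
			}
		}
		dir = path.Dir(dir)
	}
	return nil
}

func main() {
	var made []string
	_ = ensureDirMarkers("a/b/c",
		func(string) bool { return false },
		func(m string) error { made = append(made, m); return nil },
	)
	fmt.Println(strings.Join(made, " ")) // a/b/c/ a/b/ a/
}
```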
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
e := f.checkBucket(ctx, bucket)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)

}

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
}

// makeBucket creates the bucket if it doesn't exist
@@ -854,7 +963,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
_, err = list.Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -889,7 +1002,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
insertBucket = insertBucket.Context(ctx)
if f.opt.UserProject != "" {
insertBucket = insertBucket.UserProject(f.opt.UserProject)
}
_, err = insertBucket.Do()
return shouldRetry(ctx, err)
})
}, nil)
@@ -909,12 +1026,28 @@ func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
}
fs.Debugf(o, "Removing directory marker")
err := o.Remove(ctx)
if err != nil {
return fmt.Errorf("removing directory marker failed: %w", err)
}
}
if bucket == "" || directory != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
if f.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err)
})
})
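Nearly every Google Cloud Storage call builder in this diff gains the same guarded UserProject call, so requester-pays buckets are billed to the configured project. A generic sketch of that option-threading pattern; the builder type below is a hypothetical stand-in, not the real google.golang.org/api client.

```go
package main

import "fmt"

// objectsGetCall mimics the fluent builders in the Google API
// client: UserProject returns the receiver for chaining.
type objectsGetCall struct{ userProject string }

func (c *objectsGetCall) UserProject(p string) *objectsGetCall {
	c.userProject = p
	return c
}

// withUserProject mirrors the diff's repeated guard: only set the
// option when the configured value is non-empty.
func withUserProject(c *objectsGetCall, userProject string) *objectsGetCall {
	if userProject != "" {
		c = c.UserProject(userProject)
	}
	return c
}

func main() {
	c := withUserProject(&objectsGetCall{}, "my-billing-project")
	fmt.Println(c.userProject)
}
```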
@@ -936,7 +1069,7 @@ func (f *Fs) Precision() time.Duration {
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.checkBucket(ctx, dstBucket)
err := f.mkdirParent(ctx, remote)
if err != nil {
return nil, err
}
@@ -960,7 +1093,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var rewriteResponse *storage.RewriteResponse
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
rewriteRequest = rewriteRequest.Context(ctx)
if f.opt.UserProject != "" {
rewriteRequest.UserProject(f.opt.UserProject)
}
rewriteResponse, err = rewriteRequest.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1070,8 +1207,17 @@ func (o *Object) setMetaData(info *storage.Object) {
// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
return o.fs.readObjectInfo(ctx, bucket, bucketPath)
}

// readObjectInfo reads the definition for an object
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
object, err = get.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1143,7 +1289,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
copyObject = copyObject.Context(ctx)
if o.fs.opt.UserProject != "" {
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = copyObject.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1160,6 +1310,9 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.UserProject != "" {
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
}
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil {
return nil, err
@@ -1203,11 +1356,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
err := o.fs.checkBucket(ctx, bucket)
if err != nil {
return err
// Create parent dir/bucket if not saving directory marker
if !strings.HasSuffix(o.remote, "/") {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return err
}
}
modTime := src.ModTime(ctx)

@@ -1252,7 +1408,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
insertObject = insertObject.Context(ctx)
if o.fs.opt.UserProject != "" {
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = insertObject.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1267,7 +1427,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
if o.fs.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err)
})
return err

@@ -6,6 +6,7 @@ import (
"testing"

"github.com/rclone/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)

@@ -16,3 +17,17 @@ func TestIntegration(t *testing.T) {
NilObject: (*googlecloudstorage.Object)(nil),
})
}

func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestGoogleCloudStorage"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*googlecloudstorage.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}

@@ -166,6 +166,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)


@@ -23,6 +23,7 @@ func TestIntegration(t *testing.T) {
NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
},
UnimplementableObjectMethods: []string{},
}

@@ -21,6 +21,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
)

// Fs represents a HDFS server
@@ -31,8 +32,15 @@ type Fs struct {
opt Options // options for this backend
ci *fs.ConfigInfo // global config
client *hdfs.Client
pacer *fs.Pacer // pacer for API calls
}

const (
minSleep = 20 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)

// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG")
@@ -114,6 +122,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt,
ci: fs.GetConfig(ctx),
client: client,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}

f.features = (&fs.Features{

@@ -19,9 +19,10 @@ func init() {
Description: "Hadoop distributed file system",
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
Sensitive: true,
}, {
Name: "username",
Help: "Hadoop user name.",
@@ -29,6 +30,7 @@ func init() {
Value: "root",
Help: "Connect to hdfs as root.",
}},
Sensitive: true,
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode.
@@ -36,15 +38,16 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Advanced: true,
Advanced: true,
Sensitive: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy.

Specifies whether or not authentication, data signature integrity
checks, and wire encryption is required when communicating the the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
checks, and wire encryption are required when communicating with
the datanodes. Possible values are 'authentication', 'integrity'
and 'privacy'. Used only with KERBEROS enabled.`,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",

@@ -5,10 +5,12 @@ package hdfs

import (
"context"
"errors"
"io"
"path"
"time"

"github.com/colinmarc/hdfs/v2"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -106,7 +108,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
realpath := o.fs.realpath(src.Remote())
realpath := o.fs.realpath(o.remote)
dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath)

@@ -141,7 +143,23 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}

err = out.Close()
// If the datanodes have acknowledged all writes but not yet
// to the namenode, FileWriter.Close can return ErrReplicating
// (wrapped in an os.PathError). This indicates that all data
// has been written, but the lease is still open for the file.
//
// It is safe in this case to either ignore the error (and let
// the lease expire on its own) or to call Close multiple
// times until it completes without an error. The Java client,
// for context, always chooses to retry, with exponential
// backoff.
err = o.fs.pacer.Call(func() (bool, error) {
err := out.Close()
if err == nil {
return false, nil
}
return errors.Is(err, hdfs.ErrReplicating), err
})
if err != nil {
cleanup()
return err

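A minimal standalone sketch of the retry-on-close pattern introduced above: keep calling Close with backoff while the error is the replication sentinel. closeFn stands in for hdfs.FileWriter.Close and errReplicating for hdfs.ErrReplicating; the real code delegates the backoff to rclone's pacer instead of sleeping directly.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errReplicating = errors.New("replication in progress")

// closeWithRetry retries closeFn with exponential backoff while the
// returned error wraps errReplicating, like the Java HDFS client.
func closeWithRetry(closeFn func() error, maxTries int) error {
	delay := 20 * time.Millisecond
	for try := 0; ; try++ {
		err := closeFn()
		if err == nil || !errors.Is(err, errReplicating) || try >= maxTries {
			return err
		}
		time.Sleep(delay)
		delay *= 2 // exponential backoff
	}
}

func main() {
	calls := 0
	err := closeWithRetry(func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("close: %w", errReplicating)
		}
		return nil
	}, 10)
	fmt.Println(calls, err) // 3 <nil>
}
```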
@@ -294,15 +294,6 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
return &result, nil
}

// copyDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}

// moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//

@@ -2,7 +2,7 @@
package hidrive

// FIXME HiDrive only supports file or folder names of 255 characters or less.
// Operations that create files oder folder with longer names will throw a HTTP error:
// Operations that create files or folders with longer names will throw an HTTP error:
// - 422 Unprocessable Entity
// A more graceful way for rclone to handle this may be desirable.

@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("could not access root-prefix: %w", err)
}
if item.Type != api.HiDriveObjectTypeDirectory {
return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
return nil, errors.New("the root-prefix needs to point to a valid directory or be empty")
}
}


@@ -495,7 +495,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, timeUnset))
add(fs.NewDir(remote, time.Time{}))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
@@ -507,7 +507,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
add(fs.NewDir(remote, timeUnset))
add(fs.NewDir(remote, time.Time{}))
} else {
in <- remote
}

@@ -133,11 +133,13 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
},

Options: []fs.Option{{
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Sensitive: true,
}, {
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Sensitive: true,
}, {
// their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
Name: "endpoint",

@@ -67,13 +67,21 @@ const (
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0

teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaseCloudClientID = "desktop"

telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
telianoCloudClientID = "desktop"

tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop"

onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
onlimeCloudClientID = "desktop"
)

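The per-brand constants above all feed the same oauth2.Config shape. A sketch of a table-driven alternative under stated assumptions: the map keys and configFor helper are illustrative, while the endpoint URLs, "desktop" client ID, and scopes mirror the values in the diff.

```go
package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

// whitelabelEndpoints maps a provider name to its OpenID Connect
// endpoints (two of the brands from the diff, for illustration).
var whitelabelEndpoints = map[string]oauth2.Endpoint{
	"telia_se": {
		AuthURL:  "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth",
		TokenURL: "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token",
	},
	"telia_no": {
		AuthURL:  "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth",
		TokenURL: "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token",
	},
}

func configFor(provider string) (*oauth2.Config, error) {
	ep, ok := whitelabelEndpoints[provider]
	if !ok {
		return nil, fmt.Errorf("unknown provider %q", provider)
	}
	return &oauth2.Config{
		ClientID: "desktop",
		Endpoint: ep,
		Scopes:   []string{"openid", "jotta-default", "offline_access"},
	}, nil
}

func main() {
	c, err := configFor("telia_no")
	fmt.Println(c.Endpoint.TokenURL, err)
}
```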
// Register with Fs
@@ -84,7 +92,7 @@ func init() {
Description: "Jottacloud",
NewFs: NewFs,
Config: Config,
Options: []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -119,7 +127,7 @@ func init() {
Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}

@@ -134,11 +142,17 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
Value: "legacy",
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, {
Value: "telia",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
Value: "telia_se",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
}, {
Value: "telia_no",
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
}, {
Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
}, {
Value: "onlime",
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
@@ -231,17 +245,32 @@ machines.`)
return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "telia": // telia cloud config
case "telia_se": // telia_se cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
m.Set(configClientID, teliaseCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
},
ClientID: teliaCloudClientID,
ClientID: teliaseCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "telia_no": // telia_no cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
},
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
@@ -261,6 +290,21 @@ machines.`)
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "onlime": // onlime cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
},
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
@@ -1838,12 +1882,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil {
// if the object exists delete it
err = o.remove(ctx, true)
if err != nil {
if err != nil && err != fs.ErrorObjectNotFound {
// if delete failed then report that, unless it was because the file did not exist after all
return fmt.Errorf("failed to remove old object: %w", err)
}
}
// if the object does not exist we can just continue but if the error is something different we should report that
if err != fs.ErrorObjectNotFound {
} else if err != fs.ErrorObjectNotFound {
// if the object does not exist we can just continue but if the error is something different we should report that
return err
}
}
@@ -1930,7 +1974,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(ctx, true)
}

@@ -1951,10 +1995,17 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
opts.Parameters.Set("dl", "true")
}

return o.fs.pacer.Call(func() (bool, error) {
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// attempting to hard delete will fail if path does not exist, but standard delete will succeed
if apiErr.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}

// Remove an object

@@ -61,9 +61,10 @@ func init() {
Default: true,
Advanced: true,
}, {
Name: "user",
Help: "Your user name.",
Required: true,
Name: "user",
Help: "Your user name.",
Required: true,
Sensitive: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
@@ -376,7 +377,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for i, file := range files {
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
entries[i] = fs.NewDir(remote, time.Time{})
} else {
entries[i] = &Object{
fs: f,

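Several hunks above replace time.Unix(0, 0) with time.Time{} for "unset" directory times. A quick illustration of why: only the zero value reports IsZero, so downstream code can reliably detect an unset time instead of mistaking it for the 1970 epoch.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	fmt.Println(time.Unix(0, 0).IsZero()) // false: a real instant, the 1970 epoch
	fmt.Println(time.Time{}.IsZero())     // true: genuinely unset
}
```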
@@ -13,6 +13,7 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"unicode/utf8"

@@ -243,7 +244,7 @@ type Fs struct {
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
xattrSupported int32 // whether xattrs are supported (atomic access)
xattrSupported atomic.Int32 // whether xattrs are supported

// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -266,7 +267,10 @@ type Object struct {

// ------------------------------------------------------------

var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
var (
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)

// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -288,7 +292,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
lstat: os.Lstat,
}
if xattrSupported {
f.xattrSupported = 1
f.xattrSupported.Store(1)
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
@@ -300,6 +304,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
PartialUploads: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -310,7 +315,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
// Check to see if this is a .rclonelink if not found
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
}
if err == nil && f.isRegular(fi.Mode()) {
// Handle the odd case, that a symlink was specified by name without the link suffix
if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
return nil, errLinksNeedsSuffix
}
// It is a file, so use the parent as the root
f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent
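A condensed sketch of the suffix rule added above: with --links, a root ending in ".rclonelink" resolves to the symlink itself, while an unsuffixed root that turns out to be a symlink is rejected. resolveRoot and statFn are hypothetical stand-ins for the backend's logic and lstat function.

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"strings"
)

const linkSuffix = ".rclonelink"

var errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")

// resolveRoot strips the suffix to find the on-disk name, then
// rejects unsuffixed names that point at symlinks.
func resolveRoot(root string, statFn func(string) (os.FileInfo, error)) (string, error) {
	hasLinkSuffix := strings.HasSuffix(root, linkSuffix)
	name := strings.TrimSuffix(root, linkSuffix)
	fi, err := statFn(name)
	if err != nil {
		return "", err
	}
	if !hasLinkSuffix && fi.Mode()&os.ModeSymlink != 0 {
		return "", errLinksNeedsSuffix
	}
	return name, nil
}

func main() {
	// Probe the current directory: not a symlink, so no error.
	name, err := resolveRoot(".", os.Lstat)
	fmt.Println(name, err)
}
```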
@@ -503,7 +517,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
}
err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -540,11 +554,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
mode = fi.Mode()
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -557,6 +566,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !filter.IncludeRemote(newRemote) {
continue
}
fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil {
return nil, err
@@ -628,7 +642,13 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return os.Remove(f.localPath(dir))
localPath := f.localPath(dir)
if fi, err := os.Stat(localPath); err != nil {
return err
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
return os.Remove(localPath)
}

// Precision of the file system

@@ -19,6 +19,7 @@ import (
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
@@ -146,6 +147,20 @@ func TestSymlink(t *testing.T) {
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
require.Equal(t, fs.ErrorObjectNotFound, err)

// Check that NewFs works with the suffixed version and --links
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
"links": "true",
})
require.Equal(t, fs.ErrorIsFile, err)
require.Equal(t, dir, f2.(*Fs).root)

// Check that NewFs doesn't see the non suffixed version with --links
f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
"links": "true",
})
require.Equal(t, errLinksNeedsSuffix, err)
require.Nil(t, f2)

// Check reading the object
in, err := o.Open(ctx)
require.NoError(t, err)
@@ -397,7 +412,7 @@ func TestFilter(t *testing.T) {
require.Equal(t, "[included]", fmt.Sprint(entries))
}

func TestFilterSymlink(t *testing.T) {
func testFilterSymlink(t *testing.T, copyLinks bool) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
@@ -411,15 +426,23 @@ func TestFilterSymlink(t *testing.T) {
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))

// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat

// Set fs into "-l" mode
// f.opt.FollowSymlinks = false
// f.opt.TranslateSymlinks = true
// f.lstat = os.Lstat
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
if copyLinks {
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
} else {
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
}

// Check set up for filtering
assert.True(t, f.Features().FilterAware)
@@ -431,21 +454,35 @@ func TestFilterSymlink(t *testing.T) {
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included.file"))
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir/**"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
if copyLinks {
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
} else {
require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
}
require.NoError(t, fi.AddRule("- *"))

// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)

// Check 1 global errors one for each dangling symlink
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
if copyLinks {
// Check 1 global errors one for each dangling symlink
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
} else {
// Check 0 global errors as dangling symlink copied properly
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
}
accounting.Stats(ctx).ResetErrors()

sort.Sort(entries)
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}

// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
@@ -456,7 +493,11 @@ func TestFilterSymlink(t *testing.T) {
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")

sort.Sort(entries)
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}

// Check listing through a symlink still works
entries, err = f.List(ctx, "included.dir")
@@ -466,3 +507,51 @@ func TestFilterSymlink(t *testing.T) {
sort.Sort(entries)
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
}

func TestFilterSymlinkCopyLinks(t *testing.T) {
testFilterSymlink(t, true)
}

func TestFilterSymlinkLinks(t *testing.T) {
testFilterSymlink(t, false)
}

func TestCopySymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)

// Create a file and a symlink to it
r.WriteFile("src/file.txt", "hello world", when)
require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()

// Set fs into "-l/--links" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat

// Create dst
require.NoError(t, f.Mkdir(ctx, "dst"))

// Do copy from src into dst
src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
require.NoError(t, err)
require.NotNil(t, src)
dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
require.NoError(t, err)
require.NotNil(t, dst)

// Test that we made a symlink and it has the right contents
dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)
}

@@ -5,6 +5,7 @@ package local

import (
"fmt"
"runtime"
"sync"
"time"

@@ -23,7 +24,7 @@ func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
var stat unix.Statx_t
if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
if runtime.GOOS != "android" && unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
readMetadataFromFileFn = readMetadataFromFileStatx
} else {
readMetadataFromFileFn = readMetadataFromFileFstatat
@@ -91,7 +92,7 @@ func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
// The types of t.Sec and t.Nsec vary from int32 to int64 on
// different Linux architectures so we need to cast them to
// int64 here and hence need to quiet the linter about
// unecessary casts.
// unnecessary casts.
//
// nolint: unconvert
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))

@@ -6,7 +6,6 @@ package local
import (
"fmt"
"strings"
"sync/atomic"
"syscall"

"github.com/pkg/xattr"
@@ -28,7 +27,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
// Show xattrs not supported
if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
if f.xattrSupported.CompareAndSwap(1, 0) {
fs.Errorf(f, "xattrs not supported - disabling: %v", err)
}
return true
@@ -41,7 +40,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// It doesn't return any attributes owned by this backend in
// metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
return nil, nil
}
var list []string
@@ -90,7 +89,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
//
// It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
return nil
}
for k, value := range metadata {

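The xattr hunks above migrate a bare int32 field plus atomic.* functions to Go 1.19's atomic.Int32. A small sketch of the difference: the methods carry the atomicity with the field, so non-atomic access can't slip in. The fsState type is illustrative.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type fsState struct {
	xattrSupported atomic.Int32 // 1 = supported, 0 = disabled
}

func main() {
	var f fsState
	f.xattrSupported.Store(1)
	// Equivalent of atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0):
	if f.xattrSupported.CompareAndSwap(1, 0) {
		fmt.Println("xattrs disabled once")
	}
	fmt.Println(f.xattrSupported.Load()) // 0
}
```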
@@ -85,10 +85,11 @@ func init() {
|
||||
Name: "mailru",
|
||||
Description: "Mail.ru Cloud",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name (usually email).",
|
||||
Required: true,
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name (usually email).",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: `Password.
|
||||
@@ -213,7 +214,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
|
||||
encoder.EncodeWin | // :?"*<>|
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -58,9 +58,10 @@ func init() {
|
||||
Description: "Mega",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name.",
|
||||
Required: true,
|
||||
Name: "user",
|
||||
Help: "User name.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
@@ -90,7 +91,7 @@ permanently delete objects instead.`,
|
||||
MEGA uses plain text HTTP connections by default.
|
||||
Some ISPs throttle HTTP connections, this causes transfers to become very slow.
|
||||
Enabling this will force MEGA to use HTTPS for all transfers.
|
||||
HTTPS is normally not necesary since all data is already encrypted anyway.
|
||||
HTTPS is normally not necessary since all data is already encrypted anyway.
|
||||
Enabling it will increase CPU usage and add network overhead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
@@ -205,7 +206,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
|
||||
// cache *mega.Mega on username so we can re-use and share
|
||||
// cache *mega.Mega on username so we can reuse and share
|
||||
// them between remotes. They are expensive to make as they
|
||||
// contain all the objects and sharing the objects makes the
|
||||
// move code easier as we don't have to worry about mixing
|
||||
|
||||
@@ -65,11 +65,13 @@ HTTP is provided primarily for debugging purposes.`,
            Help: `Domain+path of NetStorage host to connect to.

Format should be ` + "`<domain>/<internal folders>`",
-           Required: true,
+           Required:  true,
+           Sensitive: true,
        }, {
-           Name:     "account",
-           Help:     "Set the NetStorage account name",
-           Required: true,
+           Name:      "account",
+           Help:      "Set the NetStorage account name",
+           Required:  true,
+           Sensitive: true,
        }, {
            Name: "secret",
            Help: `Set the NetStorage account secret/G2O key for authentication.
@@ -819,6 +821,8 @@ func (f *Fs) getAuth(req *http.Request) error {
    // Set Authorization header
    dataHeader := generateDataHeader(f)
    path := req.URL.RequestURI()
+   //lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
+   //nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
    actionHeader := req.Header["X-Akamai-ACS-Action"][0]
    fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
    req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)

@@ -131,10 +131,11 @@ Note that the chunks will be buffered into memory.`,
            Default:  defaultChunkSize,
            Advanced: true,
        }, {
-           Name:     "drive_id",
-           Help:     "The ID of the drive to use.",
-           Default:  "",
-           Advanced: true,
+           Name:      "drive_id",
+           Help:      "The ID of the drive to use.",
+           Default:   "",
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name:    "drive_type",
            Help:    "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
@@ -148,7 +149,8 @@ This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "access_scopes",
            Help: `Set scopes to be requested by rclone.
@@ -196,7 +198,9 @@ listing, set this option.`,
        }, {
            Name:    "server_side_across_configs",
            Default: false,
-           Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.
+           Help: `Deprecated: use --server-side-across-configs instead.
+
+Allow server-side operations (e.g. copy) to work across different onedrive configs.

This will only work if you are copying between two OneDrive *Personal* drives AND
the files to copy are already shared between them. In other cases, rclone will
@@ -258,14 +262,15 @@ this flag there.

At the time of writing this only works with OneDrive personal paid accounts.
`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name:    "hash_type",
            Default: "auto",
            Help: `Specify the hash in use for the backend.

This specifies the hash type in use. If set to "auto" it will use the
-default hash which is is QuickXorHash.
+default hash which is QuickXorHash.

Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
@@ -301,6 +306,24 @@ rclone.
                Help:  "None - don't use any hashes",
            }},
            Advanced: true,
+       }, {
+           Name:    "av_override",
+           Default: false,
+           Help: `Allows download of files the server thinks have a virus.
+
+The onedrive/sharepoint server may check files uploaded with an Anti
+Virus checker. If it detects any potential viruses or malware it will
+block download of the file.
+
+In this case you will see a message like this
+
+    server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
+
+If you are 100% sure you want to download this file anyway then use
+the --onedrive-av-override flag, or av_override = true in the config
+file.
+`,
+           Advanced: true,
        }, {
            Name:     config.ConfigEncoding,
            Help:     config.ConfigEncodingHelp,
@@ -549,15 +572,18 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
    case "url":
        return fs.ConfigInput("url_end", "config_site_url", `Site URL

-Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
+Examples:
+- "mysite"
+- "https://XXX.sharepoint.com/sites/mysite"
+- "https://XXX.sharepoint.com/teams/ID"
`)
    case "url_end":
        siteURL := config.Result
-       re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
+       re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
        match := re.FindStringSubmatch(siteURL)
        if len(match) == 2 {
            return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-               relativePath: "/sites/" + match[1],
+               relativePath: match[1],
            })
        }
        return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
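The widened regexp above now captures the whole path component, so /teams/ and other SharePoint layouts work as well as /sites/. A quick standalone check (not rclone code):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
    for _, u := range []string{
        "https://contoso.sharepoint.com/sites/mysite",
        "https://contoso.sharepoint.com/teams/ID",
    } {
        // FindStringSubmatch returns [full match, capture group 1]
        if m := re.FindStringSubmatch(u); len(m) == 2 {
            fmt.Println(m[1]) // "/sites/mysite", then "/teams/ID"
        }
    }
}
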
@@ -640,6 +666,7 @@ type Options struct {
    LinkType     string               `config:"link_type"`
    LinkPassword string               `config:"link_password"`
    HashType     string               `config:"hash_type"`
+   AVOverride   bool                 `config:"av_override"`
    Enc          encoder.MultiEncoder `config:"encoding"`
}

@@ -1724,6 +1751,10 @@ func (f *Fs) CleanUp(ctx context.Context) error {
+   token := make(chan struct{}, f.ci.Checkers)
+   var wg sync.WaitGroup
    err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
        if err != nil {
            fs.Errorf(f, "Failed to list %q: %v", path, err)
            return nil
        }
        err = entries.ForObjectError(func(obj fs.Object) error {
            o, ok := obj.(*Object)
            if !ok {
@@ -1962,12 +1993,20 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    var resp *http.Response
    opts := o.fs.newOptsCall(o.id, "GET", "/content")
    opts.Options = options
+   if o.fs.opt.AVOverride {
+       opts.Parameters = url.Values{"AVOverride": {"1"}}
+   }

    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(ctx, &opts)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
+       if resp != nil {
+           if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
+               err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
+           }
+       }
        return nil, err
    }

@@ -61,7 +61,7 @@ func New() hash.Hash {
func (q *quickXorHash) Write(p []byte) (n int, err error) {
    var i int
    // fill last remain
-   lastRemain := int(q.size) % dataSize
+   lastRemain := q.size % dataSize
    if lastRemain != 0 {
        i += xorBytes(q.data[lastRemain:], p)
    }

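For context, lastRemain is the write offset into the hash's circular data buffer: the total bytes written so far, modulo the buffer length. A trivial illustration (dataSize here is a stand-in value, not the constant from quickxorhash.go):

package main

import "fmt"

const dataSize = 20 // illustrative only

func main() {
    // After 50 bytes, the next byte XORs in at offset 50 % 20 = 10,
    // which is exactly what lastRemain computes before xorBytes runs.
    size := 50
    fmt.Println(size % dataSize) // 10
}
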
@@ -42,9 +42,10 @@ func init() {
        Description: "OpenDrive",
        NewFs:       NewFs,
        Options: []fs.Option{{
-           Name:     "username",
-           Help:     "Username.",
-           Required: true,
+           Name:      "username",
+           Help:      "Username.",
+           Required:  true,
+           Sensitive: true,
        }, {
            Name:       "password",
            Help:       "Password.",
@@ -766,6 +767,17 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
+       if apiError, ok := err.(*Error); ok {
+           // Work around a bug maybe in opendrive or maybe in rclone.
+           //
+           // We should know whether the folder exists or not by the call to
+           // FindDir above so exactly why it is not found here is a mystery.
+           //
+           // This manifests as a failure in fs/sync TestSyncOverlapWithFilter
+           if apiError.Info.Message == "Folder is already deleted" {
+               return fs.DirEntries{}, nil
+           }
+       }
        return nil, fmt.Errorf("failed to get folder list: %w", err)
    }

@@ -13,7 +13,6 @@ import (

    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/objectstorage"
-   "github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
)

const (
@@ -128,18 +127,3 @@ func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
        request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
    }
}
-
-func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
-   if fs.opt.SSEKMSKeyID != "" {
-       request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
-   }
-   if fs.opt.SSECustomerAlgorithm != "" {
-       request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
-   }
-   if fs.opt.SSECustomerKey != "" {
-       request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
-   }
-   if fs.opt.SSECustomerKeySha256 != "" {
-       request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
-   }
-}

@@ -6,6 +6,7 @@ package oracleobjectstorage
import (
    "context"
    "fmt"
+   "sort"
    "strings"
    "time"

@@ -196,6 +197,32 @@ func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
    uploads []*objectstorage.MultipartUpload, err error) {
+   return f.listMultipartUploadsObject(ctx, bucketName, directory, false)
+}
+
+// findLatestMultipartUpload finds the most recent outstanding multipart upload for (bucket, key)
+//
+// Note that rather lazily we treat key as a prefix, so it matches
+// directories and objects. This could surprise the user if they ask
+// for "dir" and it returns "dirKey"
+func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) (
+   uploads []*objectstorage.MultipartUpload, err error) {
+   pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true)
+   if err != nil {
+       return nil, err
+   }
+
+   if len(pastUploads) > 0 {
+       sort.Slice(pastUploads, func(i, j int) bool {
+           return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time)
+       })
+       return pastUploads[:1], nil
+   }
+   return nil, err
+}
+
+func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) (
+   uploads []*objectstorage.MultipartUpload, err error) {

    uploads = []*objectstorage.MultipartUpload{}
    req := objectstorage.ListMultipartUploadsRequest{
@@ -217,7 +244,13 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory str
        if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
            continue
        }
-       uploads = append(uploads, &response.Items[index])
+       if exact {
+           if *item.Object == directory {
+               uploads = append(uploads, &response.Items[index])
+           }
+       } else {
+           uploads = append(uploads, &response.Items[index])
+       }
    }
    if response.OpcNextPage == nil {
        break
@@ -226,3 +259,34 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory str
    }
    return uploads, nil
}
+
+func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) (
+   uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
+   uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary)
+   req := objectstorage.ListMultipartUploadPartsRequest{
+       NamespaceName: common.String(f.opt.Namespace),
+       BucketName:    common.String(bucketName),
+       ObjectName:    common.String(bucketPath),
+       UploadId:      common.String(uploadID),
+       Limit:         common.Int(1000),
+   }
+
+   var response objectstorage.ListMultipartUploadPartsResponse
+   for {
+       err = f.pacer.Call(func() (bool, error) {
+           response, err = f.srv.ListMultipartUploadParts(ctx, req)
+           return shouldRetry(ctx, response.HTTPResponse(), err)
+       })
+       if err != nil {
+           return uploadedParts, err
+       }
+       for _, item := range response.Items {
+           uploadedParts[*item.PartNumber] = item
+       }
+       if response.OpcNextPage == nil {
+           break
+       }
+       req.Page = response.OpcNextPage
+   }
+   return uploadedParts, nil
+}

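A compact restatement (not rclone code) of the filter rules in listMultipartUploadsObject above: a prefix match normally, an exact object-name match when exact is set.

package main

import (
    "fmt"
    "strings"
)

func matchUpload(object, directory string, exact bool) bool {
    // mirrors the skip condition in the listing loop
    if directory != "" && !strings.HasPrefix(object, directory) {
        return false
    }
    // exact mode additionally requires equality
    return !exact || object == directory
}

func main() {
    fmt.Println(matchUpload("dir/file", "dir", false)) // true  (prefix)
    fmt.Println(matchUpload("dir/file", "dir", true))  // false (not exact)
    fmt.Println(matchUpload("dir", "dir", true))       // true
}
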
441  backend/oracleobjectstorage/multipart.go  Normal file
@@ -0,0 +1,441 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
    "context"
    "crypto/md5"
    "encoding/base64"
    "encoding/hex"
    "fmt"
    "io"
    "strings"
    "sync"
    "time"

    "github.com/ncw/swift/v2"
    "github.com/rclone/rclone/lib/multipart"
    "github.com/rclone/rclone/lib/pool"
    "golang.org/x/net/http/httpguts"

    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/objectstorage"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/chunksize"
    "github.com/rclone/rclone/fs/hash"
)

var warnStreamUpload sync.Once

// Info needed for an upload
type uploadInfo struct {
    req       *objectstorage.PutObjectRequest
    md5sumHex string
}

type objectChunkWriter struct {
    chunkSize       int64
    size            int64
    f               *Fs
    bucket          *string
    key             *string
    uploadID        *string
    partsToCommit   []objectstorage.CommitMultipartUploadPartDetails
    partsToCommitMu sync.Mutex
    existingParts   map[int]objectstorage.MultipartUploadPartSummary
    eTag            string
    md5sMu          sync.Mutex
    md5s            []byte
    ui              uploadInfo
    o               *Object
}

func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
    _, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
        Open:        o.fs,
        OpenOptions: options,
    })
    return err
}

// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(
    ctx context.Context,
    remote string,
    src fs.ObjectInfo,
    options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
    // Temporary Object under construction
    o := &Object{
        fs:     f,
        remote: remote,
    }
    ui, err := o.prepareUpload(ctx, src, options)
    if err != nil {
        return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
    }

    uploadParts := f.opt.MaxUploadParts
    if uploadParts < 1 {
        uploadParts = 1
    } else if uploadParts > maxUploadParts {
        uploadParts = maxUploadParts
    }
    size := src.Size()

    // calculate size of parts
    chunkSize := f.opt.ChunkSize

    // size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
    // buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
    // 48 GiB which seems like a not too unreasonable limit.
    if size == -1 {
        warnStreamUpload.Do(func() {
            fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
                f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
        })
    } else {
        chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
    }

    uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req)
    if err != nil {
        return info, nil, fmt.Errorf("create multipart upload request failed: %w", err)
    }
    bucketName, bucketPath := o.split()
    chunkWriter := &objectChunkWriter{
        chunkSize:     int64(chunkSize),
        size:          size,
        f:             f,
        bucket:        &bucketName,
        key:           &bucketPath,
        uploadID:      &uploadID,
        existingParts: existingParts,
        ui:            ui,
        o:             o,
    }
    info = fs.ChunkWriterInfo{
        ChunkSize:         int64(chunkSize),
        Concurrency:       o.fs.opt.UploadConcurrency,
        LeavePartsOnError: o.fs.opt.LeavePartsOnError,
    }
    fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID)
    return info, chunkWriter, err
}

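The "48 GiB" figure in the comment above is just chunk size times part limit. A back-of-envelope check:

package main

import "fmt"

func main() {
    const chunkSize = 5 * 1024 * 1024 // default 5 MiB chunks
    const maxParts = 10000            // OCI part limit
    max := int64(chunkSize) * int64(maxParts)
    fmt.Printf("%d bytes = %.1f GiB\n", max, float64(max)/(1<<30))
    // prints 52428800000 bytes = 48.8 GiB
}
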
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
    if chunkNumber < 0 {
        err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
        return -1, err
    }
    // Only account after the checksum reads have been done
    if do, ok := reader.(pool.DelayAccountinger); ok {
        // To figure out this number, do a transfer and if the accounted size is 0 or a
        // multiple of what it should be, increase or decrease this number.
        do.DelayAccounting(2)
    }
    m := md5.New()
    currentChunkSize, err := io.Copy(m, reader)
    if err != nil {
        return -1, err
    }
    // If no data read, don't write the chunk
    if currentChunkSize == 0 {
        return 0, nil
    }
    md5sumBinary := m.Sum([]byte{})
    w.addMd5(&md5sumBinary, int64(chunkNumber))
    md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])

    // Object storage requires 1 <= PartNumber <= 10000
    ossPartNumber := chunkNumber + 1
    if existing, ok := w.existingParts[ossPartNumber]; ok {
        if md5sum == *existing.Md5 {
            fs.Debugf(w.o, "matched uploaded part found, part num %d, skipping part, md5=%v", *existing.PartNumber, md5sum)
            w.addCompletedPart(existing.PartNumber, existing.Etag)
            return currentChunkSize, nil
        }
    }
    req := objectstorage.UploadPartRequest{
        NamespaceName: common.String(w.f.opt.Namespace),
        BucketName:    w.bucket,
        ObjectName:    w.key,
        UploadId:      w.uploadID,
        UploadPartNum: common.Int(ossPartNumber),
        ContentLength: common.Int64(currentChunkSize),
        ContentMD5:    common.String(md5sum),
    }
    w.o.applyPartUploadOptions(w.ui.req, &req)
    var resp objectstorage.UploadPartResponse
    err = w.f.pacer.Call(func() (bool, error) {
        // req.UploadPartBody = io.NopCloser(bytes.NewReader(buf))
        // rewind the reader on retry and after reading md5
        _, err = reader.Seek(0, io.SeekStart)
        if err != nil {
            return false, err
        }
        req.UploadPartBody = io.NopCloser(reader)
        resp, err = w.f.srv.UploadPart(ctx, req)
        if err != nil {
            if ossPartNumber <= 8 {
                return shouldRetry(ctx, resp.HTTPResponse(), err)
            }
            // retry all chunks once have done the first few
            return true, err
        }
        return false, err
    })
    if err != nil {
        fs.Errorf(w.o, "multipart upload failed to upload part:%d err: %v", ossPartNumber, err)
        return -1, fmt.Errorf("multipart upload failed to upload part: %w", err)
    }
    w.addCompletedPart(&ossPartNumber, resp.ETag)
    return currentChunkSize, err

}

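The resume fast-path in WriteChunk only reuses a server-side part when its base64 MD5 matches. A simplified sketch of that decision (plain strings instead of the SDK's part summaries):

package main

import "fmt"

// shouldSkip reports whether an already-uploaded part can be reused
// instead of re-uploading the chunk.
func shouldSkip(existing map[int]string, partNum int, md5sum string) bool {
    got, ok := existing[partNum]
    return ok && got == md5sum
}

func main() {
    existing := map[int]string{1: "abc=", 2: "def="}
    fmt.Println(shouldSkip(existing, 1, "abc=")) // true: reuse part 1
    fmt.Println(shouldSkip(existing, 2, "zzz=")) // false: re-upload part 2
}
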
// add a part number and etag to the completed parts
func (w *objectChunkWriter) addCompletedPart(partNum *int, eTag *string) {
    w.partsToCommitMu.Lock()
    defer w.partsToCommitMu.Unlock()
    w.partsToCommit = append(w.partsToCommit, objectstorage.CommitMultipartUploadPartDetails{
        PartNum: partNum,
        Etag:    eTag,
    })
}

func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
    req := objectstorage.CommitMultipartUploadRequest{
        NamespaceName: common.String(w.f.opt.Namespace),
        BucketName:    w.bucket,
        ObjectName:    w.key,
        UploadId:      w.uploadID,
    }
    req.PartsToCommit = w.partsToCommit
    var resp objectstorage.CommitMultipartUploadResponse
    err = w.f.pacer.Call(func() (bool, error) {
        resp, err = w.f.srv.CommitMultipartUpload(ctx, req)
        // if multipart is corrupted, we will abort the uploadId
        if isMultiPartUploadCorrupted(err) {
            fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadID)
            _ = w.Abort(ctx)
            return false, err
        }
        return shouldRetry(ctx, resp.HTTPResponse(), err)
    })
    if err != nil {
        return err
    }
    w.eTag = *resp.ETag
    hashOfHashes := md5.Sum(w.md5s)
    wantMultipartMd5 := fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(w.partsToCommit))
    gotMultipartMd5 := *resp.OpcMultipartMd5
    if wantMultipartMd5 != gotMultipartMd5 {
        fs.Errorf(w.o, "multipart upload corrupted: multipart md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
        return fmt.Errorf("multipart upload corrupted: md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
    }
    fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadID, wantMultipartMd5, gotMultipartMd5)
    return nil
}

func isMultiPartUploadCorrupted(err error) bool {
    if err == nil {
        return false
    }
    // Check if this is an oci-go-sdk service error, and if it is a multipart commit error
    if ociError, ok := err.(common.ServiceError); ok {
        // InvalidUploadPart means a part got corrupted, so the upload should be aborted
        if ociError.GetCode() == "InvalidUploadPart" {
            return true
        }
    }
    return false
}

func (w *objectChunkWriter) Abort(ctx context.Context) error {
    fs.Debugf(w.o, "Cancelling multipart upload")
    err := w.o.fs.abortMultiPartUpload(
        ctx,
        w.bucket,
        w.key,
        w.uploadID)
    if err != nil {
        fs.Debugf(w.o, "Failed to cancel multipart upload: %v", err)
    } else {
        fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadID)
    }
    return err
}

// addMd5 adds a binary md5 to the md5 calculated so far
func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
    w.md5sMu.Lock()
    defer w.md5sMu.Unlock()
    start := chunkNumber * md5.Size
    end := start + md5.Size
    if extend := end - int64(len(w.md5s)); extend > 0 {
        w.md5s = append(w.md5s, make([]byte, extend)...)
    }
    copy(w.md5s[start:end], (*md5binary)[:])
}

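The Close method above verifies OCI's composite checksum: the base64 of the MD5 of all part MD5s concatenated in order, suffixed with "-<number of parts>". A standalone recomputation of the same scheme:

package main

import (
    "crypto/md5"
    "encoding/base64"
    "fmt"
)

// compositeMd5 reproduces the opc-multipart-md5 construction checked
// against resp.OpcMultipartMd5 above.
func compositeMd5(parts [][]byte) string {
    var all []byte
    for _, p := range parts {
        all = append(all, p...) // each p is a raw 16-byte part MD5
    }
    sum := md5.Sum(all)
    return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(sum[:]), len(parts))
}

func main() {
    p1 := md5.Sum([]byte("part one"))
    p2 := md5.Sum([]byte("part two"))
    fmt.Println(compositeMd5([][]byte{p1[:], p2[:]})) // "<base64>-2"
}
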
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
    bucket, bucketPath := o.split()

    ui.req = &objectstorage.PutObjectRequest{
        NamespaceName: common.String(o.fs.opt.Namespace),
        BucketName:    common.String(bucket),
        ObjectName:    common.String(bucketPath),
    }

    // Set the mtime in the metadata
    modTime := src.ModTime(ctx)
    // Fetch metadata if --metadata is in use
    meta, err := fs.GetMetadataOptions(ctx, src, options)
    if err != nil {
        return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
    }
    ui.req.OpcMeta = make(map[string]string, len(meta)+2)
    // merge metadata into request and user metadata
    for k, v := range meta {
        pv := common.String(v)
        k = strings.ToLower(k)
        switch k {
        case "cache-control":
            ui.req.CacheControl = pv
        case "content-disposition":
            ui.req.ContentDisposition = pv
        case "content-encoding":
            ui.req.ContentEncoding = pv
        case "content-language":
            ui.req.ContentLanguage = pv
        case "content-type":
            ui.req.ContentType = pv
        case "tier":
            // ignore
        case "mtime":
            // mtime in meta overrides source ModTime
            metaModTime, err := time.Parse(time.RFC3339Nano, v)
            if err != nil {
                fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
            } else {
                modTime = metaModTime
            }
        case "btime":
            // write as metadata since we can't set it
            ui.req.OpcMeta[k] = v
        default:
            ui.req.OpcMeta[k] = v
        }
    }

    // Set the mtime in the metadata
    ui.req.OpcMeta[metaMtime] = swift.TimeToFloatString(modTime)

    // read the md5sum if available
    // - for non-multipart
    //    - so we can add a ContentMD5
    //    - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
    // - for multipart provided checksums aren't disabled
    //    - so we can add the md5sum in the metadata as metaMD5Hash
    size := src.Size()
    isMultipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
    var md5sumBase64 string
    if !isMultipart || !o.fs.opt.DisableChecksum {
        ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
        if err == nil && matchMd5.MatchString(ui.md5sumHex) {
            hashBytes, err := hex.DecodeString(ui.md5sumHex)
            if err == nil {
                md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
                if isMultipart && !o.fs.opt.DisableChecksum {
                    // Set the md5sum as metadata on the object if
                    // - a multipart upload
                    // - the ETag is not an MD5, e.g. when using SSE/SSE-C
                    // provided checksums aren't disabled
                    ui.req.OpcMeta[metaMD5Hash] = md5sumBase64
                }
            }
        }
    }
    // Set the content type if it isn't set already
    if ui.req.ContentType == nil {
        ui.req.ContentType = common.String(fs.MimeType(ctx, src))
    }
    if size >= 0 {
        ui.req.ContentLength = common.Int64(size)
    }
    if md5sumBase64 != "" {
        ui.req.ContentMD5 = &md5sumBase64
    }
    o.applyPutOptions(ui.req, options...)
    useBYOKPutObject(o.fs, ui.req)
    if o.fs.opt.StorageTier != "" {
        storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
        if !ok {
            return ui, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
        }
        ui.req.StorageTier = storageTier
    }
    // Check metadata keys and values are valid
    for key, value := range ui.req.OpcMeta {
        if !httpguts.ValidHeaderFieldName(key) {
            fs.Errorf(o, "Dropping invalid metadata key %q", key)
            delete(ui.req.OpcMeta, key)
        } else if value == "" {
            fs.Errorf(o, "Dropping nil metadata value for key %q", key)
            delete(ui.req.OpcMeta, key)
        } else if !httpguts.ValidHeaderFieldValue(value) {
            fs.Errorf(o, "Dropping invalid metadata value %q for key %q", value, key)
            delete(ui.req.OpcMeta, key)
        }
    }
    return ui, nil
}

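The OpcMeta sanitising loop above leans on golang.org/x/net/http/httpguts, since object metadata travels as HTTP headers. A small demo of what those checks accept and reject:

package main

import (
    "fmt"

    "golang.org/x/net/http/httpguts"
)

func main() {
    fmt.Println(httpguts.ValidHeaderFieldName("mtime"))        // true
    fmt.Println(httpguts.ValidHeaderFieldName("bad key"))      // false: space not allowed in a token
    fmt.Println(httpguts.ValidHeaderFieldValue("1699999999"))  // true
    fmt.Println(httpguts.ValidHeaderFieldValue("a\x00b"))      // false: NUL byte
}
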
func (o *Object) createMultipartUpload(ctx context.Context, putReq *objectstorage.PutObjectRequest) (
    uploadID string, existingParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
    bucketName, bucketPath := o.split()
    f := o.fs
    if f.opt.AttemptResumeUpload {
        fs.Debugf(o, "attempting to resume upload for %v (if any)", o.remote)
        resumeUploads, err := o.fs.findLatestMultipartUpload(ctx, bucketName, bucketPath)
        if err == nil && len(resumeUploads) > 0 {
            uploadID = *resumeUploads[0].UploadId
            existingParts, err = f.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID)
            if err == nil {
                fs.Debugf(o, "resuming with existing upload id: %v", uploadID)
                return uploadID, existingParts, err
            }
        }
    }
    req := objectstorage.CreateMultipartUploadRequest{
        NamespaceName: common.String(o.fs.opt.Namespace),
        BucketName:    common.String(bucketName),
    }
    req.Object = common.String(bucketPath)
    if o.fs.opt.StorageTier != "" {
        storageTier, ok := objectstorage.GetMappingStorageTierEnum(o.fs.opt.StorageTier)
        if !ok {
            return "", nil, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
        }
        req.StorageTier = storageTier
    }
    o.applyMultipartUploadOptions(putReq, &req)

    var resp objectstorage.CreateMultipartUploadResponse
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.CreateMultipartUpload(ctx, req)
        return shouldRetry(ctx, resp.HTTPResponse(), err)
    })
    if err != nil {
        return "", existingParts, err
    }
    existingParts = make(map[int]objectstorage.MultipartUploadPartSummary)
    uploadID = *resp.UploadId
    fs.Debugf(o, "created new upload id: %v", uploadID)
    return uploadID, existingParts, err
}
@@ -4,12 +4,14 @@
package oracleobjectstorage

import (
+   "bytes"
    "context"
    "encoding/base64"
    "encoding/hex"
    "fmt"
    "io"
    "net/http"
+   "os"
    "regexp"
    "strconv"
    "strings"
@@ -18,10 +20,8 @@ import (
    "github.com/ncw/swift/v2"
    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/objectstorage"
-   "github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
-   "github.com/rclone/rclone/lib/atexit"
)

// ------------------------------------------------------------
@@ -367,9 +367,28 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
    return resp.HTTPResponse().Body, nil
}

+func isZeroLength(streamReader io.Reader) bool {
+   switch v := streamReader.(type) {
+   case *bytes.Buffer:
+       return v.Len() == 0
+   case *bytes.Reader:
+       return v.Len() == 0
+   case *strings.Reader:
+       return v.Len() == 0
+   case *os.File:
+       fi, err := v.Stat()
+       if err != nil {
+           return false
+       }
+       return fi.Size() == 0
+   default:
+       return false
+   }
+}
+
// Update an object if it has changed
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-   bucketName, bucketPath := o.split()
+   bucketName, _ := o.split()
    err = o.fs.makeBucket(ctx, bucketName)
    if err != nil {
        return err
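isZeroLength can only answer for reader types whose length is knowable up front; everything else is assumed non-zero. A quick demonstration outside rclone (with a trimmed-down copy of the type switch):

package main

import (
    "bytes"
    "fmt"
    "strings"
)

func isZeroLength(r interface{}) bool {
    switch v := r.(type) {
    case *bytes.Buffer:
        return v.Len() == 0
    case *strings.Reader:
        return v.Len() == 0
    default:
        return false // unknown size: assume non-zero
    }
}

func main() {
    fmt.Println(isZeroLength(bytes.NewBufferString(""))) // true
    fmt.Println(isZeroLength(strings.NewReader("hi")))   // false
}
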
@@ -377,142 +396,24 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

    // determine if we like upload single or multipart.
    size := src.Size()
-   multipart := size >= int64(o.fs.opt.UploadCutoff)
-
-   // Set the mtime in the metadata
-   modTime := src.ModTime(ctx)
-   metadata := map[string]string{
-       metaMtime: swift.TimeToFloatString(modTime),
+   multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
+   if isZeroLength(in) {
+       multipart = false
    }

-   // read the md5sum if available
-   // - for non-multipart
-   //    - so we can add a ContentMD5
-   //    - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
-   // - for multipart provided checksums aren't disabled
-   //    - so we can add the md5sum in the metadata as metaMD5Hash
-   var md5sumBase64 string
-   var md5sumHex string
-   if !multipart || !o.fs.opt.DisableChecksum {
-       md5sumHex, err = src.Hash(ctx, hash.MD5)
-       if err == nil && matchMd5.MatchString(md5sumHex) {
-           hashBytes, err := hex.DecodeString(md5sumHex)
-           if err == nil {
-               md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
-               if multipart && !o.fs.opt.DisableChecksum {
-                   // Set the md5sum as metadata on the object if
-                   // - a multipart upload
-                   // - the ETag is not an MD5, e.g. when using SSE/SSE-C
-                   // provided checksums aren't disabled
-                   metadata[metaMD5Hash] = md5sumBase64
-               }
-           }
-       }
-   }
-   // Guess the content type
-   mimeType := fs.MimeType(ctx, src)

    if multipart {
-       chunkSize := int64(o.fs.opt.ChunkSize)
-       uploadRequest := transfer.UploadRequest{
-           NamespaceName:                       common.String(o.fs.opt.Namespace),
-           BucketName:                          common.String(bucketName),
-           ObjectName:                          common.String(bucketPath),
-           ContentType:                         common.String(mimeType),
-           PartSize:                            common.Int64(chunkSize),
-           AllowMultipartUploads:               common.Bool(true),
-           AllowParrallelUploads:               common.Bool(true),
-           ObjectStorageClient:                 o.fs.srv,
-           EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
-           NumberOfGoroutines:                  common.Int(o.fs.opt.UploadConcurrency),
-           Metadata:                            metadataWithOpcPrefix(metadata),
-       }
-       if o.fs.opt.StorageTier != "" {
-           storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
-           if !ok {
-               return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
-           }
-           uploadRequest.StorageTier = storageTier
-       }
-       o.applyMultiPutOptions(&uploadRequest, options...)
-       useBYOKUpload(o.fs, &uploadRequest)
-       uploadStreamRequest := transfer.UploadStreamRequest{
-           UploadRequest: uploadRequest,
-           StreamReader:  in,
-       }
-       uploadMgr := transfer.NewUploadManager()
-       var uploadID = ""
-
-       defer atexit.OnError(&err, func() {
-           if uploadID == "" {
-               return
-           }
-           if o.fs.opt.LeavePartsOnError {
-               return
-           }
-           fs.Debugf(o, "Cancelling multipart upload")
-           errCancel := o.fs.abortMultiPartUpload(
-               context.Background(),
-               bucketName,
-               bucketPath,
-               uploadID)
-           if errCancel != nil {
-               fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
-           }
-       })()
-
-       err = o.fs.pacer.Call(func() (bool, error) {
-           uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
-           var httpResponse *http.Response
-           if err == nil {
-               if uploadResponse.Type == transfer.MultipartUpload {
-                   if uploadResponse.MultipartUploadResponse != nil {
-                       httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
-                   }
-               } else {
-                   if uploadResponse.SinglepartUploadResponse != nil {
-                       httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
-                   }
-               }
-           }
-           if err != nil {
-               uploadID := ""
-               if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
-                   uploadID = *uploadResponse.MultipartUploadResponse.UploadID
-                   fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
-                   _ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
-               }
-           }
-           return shouldRetry(ctx, httpResponse, err)
-       })
+       err = o.uploadMultipart(ctx, src, in, options...)
        if err != nil {
            fs.Errorf(o, "multipart streaming upload failed %v", err)
            return err
        }
    } else {
-       req := objectstorage.PutObjectRequest{
-           NamespaceName: common.String(o.fs.opt.Namespace),
-           BucketName:    common.String(bucketName),
-           ObjectName:    common.String(bucketPath),
-           ContentType:   common.String(mimeType),
-           PutObjectBody: io.NopCloser(in),
-           OpcMeta:       metadata,
+       ui, err := o.prepareUpload(ctx, src, options)
+       if err != nil {
+           return fmt.Errorf("failed to prepare upload: %w", err)
        }
-       if size >= 0 {
-           req.ContentLength = common.Int64(size)
-       }
-       if o.fs.opt.StorageTier != "" {
-           storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
-           if !ok {
-               return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
-           }
-           req.StorageTier = storageTier
-       }
-       o.applyPutOptions(&req, options...)
-       useBYOKPutObject(o.fs, &req)
        var resp objectstorage.PutObjectResponse
-       err = o.fs.pacer.Call(func() (bool, error) {
-           resp, err = o.fs.srv.PutObject(ctx, req)
+       err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+           ui.req.PutObjectBody = io.NopCloser(in)
+           resp, err = o.fs.srv.PutObject(ctx, *ui.req)
            return shouldRetry(ctx, resp.HTTPResponse(), err)
        })
        if err != nil {
@@ -591,28 +492,24 @@ func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, opti
    }
}

-func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
-   // Apply upload options
-   for _, option := range options {
-       key, value := option.Header()
-       lowerKey := strings.ToLower(key)
-       switch lowerKey {
-       case "":
-           // ignore
-       case "content-encoding":
-           req.ContentEncoding = common.String(value)
-       case "content-language":
-           req.ContentLanguage = common.String(value)
-       case "content-type":
-           req.ContentType = common.String(value)
-       default:
-           if strings.HasPrefix(lowerKey, ociMetaPrefix) {
-               req.Metadata[lowerKey] = value
-           } else {
-               fs.Errorf(o, "Don't know how to set key %q on upload", key)
-           }
-       }
-   }
-}
+func (o *Object) applyMultipartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.CreateMultipartUploadRequest) {
+   req.ContentType = putReq.ContentType
+   req.ContentLanguage = putReq.ContentLanguage
+   req.ContentEncoding = putReq.ContentEncoding
+   req.ContentDisposition = putReq.ContentDisposition
+   req.CacheControl = putReq.CacheControl
+   req.Metadata = metadataWithOpcPrefix(putReq.OpcMeta)
+   req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
+   req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
+   req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
+   req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
+}
+
+func (o *Object) applyPartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.UploadPartRequest) {
+   req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
+   req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
+   req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
+   req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
+}

func metadataWithOpcPrefix(src map[string]string) map[string]string {

@@ -13,9 +13,10 @@ import (

const (
    maxSizeForCopy             = 4768 * 1024 * 1024
-   minChunkSize               = fs.SizeSuffix(1024 * 1024 * 5)
-   defaultUploadCutoff        = fs.SizeSuffix(200 * 1024 * 1024)
+   maxUploadParts             = 10000
+   defaultUploadConcurrency   = 10
+   minChunkSize               = fs.SizeSuffix(5 * 1024 * 1024)
+   defaultUploadCutoff        = fs.SizeSuffix(200 * 1024 * 1024)
    maxUploadCutoff            = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
    minSleep                   = 10 * time.Millisecond
    defaultCopyTimeoutDuration = fs.Duration(time.Minute)
@@ -55,12 +56,14 @@ type Options struct {
    ConfigProfile        string        `config:"config_profile"`
    UploadCutoff         fs.SizeSuffix `config:"upload_cutoff"`
    ChunkSize            fs.SizeSuffix `config:"chunk_size"`
+   MaxUploadParts       int           `config:"max_upload_parts"`
    UploadConcurrency    int           `config:"upload_concurrency"`
    DisableChecksum      bool          `config:"disable_checksum"`
    CopyCutoff           fs.SizeSuffix `config:"copy_cutoff"`
    CopyTimeout          fs.Duration   `config:"copy_timeout"`
    StorageTier          string        `config:"storage_tier"`
    LeavePartsOnError    bool          `config:"leave_parts_on_error"`
+   AttemptResumeUpload  bool          `config:"attempt_resume_upload"`
    NoCheckBucket        bool          `config:"no_check_bucket"`
    SSEKMSKeyID          string        `config:"sse_kms_key_id"`
    SSECustomerAlgorithm string        `config:"sse_customer_algorithm"`
@@ -92,14 +95,16 @@ func newOptions() []fs.Option {
        Help: noAuthHelpText,
    }},
}, {
-   Name:     "namespace",
-   Help:     "Object storage namespace",
-   Required: true,
+   Name:      "namespace",
+   Help:      "Object storage namespace",
+   Required:  true,
+   Sensitive: true,
}, {
-   Name:     "compartment",
-   Help:     "Object storage compartment OCID",
-   Provider: "!no_auth",
-   Required: true,
+   Name:      "compartment",
+   Help:      "Object storage compartment OCID",
+   Provider:  "!no_auth",
+   Required:  true,
+   Sensitive: true,
}, {
    Name: "region",
    Help: "Object storage Region",
@@ -155,9 +160,8 @@ The minimum is 0 and the maximum is 5 GiB.`,
    Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff or files with unknown
-size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
-photos or google docs) they will be uploaded as multipart uploads
-using this chunk size.
+size (e.g. from "rclone rcat" or uploaded with "rclone mount") they will be uploaded
+as multipart uploads using this chunk size.

Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer.
@@ -179,6 +183,20 @@ statistics displayed with "-P" flag.
`,
    Default:  minChunkSize,
    Advanced: true,
+}, {
+   Name: "max_upload_parts",
+   Help: `Maximum number of parts in a multipart upload.
+
+This option defines the maximum number of multipart chunks to use
+when doing a multipart upload.
+
+OCI has max parts limit of 10,000 chunks.
+
+Rclone will automatically increase the chunk size when uploading a
+large file of a known size to stay below this number of chunks limit.
+`,
+   Default:  maxUploadParts,
+   Advanced: true,
}, {
    Name: "upload_concurrency",
    Help: `Concurrency for multipart uploads.
@@ -236,12 +254,24 @@ to start uploading.`,
    encoder.EncodeDot,
}, {
    Name: "leave_parts_on_error",
-   Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
+   Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.
+
+It should be set to true for resuming uploads across different sessions.
+
+WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
+additional costs if not cleaned up.
`,
    Default:  false,
    Advanced: true,
+}, {
+   Name: "attempt_resume_upload",
+   Help: `If true attempt to resume previously started multipart upload for the object.
+This will be helpful to speed up multipart transfers by resuming uploads from past session.
+
+WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is
+aborted and a new multipart upload is started with the new chunk size.
+
+The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully.
+`,
+   Default:  false,
+   Advanced: true,
@@ -289,7 +319,7 @@ Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/using
    }},
}, {
    Name: "sse_kms_key_id",
-   Help: `if using using your own master key in vault, this header specifies the
+   Help: `if using your own master key in vault, this header specifies the
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,

@@ -318,7 +318,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
        remote := *object.Name
        remote = f.opt.Enc.ToStandardPath(remote)
        if !strings.HasPrefix(remote, prefix) {
-           // fs.Debugf(f, "Odd name received %v", object.Name)
            continue
        }
        remote = remote[len(prefix):]
@@ -558,15 +557,15 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    })
}

-func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
-   if uploadID == "" {
+func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID *string) (err error) {
+   if uploadID == nil || *uploadID == "" {
        return nil
    }
    request := objectstorage.AbortMultipartUploadRequest{
        NamespaceName: common.String(f.opt.Namespace),
-       BucketName:    common.String(bucketName),
-       ObjectName:    common.String(bucketPath),
-       UploadId:      common.String(uploadID),
+       BucketName:    bucketName,
+       ObjectName:    bucketPath,
+       UploadId:      uploadID,
    }
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.AbortMultipartUpload(ctx, request)
@@ -589,12 +588,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
            if operations.SkipDestructive(ctx, what, "remove pending upload") {
                continue
            }
-           ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
-           if ignoreErr != nil {
-               // fs.Debugf(f, "ignoring error %s", ignoreErr)
-           }
-       } else {
-           // fs.Debugf(f, "ignoring %s", what)
+           _ = f.abortMultiPartUpload(ctx, upload.Bucket, upload.Object, upload.UploadId)
        }
    } else {
        fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
@@ -689,12 +683,13 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (

// Check the interfaces are satisfied
var (
-   _ fs.Fs          = &Fs{}
-   _ fs.Copier      = &Fs{}
-   _ fs.PutStreamer = &Fs{}
-   _ fs.ListRer     = &Fs{}
-   _ fs.Commander   = &Fs{}
-   _ fs.CleanUpper  = &Fs{}
+   _ fs.Fs              = &Fs{}
+   _ fs.Copier          = &Fs{}
+   _ fs.PutStreamer     = &Fs{}
+   _ fs.ListRer         = &Fs{}
+   _ fs.Commander       = &Fs{}
+   _ fs.CleanUpper      = &Fs{}
+   _ fs.OpenChunkWriter = &Fs{}

    _ fs.Object    = &Object{}
    _ fs.MimeTyper = &Object{}

@@ -110,10 +110,11 @@ func init() {
            encoder.EncodeBackSlash |
            encoder.EncodeInvalidUtf8),
        }, {
-           Name:     "root_folder_id",
-           Help:     "Fill in for rclone to use a non root folder as its starting point.",
-           Default:  "d0",
-           Advanced: true,
+           Name:      "root_folder_id",
+           Help:      "Fill in for rclone to use a non root folder as its starting point.",
+           Default:   "d0",
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "hostname",
            Help: `Hostname to connect to.
@@ -138,7 +139,8 @@ with rclone authorize.
This is only required when you want to use the cleanup command. Due to a bug
in the pcloud API the required API does not support OAuth authentication so
we have to rely on user password authentication for it.`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "password",
            Help: "Your pcloud password.",

536  backend/pikpak/api/types.go  Normal file
@@ -0,0 +1,536 @@
// Package api has type definitions for pikpak
//
// Manually obtained from the API responses using Browse Dev. Tool and https://mholt.github.io/json-to-go/
package api

import (
    "fmt"
    "reflect"
    "strconv"
    "time"
)

const (
    // "2022-09-17T14:31:06.056+08:00"
    timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the pikpak API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
    timeString := (*time.Time)(t).Format(timeFormat)
    return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
    if string(data) == "null" || string(data) == `""` {
        return nil
    }
    newT, err := time.Parse(timeFormat, string(data))
    if err != nil {
        return err
    }
    *t = Time(newT)
    return nil
}

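Because timeFormat wraps time.RFC3339 in literal JSON quotes, the raw JSON bytes (quotes included) parse directly. A quick round-trip using only the standard library:

package main

import (
    "fmt"
    "time"
)

const timeFormat = `"` + time.RFC3339 + `"`

func main() {
    raw := `"2022-09-17T14:31:06.056+08:00"` // sample value from the API
    t, err := time.Parse(timeFormat, raw)    // fractional seconds are accepted
    if err != nil {
        panic(err)
    }
    // Re-format emits the quoted RFC3339 form, as MarshalJSON does
    // (the fractional part is dropped by the layout).
    fmt.Println(t.Format(timeFormat))
}
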
// Types of things in Item
const (
    KindOfFolder        = "drive#folder"
    KindOfFile          = "drive#file"
    KindOfFileList      = "drive#fileList"
    KindOfResumable     = "drive#resumable"
    KindOfForm          = "drive#form"
    ThumbnailSizeS      = "SIZE_SMALL"
    ThumbnailSizeM      = "SIZE_MEDIUM"
    ThumbnailSizeL      = "SIZE_LARGE"
    PhaseTypeComplete   = "PHASE_TYPE_COMPLETE"
    PhaseTypeRunning    = "PHASE_TYPE_RUNNING"
    PhaseTypeError      = "PHASE_TYPE_ERROR"
    PhaseTypePending    = "PHASE_TYPE_PENDING"
    UploadTypeForm      = "UPLOAD_TYPE_FORM"
    UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
    ListLimit           = 100
)

// ------------------------------------------------------------

// Error details api error from pikpak
type Error struct {
    Reason  string `json:"error"` // short description of the reason, e.g. "file_name_empty" "invalid_request"
    Code    int    `json:"error_code"`
    URL     string `json:"error_url,omitempty"`
    Message string `json:"error_description,omitempty"`
    // can have either of `error_details` or `details`
    ErrorDetails []*ErrorDetails `json:"error_details,omitempty"`
    Details      []*ErrorDetails `json:"details,omitempty"`
}

// ErrorDetails contains further details of api error
type ErrorDetails struct {
    Type     string `json:"@type,omitempty"`
    Reason   string `json:"reason,omitempty"`
    Domain   string `json:"domain,omitempty"`
    Metadata struct {
    } `json:"metadata,omitempty"` // TODO: undiscovered yet
    Locale       string        `json:"locale,omitempty"` // e.g. "en"
    Message      string        `json:"message,omitempty"`
    StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
    Detail       string        `json:"detail,omitempty"`
}

// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
    out := fmt.Sprintf("Error %q (%d)", e.Reason, e.Code)
    if e.Message != "" {
        out += ": " + e.Message
    }
    return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// ------------------------------------------------------------

// Filters contains parameters for filters when listing.
//
// possible operators
// * in: a list of comma-separated string
// * eq: "true" or "false"
// * gt or lt: time format string, e.g. "2023-01-28T10:56:49.757+08:00"
type Filters struct {
    Phase        map[string]string `json:"phase,omitempty"`         // "in" or "eq"
    Trashed      map[string]bool   `json:"trashed,omitempty"`       // "eq"
    Kind         map[string]string `json:"kind,omitempty"`          // "eq"
    Starred      map[string]bool   `json:"starred,omitempty"`       // "eq"
    ModifiedTime map[string]string `json:"modified_time,omitempty"` // "gt" or "lt"
}

// Set sets filter values using field name, operator and corresponding value
func (f *Filters) Set(field, operator, value string) {
    if value == "" {
        // UNSET for empty values
        return
    }
    r := reflect.ValueOf(f)
    fd := reflect.Indirect(r).FieldByName(field)
    if v, err := strconv.ParseBool(value); err == nil {
        fd.Set(reflect.ValueOf(map[string]bool{operator: v}))
    } else {
        fd.Set(reflect.ValueOf(map[string]string{operator: value}))
    }
}

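How the reflective Set is meant to be driven, as a self-contained sketch with a trimmed-down copy of Filters (the real type lives in backend/pikpak/api; field names must match the exported struct fields exactly):

package main

import (
    "encoding/json"
    "fmt"
    "reflect"
    "strconv"
)

type Filters struct {
    Phase   map[string]string `json:"phase,omitempty"`
    Trashed map[string]bool   `json:"trashed,omitempty"`
}

func (f *Filters) Set(field, operator, value string) {
    if value == "" {
        return // UNSET for empty values
    }
    fd := reflect.Indirect(reflect.ValueOf(f)).FieldByName(field)
    if v, err := strconv.ParseBool(value); err == nil {
        fd.Set(reflect.ValueOf(map[string]bool{operator: v})) // bool-valued filter
    } else {
        fd.Set(reflect.ValueOf(map[string]string{operator: value})) // string-valued filter
    }
}

func main() {
    f := &Filters{}
    f.Set("Phase", "in", "PHASE_TYPE_COMPLETE,PHASE_TYPE_RUNNING")
    f.Set("Trashed", "eq", "false")
    out, _ := json.Marshal(f)
    fmt.Println(string(out))
    // {"phase":{"in":"PHASE_TYPE_COMPLETE,PHASE_TYPE_RUNNING"},"trashed":{"eq":false}}
}
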
// ------------------------------------------------------------
|
||||
// Common Elements
|
||||
|
||||
// Link contains a download URL for opening files
|
||||
type Link struct {
|
||||
URL string `json:"url"`
|
||||
Token string `json:"token"`
|
||||
Expire Time `json:"expire"`
|
||||
Type string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// Valid reports whether l is non-nil, has an URL, and is not expired.
|
||||
func (l *Link) Valid() bool {
|
||||
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
|
||||
}
|
||||
|
||||
// URL is a basic form of URL
|
||||
type URL struct {
|
||||
Kind string `json:"kind,omitempty"` // e.g. "upload#url"
|
||||
URL string `json:"url,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Base Elements
|
||||
|
||||
// FileList contains a list of File elements
|
||||
type FileList struct {
|
||||
Kind string `json:"kind,omitempty"` // drive#fileList
|
||||
Files []*File `json:"files,omitempty"`
|
||||
NextPageToken string `json:"next_page_token"`
|
||||
Version string `json:"version,omitempty"`
|
||||
VersionOutdated bool `json:"version_outdated,omitempty"`
|
||||
}
|
||||
|
||||
// File is a basic element representing a single file object
|
||||
//
|
||||
// There are two types of download links,
|
||||
// 1) one from File.WebContentLink or File.Links.ApplicationOctetStream.URL and
|
||||
// 2) the other from File.Medias[].Link.URL.
|
||||
// Empirically, 2) is less restrictive to multiple concurrent range-requests
|
||||
// for a single file, i.e. supports for higher `--multi-thread-streams=N`.
|
||||
// However, it is not generally applicable as it is only for meadia.
|
||||
type File struct {
|
||||
Apps []*FileApp `json:"apps,omitempty"`
|
||||
Audit *FileAudit `json:"audit,omitempty"`
|
||||
Collection string `json:"collection,omitempty"` // TODO
|
||||
CreatedTime Time `json:"created_time,omitempty"`
|
||||
DeleteTime Time `json:"delete_time,omitempty"`
|
||||
FileCategory string `json:"file_category,omitempty"`
|
||||
FileExtension string `json:"file_extension,omitempty"`
|
||||
FolderType string `json:"folder_type,omitempty"`
|
||||
Hash string `json:"hash,omitempty"` // sha1 but NOT a valid file hash. looks like a torrent hash
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Kind string `json:"kind,omitempty"` // "drive#file"
|
||||
Links *FileLinks `json:"links,omitempty"`
|
||||
Md5Checksum string `json:"md5_checksum,omitempty"`
|
||||
Medias []*Media `json:"medias,omitempty"`
|
||||
MimeType string `json:"mime_type,omitempty"`
|
||||
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
|
||||
Name string `json:"name,omitempty"`
|
||||
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
|
||||
OriginalURL string `json:"original_url,omitempty"`
|
||||
Params *FileParams `json:"params,omitempty"`
|
||||
ParentID string `json:"parent_id,omitempty"`
|
||||
Phase string `json:"phase,omitempty"`
|
||||
Revision int `json:"revision,omitempty,string"`
|
||||
Size int64 `json:"size,omitempty,string"`
|
||||
SortName string `json:"sort_name,omitempty"`
|
||||
Space string `json:"space,omitempty"`
|
||||
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
|
||||
Starred bool `json:"starred,omitempty"`
|
||||
ThumbnailLink string `json:"thumbnail_link,omitempty"`
|
||||
Trashed bool `json:"trashed,omitempty"`
|
||||
UserID string `json:"user_id,omitempty"`
|
||||
UserModifiedTime Time `json:"user_modified_time,omitempty"`
|
||||
WebContentLink string `json:"web_content_link,omitempty"`
|
||||
Writable bool `json:"writable,omitempty"`
|
||||
}

// FileLinks includes links to a file on the backend
type FileLinks struct {
	ApplicationOctetStream *Link `json:"application/octet-stream,omitempty"`
}

// FileAudit contains audit information for the file
type FileAudit struct {
	Status string `json:"status,omitempty"` // "STATUS_OK"
	Message string `json:"message,omitempty"`
	Title string `json:"title,omitempty"`
}

// Media contains info about a supported version of media, e.g. original, transcoded, etc
type Media struct {
	MediaID string `json:"media_id,omitempty"`
	MediaName string `json:"media_name,omitempty"`
	Video struct {
		Height int `json:"height,omitempty"`
		Width int `json:"width,omitempty"`
		Duration int64 `json:"duration,omitempty"`
		BitRate int `json:"bit_rate,omitempty"`
		FrameRate int `json:"frame_rate,omitempty"`
		VideoCodec string `json:"video_codec,omitempty"` // "h264", "hevc"
		AudioCodec string `json:"audio_codec,omitempty"` // "pcm_bluray", "aac"
		VideoType string `json:"video_type,omitempty"` // "mpegts"
		HdrType string `json:"hdr_type,omitempty"`
	} `json:"video,omitempty"`
	Link *Link `json:"link,omitempty"`
	NeedMoreQuota bool `json:"need_more_quota,omitempty"`
	VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
	RedirectLink string `json:"redirect_link,omitempty"`
	IconLink string `json:"icon_link,omitempty"`
	IsDefault bool `json:"is_default,omitempty"`
	Priority int `json:"priority,omitempty"`
	IsOrigin bool `json:"is_origin,omitempty"`
	ResolutionName string `json:"resolution_name,omitempty"`
	IsVisible bool `json:"is_visible,omitempty"`
	Category string `json:"category,omitempty"`
}

// FileParams includes parameters for instant open
type FileParams struct {
	Duration int64 `json:"duration,omitempty,string"` // in seconds
	Height int `json:"height,omitempty,string"`
	Platform string `json:"platform,omitempty"` // "Upload"
	PlatformIcon string `json:"platform_icon,omitempty"`
	URL string `json:"url,omitempty"`
	Width int `json:"width,omitempty,string"`
}

// FileApp includes parameters for instant open
type FileApp struct {
	ID string `json:"id,omitempty"` // "decompress" for rar files
	Name string `json:"name,omitempty"` // "decompress" for rar files
	Access []interface{} `json:"access,omitempty"`
	Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
	RedirectLink string `json:"redirect_link,omitempty"`
	VipTypes []interface{} `json:"vip_types,omitempty"`
	NeedMoreQuota bool `json:"need_more_quota,omitempty"`
	IconLink string `json:"icon_link,omitempty"`
	IsDefault bool `json:"is_default,omitempty"`
	Params struct {
	} `json:"params,omitempty"` // TODO
	CategoryIds []interface{} `json:"category_ids,omitempty"`
	AdSceneType int `json:"ad_scene_type,omitempty"`
	Space string `json:"space,omitempty"`
	Links struct {
	} `json:"links,omitempty"` // TODO
}

// ------------------------------------------------------------

// TaskList contains a list of Task elements
type TaskList struct {
	Tasks []*Task `json:"tasks,omitempty"` // "drive#task"
	NextPageToken string `json:"next_page_token"`
	ExpiresIn int `json:"expires_in,omitempty"`
}

// Task is a basic element representing a single task such as an offline download or upload
type Task struct {
	Kind string `json:"kind,omitempty"` // "drive#task"
	ID string `json:"id,omitempty"` // task id?
	Name string `json:"name,omitempty"` // torrent name?
	Type string `json:"type,omitempty"` // "offline"
	UserID string `json:"user_id,omitempty"`
	Statuses []interface{} `json:"statuses,omitempty"` // TODO
	StatusSize int `json:"status_size,omitempty"` // TODO
	Params *TaskParams `json:"params,omitempty"` // TODO
	FileID string `json:"file_id,omitempty"`
	FileName string `json:"file_name,omitempty"`
	FileSize string `json:"file_size,omitempty"`
	Message string `json:"message,omitempty"` // e.g. "Saving"
	CreatedTime Time `json:"created_time,omitempty"`
	UpdatedTime Time `json:"updated_time,omitempty"`
	ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
	Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
	Progress int `json:"progress,omitempty"`
	IconLink string `json:"icon_link,omitempty"`
	Callback string `json:"callback,omitempty"`
	ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
	Space string `json:"space,omitempty"`
}

// TaskParams includes parameters informing the status of a Task
type TaskParams struct {
	Age string `json:"age,omitempty"`
	PredictSpeed string `json:"predict_speed,omitempty"`
	PredictType string `json:"predict_type,omitempty"`
	URL string `json:"url,omitempty"`
}

// Form contains parameters for upload by multipart/form-data
type Form struct {
	Headers struct{} `json:"headers"`
	Kind string `json:"kind"` // "drive#form"
	Method string `json:"method"` // "POST"
	MultiParts struct {
		OSSAccessKeyID string `json:"OSSAccessKeyId"`
		Signature string `json:"Signature"`
		Callback string `json:"callback"`
		Key string `json:"key"`
		Policy string `json:"policy"`
		XUserData string `json:"x:user_data"`
	} `json:"multi_parts"`
	URL string `json:"url"`
}
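
// The Form fields above map directly onto an OSS-style pre-signed form
// upload. A minimal sketch of how a client could post it, assuming the
// standard library's mime/multipart and net/http; the "file" part name
// and buffering the whole body in memory are simplifying assumptions,
// not the backend's actual code:
func uploadByForm(ctx context.Context, client *http.Client, form *Form, name string, in io.Reader) error {
	buf := &bytes.Buffer{}
	mw := multipart.NewWriter(buf)
	fields := map[string]string{
		"OSSAccessKeyId": form.MultiParts.OSSAccessKeyID,
		"Signature":      form.MultiParts.Signature,
		"callback":       form.MultiParts.Callback,
		"key":            form.MultiParts.Key,
		"policy":         form.MultiParts.Policy,
		"x:user_data":    form.MultiParts.XUserData,
	}
	for k, v := range fields {
		if err := mw.WriteField(k, v); err != nil {
			return err
		}
	}
	// the file contents must come last in the form
	fw, err := mw.CreateFormFile("file", name)
	if err != nil {
		return err
	}
	if _, err := io.Copy(fw, in); err != nil {
		return err
	}
	if err := mw.Close(); err != nil {
		return err
	}
	req, err := http.NewRequestWithContext(ctx, form.Method, form.URL, buf)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", mw.FormDataContentType())
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close() // a real client would also check resp.StatusCode
}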

// Resumable contains parameters for a resumable upload
type Resumable struct {
	Kind string `json:"kind,omitempty"` // "drive#resumable"
	Provider string `json:"provider,omitempty"` // e.g. "PROVIDER_ALIYUN"
	Params *ResumableParams `json:"params,omitempty"`
}

// ResumableParams specifies resumable parameters
type ResumableParams struct {
	AccessKeyID string `json:"access_key_id,omitempty"`
	AccessKeySecret string `json:"access_key_secret,omitempty"`
	Bucket string `json:"bucket,omitempty"`
	Endpoint string `json:"endpoint,omitempty"`
	Expiration Time `json:"expiration,omitempty"`
	Key string `json:"key,omitempty"`
	SecurityToken string `json:"security_token,omitempty"`
}
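
// The temporary OSS credentials above expire, so a client should check
// Expiration before reusing them. A hypothetical helper, not part of
// the backend, mirroring the 10-second safety margin used for Link
// validity at the top of this file:
func (p *ResumableParams) expired() bool {
	return time.Now().Add(10 * time.Second).After(time.Time(p.Expiration))
}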

// FileInArchive is a basic element in an archive
type FileInArchive struct {
	Index int `json:"index,omitempty"`
	Filename string `json:"filename,omitempty"`
	Filesize string `json:"filesize,omitempty"`
	MimeType string `json:"mime_type,omitempty"`
	Gcid string `json:"gcid,omitempty"`
	Kind string `json:"kind,omitempty"`
	IconLink string `json:"icon_link,omitempty"`
	Path string `json:"path,omitempty"`
}

// ------------------------------------------------------------

// NewFile is a response to RequestNewFile
type NewFile struct {
	File *File `json:"file,omitempty"`
	Form *Form `json:"form,omitempty"`
	Resumable *Resumable `json:"resumable,omitempty"`
	Task *Task `json:"task,omitempty"` // null in this case
	UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}

// NewTask is a response to RequestNewTask
type NewTask struct {
	UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
	File *File `json:"file,omitempty"` // null in this case
	Task *Task `json:"task,omitempty"`
	URL *URL `json:"url,omitempty"` // {"kind": "upload#url"}
}

// About informs drive status
type About struct {
	Kind string `json:"kind,omitempty"` // "drive#about"
	Quota *Quota `json:"quota,omitempty"`
	ExpiresAt string `json:"expires_at,omitempty"`
	Quotas struct {
	} `json:"quotas,omitempty"` // maybe []*Quota?
}

// Quota informs drive quota
type Quota struct {
	Kind string `json:"kind,omitempty"` // "drive#quota"
	Limit int64 `json:"limit,omitempty,string"` // limit in bytes
	Usage int64 `json:"usage,omitempty,string"` // bytes in use
	UsageInTrash int64 `json:"usage_in_trash,omitempty,string"` // bytes in trash, but this does not seem to work
	PlayTimesLimit string `json:"play_times_limit,omitempty"` // maybe in seconds
	PlayTimesUsage string `json:"play_times_usage,omitempty"` // maybe in seconds
}

// Share is a response to RequestShare
//
// used in PublicLink()
type Share struct {
	ShareID string `json:"share_id,omitempty"`
	ShareURL string `json:"share_url,omitempty"`
	PassCode string `json:"pass_code,omitempty"`
	ShareText string `json:"share_text,omitempty"`
}

// User contains user account information
//
// GET https://user.mypikpak.com/v1/user/me
type User struct {
	Sub string `json:"sub,omitempty"` // userid for internal use
	Name string `json:"name,omitempty"` // Username
	Picture string `json:"picture,omitempty"` // URL to Avatar image
	Email string `json:"email,omitempty"` // redacted email address
	Providers *[]UserProvider `json:"providers,omitempty"` // OAuth provider
	PhoneNumber string `json:"phone_number,omitempty"`
	Password string `json:"password,omitempty"` // "SET" if configured
	Status string `json:"status,omitempty"` // "ACTIVE"
	CreatedAt Time `json:"created_at,omitempty"`
	PasswordUpdatedAt Time `json:"password_updated_at,omitempty"`
}

// UserProvider details third-party authentication
type UserProvider struct {
	ID string `json:"id,omitempty"` // e.g. "google.com"
	ProviderUserID string `json:"provider_user_id,omitempty"`
	Name string `json:"name,omitempty"` // username
}

// VIP includes subscription details about a premium account
//
// GET https://api-drive.mypikpak.com/drive/v1/privilege/vip
type VIP struct {
	Result string `json:"result,omitempty"` // "ACCEPTED"
	Message string `json:"message,omitempty"`
	RedirectURI string `json:"redirect_uri,omitempty"`
	Data struct {
		Expire Time `json:"expire,omitempty"`
		Status string `json:"status,omitempty"` // "invalid" or "ok"
		Type string `json:"type,omitempty"` // "novip" or "platinum"
		UserID string `json:"user_id,omitempty"` // same as User.Sub
	} `json:"data,omitempty"`
}

// DecompressResult is a response to RequestDecompress
type DecompressResult struct {
	Status string `json:"status,omitempty"` // "OK"
	StatusText string `json:"status_text,omitempty"`
	TaskID string `json:"task_id,omitempty"` // same as File.Id
	FilesNum int `json:"files_num,omitempty"` // number of files in archive
	RedirectLink string `json:"redirect_link,omitempty"`
}

// ------------------------------------------------------------

// RequestShare requests a file share
type RequestShare struct {
	FileIds []string `json:"file_ids,omitempty"`
	ShareTo string `json:"share_to,omitempty"` // "publiclink",
	ExpirationDays int `json:"expiration_days,omitempty"` // -1 = 'forever'
	PassCodeOption string `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
}

// RequestBatch requests batch actions
type RequestBatch struct {
	Ids []string `json:"ids,omitempty"`
	To map[string]string `json:"to,omitempty"`
}

// RequestNewFile requests creation of a new `drive#folder` or `drive#file`
type RequestNewFile struct {
	// always required
	Kind string `json:"kind"` // "drive#folder" or "drive#file"
	Name string `json:"name"`
	ParentID string `json:"parent_id"`
	FolderType string `json:"folder_type"`
	// only when uploading a new file
	Hash string `json:"hash,omitempty"` // sha1sum
	Resumable map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
	Size int64 `json:"size,omitempty"`
	UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}
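
// A minimal sketch of building a folder-creation request from the type
// above. The "NORMAL" folder_type value is an assumption for
// illustration; it is not documented in this file:
func newFolderRequest(name, parentID string) *RequestNewFile {
	return &RequestNewFile{
		Kind:       "drive#folder",
		Name:       name,
		ParentID:   parentID,
		FolderType: "NORMAL", // assumed value for an ordinary folder
	}
}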

// RequestNewTask requests creation of a new task, such as an offline download
//
// Name and ParentID can be left empty.
type RequestNewTask struct {
	Kind string `json:"kind,omitempty"` // "drive#file"
	Name string `json:"name,omitempty"`
	ParentID string `json:"parent_id,omitempty"`
	UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
	URL *URL `json:"url,omitempty"` // {"url": downloadUrl}
	FolderType string `json:"folder_type,omitempty"` // "" if parent_id else "DOWNLOAD"
}

// RequestDecompress requests decompression of archive files
type RequestDecompress struct {
	Gcid string `json:"gcid,omitempty"` // same as File.Hash
	Password string `json:"password,omitempty"` // ""
	FileID string `json:"file_id,omitempty"`
	Files []*FileInArchive `json:"files,omitempty"` // can request selected files to be decompressed
	DefaultParent bool `json:"default_parent,omitempty"`
}

// ------------------------------------------------------------

// NOT implemented YET

// RequestArchiveFileList requests a list of files in an archive
//
// POST https://api-drive.mypikpak.com/decompress/v1/list
type RequestArchiveFileList struct {
	Gcid string `json:"gcid,omitempty"` // same as api.File.Hash
	Path string `json:"path,omitempty"` // "" by default
	Password string `json:"password,omitempty"` // "" by default
	FileID string `json:"file_id,omitempty"`
}

// ArchiveFileList is a response to RequestArchiveFileList
type ArchiveFileList struct {
	Status string `json:"status,omitempty"` // "OK"
	StatusText string `json:"status_text,omitempty"` // ""
	TaskID string `json:"task_id,omitempty"` // ""
	CurrentPath string `json:"current_path,omitempty"` // ""
	Title string `json:"title,omitempty"`
	FileSize int64 `json:"file_size,omitempty"`
	Gcid string `json:"gcid,omitempty"` // same as File.Hash
	Files []*FileInArchive `json:"files,omitempty"`
}
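
// A minimal sketch of building an offline-download request from
// RequestNewTask, following the folder_type rule noted above
// (hypothetical helper, not part of the backend):
func newOfflineDownloadRequest(srcURL, parentID string) *RequestNewTask {
	folderType := "DOWNLOAD"
	if parentID != "" {
		folderType = "" // folder_type is left empty when parent_id is set
	}
	return &RequestNewTask{
		Kind:       "drive#file",
		ParentID:   parentID,
		UploadType: "UPLOAD_TYPE_URL",
		URL:        &URL{URL: srcURL},
		FolderType: folderType,
	}
}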

backend/pikpak/helper.go (new file, 253 lines)
@@ -0,0 +1,253 @@
package pikpak

import (
	"bytes"
	"context"
	"crypto/sha1"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/rclone/rclone/backend/pikpak/api"
	"github.com/rclone/rclone/lib/rest"
)

// Globals
const (
	cachePrefix = "rclone-pikpak-sha1sum-"
)

// requestDecompress requests decompression of compressed files
func (f *Fs) requestDecompress(ctx context.Context, file *api.File, password string) (info *api.DecompressResult, err error) {
	req := &api.RequestDecompress{
		Gcid: file.Hash,
		Password: password,
		FileID: file.ID,
		Files: []*api.FileInArchive{},
		DefaultParent: true,
	}
	opts := rest.Opts{
		Method: "POST",
		Path: "/decompress/v1/decompress",
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
		return f.shouldRetry(ctx, resp, err)
	})
	return
}

// getUserInfo gets UserInfo from API
func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
	opts := rest.Opts{
		Method: "GET",
		RootURL: "https://user.mypikpak.com/v1/user/me",
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed to get userinfo: %w", err)
	}
	return
}

// getVIPInfo gets VIPInfo from API
func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
	opts := rest.Opts{
		Method: "GET",
		RootURL: "https://api-drive.mypikpak.com/drive/v1/privilege/vip",
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed to get vip info: %w", err)
	}
	return
}

// requestBatchAction requests batch actions to API
//
// action can be one of batch{Copy,Delete,Trash,Untrash}
func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
	opts := rest.Opts{
		Method: "POST",
		Path: "/drive/v1/files:" + action,
		NoResponse: true, // only returns {"task_id":""}
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return fmt.Errorf("batch action %q failed: %w", action, err)
	}
	return nil
}
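
// A minimal usage sketch for requestBatchAction (hypothetical helper,
// not part of the backend), using one of the action names listed above:
func (f *Fs) trashFiles(ctx context.Context, ids []string) error {
	return f.requestBatchAction(ctx, "batchTrash", &api.RequestBatch{Ids: ids})
}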

// requestNewTask posts an api.RequestNewTask and returns the resulting api.Task
func (f *Fs) requestNewTask(ctx context.Context, req *api.RequestNewTask) (info *api.Task, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path: "/drive/v1/files",
	}
	var newTask api.NewTask
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, &req, &newTask)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	return newTask.Task, nil
}

// requestNewFile posts an api.RequestNewFile and returns the resulting api.NewFile
func (f *Fs) requestNewFile(ctx context.Context, req *api.RequestNewFile) (info *api.NewFile, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path: "/drive/v1/files",
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
		return f.shouldRetry(ctx, resp, err)
	})
	return
}

// getFile gets api.File from API for the ID passed
// and returns rich information containing the additional fields below
// * web_content_link
// * thumbnail_link
// * links
// * medias
func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path: "/drive/v1/files/" + ID,
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
		if err == nil && info.Phase != api.PhaseTypeComplete {
			// could be pending right after the file is created/uploaded
			return true, errors.New("not PHASE_TYPE_COMPLETE")
		}
		return f.shouldRetry(ctx, resp, err)
	})
	return
}

// patchFile updates attributes of the file by ID
//
// currently known patchable fields are
// * name
func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api.File, err error) {
	opts := rest.Opts{
		Method: "PATCH",
		Path: "/drive/v1/files/" + ID,
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
		return f.shouldRetry(ctx, resp, err)
	})
	return
}
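
// A minimal usage sketch for patchFile: renaming is just a PATCH of the
// "name" field (hypothetical helper, not part of the backend):
func (f *Fs) renameFile(ctx context.Context, ID, newName string) (*api.File, error) {
	return f.patchFile(ctx, ID, &api.File{Name: newName})
}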

// getAbout gets drive#quota information from the server
func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path: "/drive/v1/about",
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
		return f.shouldRetry(ctx, resp, err)
	})
	return
}

// requestShare returns information about shareable links
func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path: "/drive/v1/share",
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
		return f.shouldRetry(ctx, resp, err)
	})
	return
}

// Read the sha1 of in returning a reader which will read the same contents
//
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
	// we need an SHA1
	hash := sha1.New()
	// use the teeReader to write to the local file AND calculate the SHA1 while doing so
	teeReader := io.TeeReader(in, hash)

	// nothing to clean up by default
	cleanup = func() {}

	// don't cache small files on disk to reduce wear of the disk
	if size > threshold {
		var tempFile *os.File

		// create the cache file
		tempFile, err = os.CreateTemp("", cachePrefix)
		if err != nil {
			return
		}

		_ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows

		// clean up the file after we are done downloading
		cleanup = func() {
			// the file should normally already be closed, but just to make sure
			_ = tempFile.Close()
			_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
		}

		// copy the ENTIRE file to disk and calculate the SHA1 in the process
		if _, err = io.Copy(tempFile, teeReader); err != nil {
			return
		}
		// jump to the start of the local file so we can pass it along
		if _, err = tempFile.Seek(0, 0); err != nil {
			return
		}

		// replace the already read source with a reader of our cached file
		out = tempFile
	} else {
		// that's a small file, just read it into memory
		var inData []byte
		inData, err = io.ReadAll(teeReader)
		if err != nil {
			return
		}

		// set the reader to our read memory block
		out = bytes.NewReader(inData)
	}
	return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
}
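
// A minimal sketch of the intended calling pattern for readSHA1:
// compute the sha1 first (spooling to disk above the threshold), then
// re-read the same contents for the actual upload. The upload callback
// and the threshold value are hypothetical, for illustration only:
func hashThenUpload(in io.Reader, size int64, upload func(sha1sum string, contents io.Reader) error) error {
	const threshold = 10 * 1024 * 1024 // assumed 10 MiB spool threshold
	sha1sum, out, cleanup, err := readSHA1(in, size, threshold)
	defer cleanup() // must run whether or not readSHA1 failed
	if err != nil {
		return err
	}
	// sha1sum would go into RequestNewFile.Hash; out replays the contents
	return upload(sha1sum, out)
}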

backend/pikpak/pikpak.go (new file, 1718 lines)
File diff suppressed because it is too large

backend/pikpak/pikpak_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test PikPak filesystem interface
package pikpak_test

import (
	"testing"

	"github.com/rclone/rclone/backend/pikpak"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestPikPak:",
		NilObject: (*pikpak.Object)(nil),
	})
}

@@ -82,14 +82,15 @@ func init() {
 			OAuth2Config: oauthConfig,
 		})
 	},
-	Options: []fs.Option{{
+	Options: append(oauthutil.SharedOptions, []fs.Option{{
 		Name: "api_key",
 		Help: `API Key.

This is not normally used - use oauth instead.
`,
-		Hide:    fs.OptionHideBoth,
-		Default: "",
+		Hide:      fs.OptionHideBoth,
+		Default:   "",
+		Sensitive: true,
 	}, {
 		Name: config.ConfigEncoding,
 		Help: config.ConfigEncodingHelp,
@@ -99,7 +100,7 @@ This is not normally used - use oauth instead.
 			encoder.EncodeBackSlash |
 			encoder.EncodeDoubleQuote |
 			encoder.EncodeInvalidUtf8),
-	}},
+	}}...),
 	})
 }

backend/protondrive/protondrive.go (new file, 1114 lines)
File diff suppressed because it is too large

backend/protondrive/protondrive_test.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package protondrive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/protondrive"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestProtonDrive:",
		NilObject: (*protondrive.Object)(nil),
	})
}

@@ -23,6 +23,7 @@ import (
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/readers"
 )

@@ -252,9 +253,12 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
 func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
+	return f.putUnchecked(ctx, in, src, src.Remote(), options...)
+}
+
+func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (o fs.Object, err error) {
 	// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
 	size := src.Size()
-	remote := src.Remote()
 	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
 	if err != nil {
 		return nil, err

@@ -540,24 +544,59 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
 	if err != nil {
 		return nil, err
 	}
+	modTime := src.ModTime(ctx)
+	var resp struct {
+		File putio.File `json:"file"`
+	}
+	// For some unknown reason the API sometimes returns the name
+	// already exists unless we upload to a temporary name and
+	// rename
+	//
+	// {"error_id":null,"error_message":"Name already exist","error_type":"NAME_ALREADY_EXIST","error_uri":"http://api.put.io/v2/docs","extra":{},"status":"ERROR","status_code":400}
+	suffix := "." + random.String(8)
 	err = f.pacer.Call(func() (bool, error) {
 		params := url.Values{}
 		params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
 		params.Set("parent_id", directoryID)
-		params.Set("name", f.opt.Enc.FromStandardName(leaf))
+		params.Set("name", f.opt.Enc.FromStandardName(leaf+suffix))

 		req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
 		if err != nil {
 			return false, err
 		}
 		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
 		// fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
-		_, err = f.client.Do(req, nil)
+		_, err = f.client.Do(req, &resp)
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, err
 	}
-	return f.NewObject(ctx, remote)
+	err = f.pacer.Call(func() (bool, error) {
+		params := url.Values{}
+		params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))
+		params.Set("name", f.opt.Enc.FromStandardName(leaf))
+
+		req, err := f.client.NewRequest(ctx, "POST", "/v2/files/rename", strings.NewReader(params.Encode()))
+		if err != nil {
+			return false, err
+		}
+		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+		_, err = f.client.Do(req, &resp)
+		return shouldRetry(ctx, err)
+	})
+	if err != nil {
+		return nil, err
+	}
+	o, err = f.newObjectWithInfo(ctx, remote, resp.File)
+	if err != nil {
+		return nil, err
+	}
+	err = o.SetModTime(ctx, modTime)
+	if err != nil {
+		return nil, err
+	}
+	return o, nil
 }

 // Move src to this remote using server-side move operations.

@@ -579,6 +618,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
 	if err != nil {
 		return nil, err
 	}
+	modTime := src.ModTime(ctx)
 	err = f.pacer.Call(func() (bool, error) {
 		params := url.Values{}
 		params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))

@@ -596,7 +636,15 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
 	if err != nil {
 		return nil, err
 	}
-	return f.NewObject(ctx, remote)
+	o, err = f.NewObject(ctx, remote)
+	if err != nil {
+		return nil, err
+	}
+	err = o.SetModTime(ctx, modTime)
+	if err != nil {
+		return nil, err
+	}
+	return o, nil
 }

 // DirMove moves src, srcRemote to this remote at dstRemote

@@ -275,7 +275,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err != nil {
 		return err
 	}
-	newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
+	newObj, err := o.fs.putUnchecked(ctx, in, src, o.remote, options...)
 	if err != nil {
 		return err
 	}

@@ -67,7 +67,7 @@ func init() {
 			NoOffline: true,
 		})
 	},
-	Options: []fs.Option{{
+	Options: append(oauthutil.SharedOptions, []fs.Option{{
 		Name:     config.ConfigEncoding,
 		Help:     config.ConfigEncodingHelp,
 		Advanced: true,
@@ -77,7 +77,7 @@ func init() {
 		Default: (encoder.Display |
 			encoder.EncodeBackSlash |
 			encoder.EncodeInvalidUtf8),
-	}},
+	}}...),
 	})
 }

@@ -49,11 +49,13 @@ func init() {
 		Help: "Get QingStor credentials from the environment (env vars or IAM).",
 	}},
 }, {
-	Name: "access_key_id",
-	Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
+	Name:      "access_key_id",
+	Help:      "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
+	Sensitive: true,
 }, {
-	Name: "secret_access_key",
-	Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
+	Name:      "secret_access_key",
+	Help:      "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
+	Sensitive: true,
 }, {
 	Name: "endpoint",
 	Help: "Enter an endpoint URL to connection QingStor API.\n\nLeave blank will use the default value \"https://qingstor.com:443\".",

backend/quatrix/api/types.go (new file, 182 lines)
@@ -0,0 +1,182 @@
// Package api provides types used by the Quatrix API.
package api

import (
	"strconv"
	"time"
)

// OverwriteOnCopyMode is a conflict-resolution mode used during copy: files with conflicting names will be overwritten
const OverwriteOnCopyMode = "overwrite"

// ProfileInfo is profile information about quota
type ProfileInfo struct {
	UserUsed int64 `json:"user_used"`
	UserLimit int64 `json:"user_limit"`
	AccUsed int64 `json:"acc_used"`
	AccLimit int64 `json:"acc_limit"`
}

// IDList is a general object that contains a list of ids
type IDList struct {
	IDs []string `json:"ids"`
}

// DeleteParams is the request to delete an object
type DeleteParams struct {
	IDs []string `json:"ids"`
	DeletePermanently bool `json:"delete_permanently"`
}

// FileInfoParams is the request to get an object's (file or directory) info
type FileInfoParams struct {
	ParentID string `json:"parent_id,omitempty"`
	Path string `json:"path"`
}

// FileInfo is the response to get an object's (file or directory) info
type FileInfo struct {
	FileID string `json:"file_id"`
	ParentID string `json:"parent_id"`
	Src string `json:"src"`
	Type string `json:"type"`
}

// IsFile returns true if the object is a file,
// false otherwise
func (fi *FileInfo) IsFile() bool {
	if fi == nil {
		return false
	}

	return fi.Type == "F"
}

// IsDir returns true if the object is a directory,
// false otherwise
func (fi *FileInfo) IsDir() bool {
	if fi == nil {
		return false
	}

	return fi.Type == "D" || fi.Type == "S" || fi.Type == "T"
}

// CreateDirParams is the request to create a directory
type CreateDirParams struct {
	Target string `json:"target,omitempty"`
	Name string `json:"name"`
	Resolve bool `json:"resolve"`
}

// File represents metadata about an object in Quatrix (file or directory)
type File struct {
	ID string `json:"id"`
	Created JSONTime `json:"created"`
	Modified JSONTime `json:"modified"`
	Name string `json:"name"`
	ParentID string `json:"parent_id"`
	Size int64 `json:"size"`
	ModifiedMS JSONTime `json:"modified_ms"`
	Type string `json:"type"`
	Operations int `json:"operations"`
	SubType string `json:"sub_type"`
	Content []File `json:"content"`
}

// IsFile returns true if the object is a file,
// false otherwise
func (f *File) IsFile() bool {
	if f == nil {
		return false
	}

	return f.Type == "F"
}

// IsDir returns true if the object is a directory,
// false otherwise
func (f *File) IsDir() bool {
	if f == nil {
		return false
	}

	return f.Type == "D" || f.Type == "S" || f.Type == "T"
}

// SetMTimeParams is the request to set modification time for an object
type SetMTimeParams struct {
	ID string `json:"id,omitempty"`
	MTime JSONTime `json:"mtime"`
}

// JSONTime provides methods to marshal/unmarshal time.Time as Unix time
type JSONTime time.Time

// MarshalJSON returns the time representation in Unix time
func (u JSONTime) MarshalJSON() ([]byte, error) {
	return []byte(strconv.FormatFloat(float64(time.Time(u).UTC().UnixNano())/1e9, 'f', 6, 64)), nil
}

// UnmarshalJSON sets the time from a Unix time representation
func (u *JSONTime) UnmarshalJSON(data []byte) error {
	f, err := strconv.ParseFloat(string(data), 64)
	if err != nil {
		return err
	}

	t := JSONTime(time.Unix(0, int64(f*1e9)))
	*u = t

	return nil
}

// String returns the Unix time representation of the time as a string
func (u JSONTime) String() string {
	return strconv.FormatInt(time.Time(u).UTC().Unix(), 10)
}
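
// A minimal round-trip sketch for JSONTime: the wire format is Unix
// seconds with six fractional digits. Illustrative only (assumes "fmt"
// and "time" are imported where this runs):
func jsonTimeExample() error {
	t := JSONTime(time.Date(2023, 9, 1, 12, 0, 0, 500_000_000, time.UTC))
	b, err := t.MarshalJSON()
	if err != nil {
		return err
	}
	fmt.Println(string(b)) // 1693569600.500000
	var u JSONTime
	if err := u.UnmarshalJSON(b); err != nil {
		return err
	}
	fmt.Println(time.Time(u).UTC()) // 2023-09-01 12:00:00.5 +0000 UTC
	return nil
}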

// DownloadLinkResponse is the response to a download-link request
type DownloadLinkResponse struct {
	ID string `json:"id"`
}

// UploadLinkParams is the request to get an upload-link
type UploadLinkParams struct {
	Name string `json:"name"`
	ParentID string `json:"parent_id"`
	Resolve bool `json:"resolve"`
}

// UploadLinkResponse is the response to an upload-link request
type UploadLinkResponse struct {
	Name string `json:"name"`
	FileID string `json:"file_id"`
	ParentID string `json:"parent_id"`
	UploadKey string `json:"upload_key"`
}

// UploadFinalizeResponse is the response to the finalize file method
type UploadFinalizeResponse struct {
	FileID string `json:"id"`
	ParentID string `json:"parent_id"`
	Modified int64 `json:"modified"`
	FileSize int64 `json:"size"`
}

// FileModifyParams is the request to get a modify file link
type FileModifyParams struct {
	ID string `json:"id"`
	Truncate int64 `json:"truncate"`
}

// FileCopyMoveOneParams is the request to do server-side copy and move;
// can be used for a file or directory
type FileCopyMoveOneParams struct {
	ID string `json:"file_id"`
	Target string `json:"target_id"`
	Name string `json:"name"`
	MTime JSONTime `json:"mtime"`
	Resolve bool `json:"resolve"`
	ResolveMode string `json:"resolve_mode"`
}

backend/quatrix/quatrix.go (new file, 1254 lines)
File diff suppressed because it is too large

backend/quatrix/quatrix_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test Quatrix filesystem interface
package quatrix_test

import (
	"testing"

	"github.com/rclone/rclone/backend/quatrix"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestQuatrix:",
		NilObject: (*quatrix.Object)(nil),
	})
}

backend/quatrix/upload_memory.go (new file, 108 lines)
@@ -0,0 +1,108 @@
package quatrix

import (
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
)

// UploadMemoryManager dynamically calculates every chunk size for the transfer and increases or decreases it
// depending on the upload speed. This reduces the overall upload time, because faster transfers
// do not have to wait for the slower ones to finish uploading.
type UploadMemoryManager struct {
	m sync.Mutex
	useDynamicSize bool
	shared int64
	reserved int64
	effectiveTime time.Duration
	fileUsage map[string]int64
}

// NewUploadMemoryManager is a constructor for UploadMemoryManager
func NewUploadMemoryManager(ci *fs.ConfigInfo, opt *Options) *UploadMemoryManager {
	useDynamicSize := true

	sharedMemory := int64(opt.MaximalSummaryChunkSize) - int64(opt.MinimalChunkSize)*int64(ci.Transfers)
	if sharedMemory <= 0 {
		sharedMemory = 0
		useDynamicSize = false
	}

	return &UploadMemoryManager{
		useDynamicSize: useDynamicSize,
		shared: sharedMemory,
		reserved: int64(opt.MinimalChunkSize),
		effectiveTime: time.Duration(opt.EffectiveUploadTime),
		fileUsage: map[string]int64{},
	}
}

// Consume decides the amount of memory to consume for the next chunk
func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed float64) int64 {
	if !u.useDynamicSize {
		if neededMemory < u.reserved {
			return neededMemory
		}

		return u.reserved
	}

	u.m.Lock()
	defer u.m.Unlock()

	borrowed, found := u.fileUsage[fileID]
	if found {
		u.shared += borrowed
		borrowed = 0
	}

	defer func() { u.fileUsage[fileID] = borrowed }()

	effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())

	if effectiveChunkSize < u.reserved {
		effectiveChunkSize = u.reserved
	}

	if neededMemory < effectiveChunkSize {
		effectiveChunkSize = neededMemory
	}

	if effectiveChunkSize <= u.reserved {
		return effectiveChunkSize
	}

	toBorrow := effectiveChunkSize - u.reserved

	if toBorrow <= u.shared {
		u.shared -= toBorrow
		borrowed = toBorrow

		return effectiveChunkSize
	}

	borrowed = u.shared
	u.shared = 0

	return borrowed + u.reserved
}

// Return returns the memory consumed for the previous chunk upload to the memory pool
func (u *UploadMemoryManager) Return(fileID string) {
	if !u.useDynamicSize {
		return
	}

	u.m.Lock()
	defer u.m.Unlock()

	borrowed, found := u.fileUsage[fileID]
	if !found {
		return
	}

	u.shared += borrowed

	delete(u.fileUsage, fileID)
}
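
// A minimal usage sketch for UploadMemoryManager: each chunk upload asks
// Consume for a chunk size based on the measured speed, and Return
// releases the borrowed memory when the file is done. The uploadChunk
// callback and speed measurement are hypothetical:
func uploadFile(u *UploadMemoryManager, fileID string, remaining int64, uploadChunk func(size int64) (speed float64, err error)) error {
	defer u.Return(fileID) // give borrowed memory back to the pool
	speed := 0.0           // unknown before the first chunk
	for remaining > 0 {
		size := u.Consume(fileID, remaining, speed)
		var err error
		speed, err = uploadChunk(size)
		if err != nil {
			return err
		}
		remaining -= size
	}
	return nil
}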

backend/s3/s3.go (1126 lines changed)
File diff suppressed because it is too large
@@ -6,15 +6,19 @@ import (
 	"context"
 	"crypto/md5"
 	"fmt"
+	"path"
+	"strings"
 	"testing"
 	"time"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/version"
 	"github.com/stretchr/testify/assert"

@@ -250,7 +254,8 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
 	time.Sleep(2 * time.Second)

 	// Create an object
-	const fileName = "test-versions.txt"
+	const dirName = "versions"
+	const fileName = dirName + "/" + "test-versions.txt"
 	contents := random.String(100)
 	item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
 	obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)

@@ -280,11 +285,12 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
 	}()

 	// Read the contents
-	entries, err := f.List(ctx, "")
+	entries, err := f.List(ctx, dirName)
 	require.NoError(t, err)
 	tests := 0
 	var fileNameVersion string
 	for _, entry := range entries {
+		t.Log(entry)
 		remote := entry.Remote()
 		if remote == fileName {
 			t.Run("ReadCurrent", func(t *testing.T) {

@@ -309,6 +315,23 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
 			require.NotNil(t, o)
 			assert.Equal(t, int64(100), o.Size(), o.Remote())
 		})
+
+		// Check we can make a NewFs from that object with a version suffix
+		t.Run("NewFs", func(t *testing.T) {
+			newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
+			// Make sure --s3-versions is set in the config of the new remote
+			fs.Debugf(nil, "oldPath = %q", newPath)
+			lastColon := strings.LastIndex(newPath, ":")
+			require.True(t, lastColon >= 0)
+			newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
+			fs.Debugf(nil, "newPath = %q", newPath)
+			fNew, err := cache.Get(ctx, newPath)
+			// This should return pointing to a file
+			require.Equal(t, fs.ErrorIsFile, err)
+			require.NotNil(t, fNew)
+			// With the directory the directory above
+			assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
+		})
 	})

 	t.Run("VersionAt", func(t *testing.T) {

@@ -5,6 +5,7 @@ import (
 	"testing"

 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
 )

@@ -20,6 +21,24 @@
 	})
 }

+func TestIntegration2(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("skipping as -remote is set")
+	}
+	name := "TestS3"
+	fstests.Run(t, &fstests.Opt{
+		RemoteName:  name + ":",
+		NilObject:   (*Object)(nil),
+		TiersToTest: []string{"STANDARD", "STANDARD_IA"},
+		ChunkedUpload: fstests.ChunkedUploadConfig{
+			MinChunkSize: minChunkSize,
+		},
+		ExtraConfig: []fstests.ExtraConfigItem{
+			{Name: name, Key: "directory_markers", Value: "true"},
+		},
+	})
+}
+
 func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadChunkSize(cs)
 }

@@ -14,6 +14,7 @@ import (

 // URL parameters that need to be added to the signature
 var s3ParamsToSign = map[string]struct{}{
+	"delete":   {},
 	"acl":      {},
 	"location": {},
 	"logging":  {},

@@ -17,17 +17,17 @@ func TestShouldAllowShutdownTwice(t *testing.T) {
 }

 func TestRenewalInTimeLimit(t *testing.T) {
-	var count int64
+	var count atomic.Int64

 	renew := NewRenew(100*time.Millisecond, func() error {
-		atomic.AddInt64(&count, 1)
+		count.Add(1)
 		return nil
 	})
 	time.Sleep(time.Second)
 	renew.Shutdown()

 	// there's no guarantee the CI agent can handle a simple goroutine
-	renewCount := atomic.LoadInt64(&count)
+	renewCount := count.Load()
 	t.Logf("renew count = %d", renewCount)
 	assert.Greater(t, renewCount, int64(0))
 	assert.Less(t, renewCount, int64(11))

@@ -67,15 +67,18 @@ func init() {
 		Value: "https://cloud.seafile.com/",
 		Help:  "Connect to cloud.seafile.com.",
 	}},
+	Sensitive: true,
 }, {
-	Name:     configUser,
-	Help:     "User name (usually email address).",
-	Required: true,
+	Name:      configUser,
+	Help:      "User name (usually email address).",
+	Required:  true,
+	Sensitive: true,
 }, {
 	// Password is not required, it will be left blank for 2FA
 	Name:       configPassword,
 	Help:       "Password.",
 	IsPassword: true,
+	Sensitive:  true,
 }, {
 	Name: config2FA,
 	Help: "Two-factor authentication ('true' if the account has 2FA enabled).",
@@ -87,6 +90,7 @@ func init() {
 	Name:       configLibraryKey,
 	Help:       "Library password (for encrypted libraries only).\n\nLeave blank if you pass it through the command line.",
 	IsPassword: true,
+	Sensitive:  true,
 }, {
 	Name: configCreateLibrary,
 	Help: "Should rclone create a library if it doesn't exist.",
@@ -94,9 +98,10 @@ func init() {
 	Default: false,
 }, {
 	// Keep the authentication token after entering the 2FA code
-	Name: configAuthToken,
-	Help: "Authentication token.",
-	Hide: fs.OptionHideBoth,
+	Name:      configAuthToken,
+	Help:      "Authentication token.",
+	Hide:      fs.OptionHideBoth,
+	Sensitive: true,
 }, {
 	Name: config.ConfigEncoding,
 	Help: config.ConfigEncodingHelp,

Some files were not shown because too many files have changed in this diff.