Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: pr-6474-pr...fix-http-s (726 commits)
.github/FUNDING.yml (vendored): 4 lines changed
@@ -1,4 +0,0 @@
-github: [ncw]
-patreon: njcw
-liberapay: ncw
-custom: ["https://rclone.org/donate/"]
.github/dependabot.yml (vendored, new file): 6 lines changed
@@ -0,0 +1,6 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"
.github/workflows/build.yml (vendored): 65 lines changed
@@ -8,29 +8,31 @@ name: build
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- '*'
|
||||
- '**'
|
||||
tags:
|
||||
- '*'
|
||||
- '**'
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
manual:
|
||||
description: Manual run (bypass default conditions)
|
||||
type: boolean
|
||||
required: true
|
||||
default: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
|
||||
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
|
||||
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
|
||||
|
||||
include:
|
||||
- job_name: linux
|
||||
os: ubuntu-latest
|
||||
go: '1.19.x'
|
||||
go: '1.21.0-rc.3'
|
||||
gotags: cmount
|
||||
build_flags: '-include "^linux/"'
|
||||
check: true
|
||||
@@ -41,14 +43,14 @@ jobs:
|
||||
|
||||
- job_name: linux_386
|
||||
os: ubuntu-latest
|
||||
go: '1.19.x'
|
||||
go: '1.21.0-rc.3'
|
||||
goarch: 386
|
||||
gotags: cmount
|
||||
quicktest: true
|
||||
|
||||
- job_name: mac_amd64
|
||||
os: macos-11
|
||||
go: '1.19.x'
|
||||
go: '1.21.0-rc.3'
|
||||
gotags: 'cmount'
|
||||
build_flags: '-include "^darwin/amd64" -cgo'
|
||||
quicktest: true
|
||||
@@ -57,14 +59,14 @@ jobs:
|
||||
|
||||
- job_name: mac_arm64
|
||||
os: macos-11
|
||||
go: '1.19.x'
|
||||
go: '1.21.0-rc.3'
|
||||
gotags: 'cmount'
|
||||
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
||||
deploy: true
|
||||
|
||||
- job_name: windows
|
||||
os: windows-latest
|
||||
go: '1.19.x'
|
||||
go: '1.21.0-rc.3'
|
||||
gotags: cmount
|
||||
cgo: '0'
|
||||
build_flags: '-include "^windows/"'
|
||||
@@ -74,20 +76,20 @@ jobs:
|
||||
|
||||
- job_name: other_os
|
||||
os: ubuntu-latest
|
||||
go: '1.19.x'
|
||||
go: '1.21.0-rc.3'
|
||||
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
|
||||
compile_all: true
|
||||
deploy: true
|
||||
|
||||
- job_name: go1.17
|
||||
- job_name: go1.19
|
||||
os: ubuntu-latest
|
||||
go: '1.17.x'
|
||||
go: '1.19'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
- job_name: go1.18
|
||||
- job_name: go1.20
|
||||
os: ubuntu-latest
|
||||
go: '1.18.x'
|
||||
go: '1.20'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
@@ -102,9 +104,8 @@ jobs:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v3
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
stable: 'false'
|
||||
go-version: ${{ matrix.go }}
|
||||
check-latest: true
|
||||
|
||||
@@ -123,12 +124,17 @@ jobs:
|
||||
sudo modprobe fuse
|
||||
sudo chmod 666 /dev/fuse
|
||||
sudo chown root:$USER /etc/fuse.conf
|
||||
sudo apt-get install fuse libfuse-dev rpm pkg-config
|
||||
sudo apt-get install fuse3 libfuse-dev rpm pkg-config
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
|
||||
- name: Install Libraries on macOS
|
||||
shell: bash
|
||||
run: |
|
||||
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
|
||||
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
|
||||
unset HOMEBREW_NO_INSTALL_FROM_API
|
||||
brew untap --force homebrew/core
|
||||
brew untap --force homebrew/cask
|
||||
brew update
|
||||
brew install --cask macfuse
|
||||
if: matrix.os == 'macos-11'
|
||||
@@ -216,10 +222,10 @@ jobs:
|
||||
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
|
||||
# working-directory: '$(modulePath)'
|
||||
# Deploy binaries if enabled in config && not a PR && not a fork
|
||||
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
|
||||
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
|
||||
|
||||
lint:
|
||||
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
|
||||
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
|
||||
timeout-minutes: 30
|
||||
name: "lint"
|
||||
runs-on: ubuntu-latest
|
||||
@@ -234,8 +240,21 @@ jobs:
|
||||
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
|
||||
version: latest
|
||||
|
||||
# Run govulncheck on the latest go version, the one we build binaries with
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.21.0-rc.3'
|
||||
check-latest: true
|
||||
|
||||
- name: Install govulncheck
|
||||
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||
|
||||
- name: Scan for vulnerabilities
|
||||
run: govulncheck ./...
|
||||
|
||||
android:
|
||||
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
|
||||
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
|
||||
timeout-minutes: 30
|
||||
name: "android-all"
|
||||
runs-on: ubuntu-latest
|
||||
@@ -248,9 +267,9 @@ jobs:
|
||||
|
||||
# Upgrade together with NDK version
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v3
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: 1.19.x
|
||||
go-version: '1.21.0-rc.3'
|
||||
|
||||
- name: Go module cache
|
||||
uses: actions/cache@v3
|
||||
@@ -338,4 +357,4 @@ jobs:
|
||||
env:
|
||||
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
|
||||
# Upload artifacts if not a PR && not a fork
|
||||
if: github.head_ref == '' && github.repository == 'rclone/rclone'
|
||||
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
|
||||
|
||||
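The reworked `if:` condition that now gates the build, lint and android jobs is dense. Below is a minimal Go sketch of the same decision logic, purely for illustration; the struct and its fields are invented stand-ins for the GitHub Actions expression context, not anything rclone ships.

```go
package main

import "fmt"

// ctx is an illustrative stand-in for the GitHub Actions expression context.
type ctx struct {
	manual     bool   // github.event.inputs.manual == 'true'
	repository string // github.repository
	eventName  string // github.event_name
	headRepo   string // github.event.pull_request.head.repo.full_name
	baseRepo   string // github.event.pull_request.base.repo.full_name
}

// shouldRun mirrors the new workflow condition: run on manual dispatch, or in
// the rclone/rclone repository when the event is not a pull request or the
// pull request comes from a fork (head repo differs from base repo).
func shouldRun(c ctx) bool {
	if c.manual {
		return true
	}
	if c.repository != "rclone/rclone" {
		return false
	}
	return c.eventName != "pull_request" || c.headRepo != c.baseRepo
}

func main() {
	// A pull request opened from a branch in the main repository is skipped,
	// since the corresponding push event already triggers a run.
	fmt.Println(shouldRun(ctx{repository: "rclone/rclone", eventName: "pull_request",
		headRepo: "rclone/rclone", baseRepo: "rclone/rclone"})) // false
	// A pull request from a fork still runs.
	fmt.Println(shouldRun(ctx{repository: "rclone/rclone", eventName: "pull_request",
		headRepo: "contributor/rclone", baseRepo: "rclone/rclone"})) // true
}
```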
.github/workflows/build_publish_beta_docker_image.yml (vendored, new file): 61 lines changed
@@ -0,0 +1,61 @@
+name: Docker beta build
+
+on:
+  push:
+    branches:
+      - master
+jobs:
+  build:
+    if: github.repository == 'rclone/rclone'
+    runs-on: ubuntu-latest
+    name: Build image job
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: ghcr.io/${{ github.repository }}
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          # This is the user that triggered the Workflow. In this case, it will
+          # either be the user whom created the Release or manually triggered
+          # the workflow_dispatch.
+          username: ${{ github.actor }}
+          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
+          # GitHub Actions at the start of a workflow run to identify the job.
+          # This is used to authenticate against GitHub Container Registry.
+          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
+          # for more detailed information.
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build and publish image
+        uses: docker/build-push-action@v4
+        with:
+          file: Dockerfile
+          context: .
+          push: true # push the image to ghcr
+          tags: |
+            ghcr.io/rclone/rclone:beta
+            rclone/rclone:beta
+          labels: ${{ steps.meta.outputs.labels }}
+          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          provenance: false
+          # Eventually cache will need to be cleared if builds more frequent than once a week
+          # https://github.com/docker/build-push-action/issues/252
.github/workflows/build_publish_docker_image.yml (vendored): 26 lines changed
@@ -1,26 +0,0 @@
-name: Docker beta build
-
-on:
-  push:
-    branches:
-      - master
-
-jobs:
-  build:
-    if: github.repository == 'rclone/rclone'
-    runs-on: ubuntu-latest
-    name: Build image job
-    steps:
-      - name: Checkout master
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-      - name: Build and publish image
-        uses: ilteoood/docker_buildx@1.1.0
-        with:
-          tag: beta
-          imageName: rclone/rclone
-          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          publish: true
-          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
-          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
.github/workflows/winget.yml (vendored, new file): 14 lines changed
@@ -0,0 +1,14 @@
+name: Publish to Winget
+on:
+  release:
+    types: [released]
+
+jobs:
+  publish:
+    runs-on: windows-latest # Action can only run on Windows
+    steps:
+      - uses: vedantmgoyal2009/winget-releaser@v2
+        with:
+          identifier: Rclone.Rclone
+          installers-regex: '-windows-\w+\.zip$'
+          token: ${{ secrets.WINGET_TOKEN }}
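To make the `installers-regex` concrete, here is a small Go check of what that pattern does and does not match. The asset names are invented examples for illustration, not the exact artifact names rclone publishes.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The winget-releaser step above picks release assets whose names match this pattern.
	installers := regexp.MustCompile(`-windows-\w+\.zip$`)

	for _, name := range []string{
		"rclone-v1.64.0-windows-amd64.zip", // matches
		"rclone-v1.64.0-windows-386.zip",   // matches
		"rclone-v1.64.0-linux-amd64.zip",   // does not match (not windows)
		"rclone-v1.64.0-windows-amd64.deb", // does not match (not a .zip)
	} {
		fmt.Printf("%-36s %v\n", name, installers.MatchString(name))
	}
}
```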
golangci-lint configuration (file name not shown in this capture):
@@ -2,15 +2,17 @@

linters:
  enable:
    - deadcode
    - errcheck
    - goimports
    - revive
    - ineffassign
    - structcheck
    - varcheck
    - govet
    - unconvert
    - staticcheck
    - gosimple
    - stylecheck
    - unused
    - misspell
    #- prealloc
    #- maligned
  disable-all: true
@@ -25,6 +27,30 @@ issues:
  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0

  exclude-rules:

    - linters:
        - staticcheck
      text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'

run:
  # timeout for analysis, e.g. 30s, 5m, default is 1m
  timeout: 10m

linters-settings:
  revive:
    rules:
      - name: unreachable-code
        disabled: true
      - name: unused-parameter
        disabled: true
      - name: empty-block
        disabled: true
      - name: redefines-builtin-id
        disabled: true
      - name: superfluous-else
        disabled: true
  stylecheck:
    # Only enable the checks performed by the staticcheck stand-alone tool,
    # as documented here: https://staticcheck.io/docs/configuration/options/#checks
    checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
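The new `exclude-rules` entry silences one specific SA1019 deprecation warning repo-wide. The other approach used in this compare (see the box backend change further down) is to keep the check enabled and annotate the individual line; a toy Go sketch of that style, assuming the golang-jwt/jwt/v4 dependency:

```go
package example

import "github.com/golang-jwt/jwt/v4"

// newClaims deliberately uses the deprecated jwt.StandardClaims type, so the
// staticcheck SA1019 warning is suppressed on this one line rather than being
// excluded for the whole repository.
func newClaims(issuer string) jwt.StandardClaims {
	return jwt.StandardClaims{Issuer: issuer} //nolint:staticcheck // SA1019: deprecated type kept intentionally
}
```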
Contributing guide (file name not shown in this capture):
@@ -419,7 +419,7 @@ remote or an fs.

Research

-* Look at the interfaces defined in `fs/fs.go`
+* Look at the interfaces defined in `fs/types.go`
* Study one or more of the existing remotes

Getting going
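For the "Getting going" step that follows, the registration pattern every remote uses is the one visible in the backend changes later in this compare (`fs.Register` with an `fs.RegInfo`). A bare-bones, hypothetical skeleton, with an invented backend name and option and an unimplemented `NewFs`:

```go
// Package mybackend sketches how a new remote plugs itself into rclone.
// The backend name and option are invented for illustration.
package mybackend

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mybackend",
		Description: "Example remote used to illustrate registration",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "endpoint",
			Help: "Endpoint for the service.",
		}},
	})
}

// NewFs constructs the remote from its configuration. A real backend returns
// a type implementing the interfaces defined in fs/types.go.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("not implemented")
}
```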
Dockerfile:
@@ -11,7 +11,7 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest

-RUN apk --no-cache add ca-certificates fuse tzdata && \
+RUN apk --no-cache add ca-certificates fuse3 tzdata && \
    echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
Maintainers list (file name not shown in this capture):
@@ -16,6 +16,9 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
+| wiserain | @wiserain | pikpak backend |
+| albertony | @albertony | |
+| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |

**This is a work in progress Draft**
MANUAL.html (generated): 7712 lines changed. File diff suppressed because it is too large.
MANUAL.txt (generated): 8558 lines changed. File diff suppressed because it is too large.
Makefile: 5 lines changed
@@ -81,6 +81,9 @@ quicktest:
racequicktest:
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...

+compiletest:
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
+
# Do source code quality checks
check: rclone
	@echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -93,7 +96,7 @@ build_dep:

# Get the release dependencies we only install on linux
release_dep_linux:
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
+	go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest

# Get the release dependencies we only install on Windows
release_dep_windows:
README.md: 13 lines changed
@@ -25,18 +25,19 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
|
||||
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
||||
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
||||
* Box [:page_facing_up:](https://rclone.org/box/)
|
||||
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
|
||||
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
|
||||
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
|
||||
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
||||
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
|
||||
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
||||
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
|
||||
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
||||
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
|
||||
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||
@@ -45,12 +46,13 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
||||
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
|
||||
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
||||
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
|
||||
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
|
||||
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
|
||||
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
||||
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
||||
@@ -60,23 +62,30 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
||||
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
||||
* OVH [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
|
||||
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
|
||||
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
||||
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
||||
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
||||
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
|
||||
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
||||
* put.io [:page_facing_up:](https://rclone.org/putio/)
|
||||
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
||||
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
||||
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
|
||||
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||
* Storj [:page_facing_up:](https://rclone.org/storj/)
|
||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
|
||||
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||
|
||||
Release instructions (file name not shown in this capture):
@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases

## Making a release

  * git checkout master # see below for stable branch
- * git pull
+ * git pull # IMPORTANT
  * git status - make sure everything is checked in
  * Check GitHub actions build for master is Green
  * make test # see integration test server or run locally
@@ -21,6 +21,7 @@ This file describes how to make the various kinds of releases
  * git status - to check for new man pages - git add them
  * git commit -a -v -m "Version v1.XX.0"
  * make retag
+ * git push origin # without --follow-tags so it doesn't push the tag if it fails
  * git push --follow-tags origin
  * # Wait for the GitHub builds to complete then...
  * make fetch_binaries
@@ -74,8 +75,7 @@ Set vars
First make the release branch. If this is a second point release then
this will be done already.

- * git branch ${BASE_TAG} ${BASE_TAG}-stable
- * git co ${BASE_TAG}-stable
+ * git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
  * make startstable

Now
Backend import registrations (file name not shown in this capture):
@@ -24,7 +24,6 @@ import (
	_ "github.com/rclone/rclone/backend/hdfs"
	_ "github.com/rclone/rclone/backend/hidrive"
	_ "github.com/rclone/rclone/backend/http"
-	_ "github.com/rclone/rclone/backend/hubic"
	_ "github.com/rclone/rclone/backend/internetarchive"
	_ "github.com/rclone/rclone/backend/jottacloud"
	_ "github.com/rclone/rclone/backend/koofr"
@@ -35,8 +34,11 @@ import (
	_ "github.com/rclone/rclone/backend/netstorage"
	_ "github.com/rclone/rclone/backend/onedrive"
	_ "github.com/rclone/rclone/backend/opendrive"
+	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
	_ "github.com/rclone/rclone/backend/pcloud"
+	_ "github.com/rclone/rclone/backend/pikpak"
	_ "github.com/rclone/rclone/backend/premiumizeme"
+	_ "github.com/rclone/rclone/backend/protondrive"
	_ "github.com/rclone/rclone/backend/putio"
	_ "github.com/rclone/rclone/backend/qingstor"
	_ "github.com/rclone/rclone/backend/s3"
@@ -44,6 +46,7 @@ import (
	_ "github.com/rclone/rclone/backend/sftp"
	_ "github.com/rclone/rclone/backend/sharefile"
	_ "github.com/rclone/rclone/backend/sia"
+	_ "github.com/rclone/rclone/backend/smb"
	_ "github.com/rclone/rclone/backend/storj"
	_ "github.com/rclone/rclone/backend/sugarsync"
	_ "github.com/rclone/rclone/backend/swift"
File diff suppressed because it is too large
@@ -6,10 +6,10 @@
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
@@ -17,10 +17,31 @@ import (
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAzureBlob:",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{},
|
||||
RemoteName: "TestAzureBlob:",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: defaultChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TestIntegration2 runs integration tests against the remote
|
||||
func TestIntegration2(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
name := "TestAzureBlob:"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name,
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: defaultChunkSize,
|
||||
},
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "directory_markers", Value: "true"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -32,36 +53,6 @@ var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
)
|
||||
|
||||
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
|
||||
func TestServicePrincipalFileSuccess(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
credentials := `
|
||||
{
|
||||
"appId": "my application (client) ID",
|
||||
"password": "my secret",
|
||||
"tenant": "my active directory tenant ID"
|
||||
}
|
||||
`
|
||||
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
|
||||
if assert.NoError(t, err) {
|
||||
assert.NotNil(t, tokenRefresher)
|
||||
}
|
||||
}
|
||||
|
||||
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
|
||||
func TestServicePrincipalFileFailure(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
credentials := `
|
||||
{
|
||||
"appId": "my application (client) ID",
|
||||
"tenant": "my active directory tenant ID"
|
||||
}
|
||||
`
|
||||
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
|
||||
}
|
||||
|
||||
func TestValidateAccessTier(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
accessTier string
|
||||
|
||||
@@ -1,137 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
)
|
||||
|
||||
const (
|
||||
azureResource = "https://storage.azure.com"
|
||||
imdsAPIVersion = "2018-02-01"
|
||||
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
|
||||
)
|
||||
|
||||
// This custom type is used to add the port the test server has bound to
|
||||
// to the request context.
|
||||
type testPortKey string
|
||||
|
||||
type msiIdentifierType int
|
||||
|
||||
const (
|
||||
msiClientID msiIdentifierType = iota
|
||||
msiObjectID
|
||||
msiResourceID
|
||||
)
|
||||
|
||||
type userMSI struct {
|
||||
Type msiIdentifierType
|
||||
Value string
|
||||
}
|
||||
|
||||
type httpError struct {
|
||||
Response *http.Response
|
||||
}
|
||||
|
||||
func (e httpError) Error() string {
|
||||
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
|
||||
}
|
||||
|
||||
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
|
||||
// Metadata Service.
|
||||
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
|
||||
// Attempt to get an MSI token; silently continue if unsuccessful.
|
||||
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
|
||||
result := adal.Token{}
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Failed to create request: %v", err)
|
||||
return result, err
|
||||
}
|
||||
params := req.URL.Query()
|
||||
params.Set("resource", azureResource)
|
||||
params.Set("api-version", imdsAPIVersion)
|
||||
|
||||
// Specify user-assigned identity if requested.
|
||||
if identity != nil {
|
||||
switch identity.Type {
|
||||
case msiClientID:
|
||||
params.Set("client_id", identity.Value)
|
||||
case msiObjectID:
|
||||
params.Set("object_id", identity.Value)
|
||||
case msiResourceID:
|
||||
params.Set("mi_res_id", identity.Value)
|
||||
default:
|
||||
// If this happens, the calling function and this one don't agree on
|
||||
// what valid ID types exist.
|
||||
return result, fmt.Errorf("unknown MSI identity type specified")
|
||||
}
|
||||
}
|
||||
req.URL.RawQuery = params.Encode()
|
||||
|
||||
// The Metadata header is required by all calls to IMDS.
|
||||
req.Header.Set("Metadata", "true")
|
||||
|
||||
// If this function is run in a test, query the test server instead of IMDS.
|
||||
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
|
||||
if isTest {
|
||||
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
|
||||
req.Host = req.URL.Host
|
||||
}
|
||||
|
||||
// Send request
|
||||
httpClient := fshttp.NewClient(ctx)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
|
||||
}
|
||||
defer func() { // resp and Body should not be nil
|
||||
_, err = io.Copy(ioutil.Discard, resp.Body)
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
|
||||
}
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
|
||||
}
|
||||
}()
|
||||
// Check if the status code indicates success
|
||||
// The request returns 200 currently, add 201 and 202 as well for possible extension.
|
||||
switch resp.StatusCode {
|
||||
case 200, 201, 202:
|
||||
break
|
||||
default:
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
|
||||
return result, httpError{Response: resp}
|
||||
}
|
||||
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return result, fmt.Errorf("couldn't read IMDS response: %w", err)
|
||||
}
|
||||
// Remove BOM, if any. azcopy does this so I'm following along.
|
||||
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
|
||||
|
||||
// This would be a good place to persist the token if a large number of rclone
|
||||
// invocations are being made in a short amount of time. If the token is
|
||||
// persisted, the azureblob code will need to check for expiry before every
|
||||
// storage API call.
|
||||
err = json.Unmarshal(b, &result)
|
||||
if err != nil {
|
||||
return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
@@ -1,118 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
err := r.ParseForm()
|
||||
require.NoError(t, err)
|
||||
parameters := r.URL.Query()
|
||||
(*actual)["path"] = r.URL.Path
|
||||
(*actual)["Metadata"] = r.Header.Get("Metadata")
|
||||
(*actual)["method"] = r.Method
|
||||
for paramName := range parameters {
|
||||
(*actual)[paramName] = parameters.Get(paramName)
|
||||
}
|
||||
// Make response.
|
||||
response := adal.Token{}
|
||||
responseBytes, err := json.Marshal(response)
|
||||
require.NoError(t, err)
|
||||
_, err = w.Write(responseBytes)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestManagedIdentity(t *testing.T) {
|
||||
// test user-assigned identity specifiers to use
|
||||
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
|
||||
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
|
||||
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
|
||||
tests := []struct {
|
||||
identity *userMSI
|
||||
identityParameterName string
|
||||
expectedAbsent []string
|
||||
}{
|
||||
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
|
||||
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
|
||||
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
|
||||
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
|
||||
}
|
||||
alwaysExpected := map[string]string{
|
||||
"path": "/metadata/identity/oauth2/token",
|
||||
"resource": "https://storage.azure.com",
|
||||
"Metadata": "true",
|
||||
"api-version": "2018-02-01",
|
||||
"method": "GET",
|
||||
}
|
||||
for _, test := range tests {
|
||||
actual := make(map[string]string, 10)
|
||||
testServer := httptest.NewServer(handler(t, &actual))
|
||||
defer testServer.Close()
|
||||
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
|
||||
require.NoError(t, err)
|
||||
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
|
||||
_, err = GetMSIToken(ctx, test.identity)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Validate expected query parameters present
|
||||
expected := make(map[string]string)
|
||||
for k, v := range alwaysExpected {
|
||||
expected[k] = v
|
||||
}
|
||||
if test.identity != nil {
|
||||
expected[test.identityParameterName] = test.identity.Value
|
||||
}
|
||||
|
||||
for key := range expected {
|
||||
value, exists := actual[key]
|
||||
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
|
||||
test.identityParameterName, key) {
|
||||
assert.Equalf(t, expected[key], value,
|
||||
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate unexpected query parameters absent
|
||||
for _, key := range test.expectedAbsent {
|
||||
_, exists := actual[key]
|
||||
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func errorHandler(resultCode int) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, "Test error generated", resultCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIMDSErrors(t *testing.T) {
|
||||
errorCodes := []int{404, 429, 500}
|
||||
for _, code := range errorCodes {
|
||||
testServer := httptest.NewServer(errorHandler(code))
|
||||
defer testServer.Close()
|
||||
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
|
||||
require.NoError(t, err)
|
||||
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
|
||||
_, err = GetMSIToken(ctx, nil)
|
||||
require.Error(t, err)
|
||||
httpErr, ok := err.(httpError)
|
||||
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
|
||||
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
|
||||
}
|
||||
}
|
||||
@@ -75,13 +75,15 @@ func init() {
|
||||
Description: "Backblaze B2",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "account",
|
||||
Help: "Account ID or Application Key ID.",
|
||||
Required: true,
|
||||
Name: "account",
|
||||
Help: "Account ID or Application Key ID.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "key",
|
||||
Help: "Application Key.",
|
||||
Required: true,
|
||||
Name: "key",
|
||||
Help: "Application Key.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
||||
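The only change to the options above is the new `Sensitive: true` field. As a rough sketch of how an option carries the flag (the options below are hypothetical), with `IsPassword` remaining the separate flag for values stored obscured, as on `plex_password` in the cache backend later in this compare:

```go
package example

import "github.com/rclone/rclone/fs"

// exampleOptions is a hypothetical option list showing the two flags side by
// side: Sensitive marks a value to be treated as secret, IsPassword marks a
// value that is stored obscured.
var exampleOptions = []fs.Option{{
	Name:      "api_key", // invented name
	Help:      "API key for the service.",
	Required:  true,
	Sensitive: true,
}, {
	Name:       "passphrase", // invented name
	Help:       "Passphrase for the service.",
	IsPassword: true,
}}
```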
@@ -1221,7 +1223,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||
fs.Errorf(object.Name, "Can't create object %v", err)
|
||||
continue
|
||||
}
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
|
||||
err = f.deleteByID(ctx, object.ID, object.Name)
|
||||
checkErr(err)
|
||||
tr.Done(ctx, err)
|
||||
@@ -1235,7 +1237,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||
if err != nil {
|
||||
fs.Errorf(object, "Can't create object %+v", err)
|
||||
}
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
|
||||
if oldOnly && last != remote {
|
||||
// Check current version of the file
|
||||
if object.Action == "hide" {
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/b2/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -21,6 +22,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/chunksize"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/pool"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
@@ -428,18 +430,47 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
|
||||
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
|
||||
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
|
||||
var (
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
remaining = up.size
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
remaining = up.size
|
||||
uploadPool *pool.Pool
|
||||
ci = fs.GetConfig(ctx)
|
||||
)
|
||||
// If using large chunk size then make a temporary pool
|
||||
if up.chunkSize <= int64(up.f.opt.ChunkSize) {
|
||||
uploadPool = up.f.pool
|
||||
} else {
|
||||
uploadPool = pool.New(
|
||||
time.Duration(up.f.opt.MemoryPoolFlushTime),
|
||||
int(up.chunkSize),
|
||||
ci.Transfers,
|
||||
up.f.opt.MemoryPoolUseMmap,
|
||||
)
|
||||
defer uploadPool.Flush()
|
||||
}
|
||||
// Get an upload token and a buffer
|
||||
getBuf := func() (buf []byte) {
|
||||
up.f.getBuf(true)
|
||||
if !up.doCopy {
|
||||
buf = uploadPool.Get()
|
||||
}
|
||||
return buf
|
||||
}
|
||||
// Put an upload token and a buffer
|
||||
putBuf := func(buf []byte) {
|
||||
if !up.doCopy {
|
||||
uploadPool.Put(buf)
|
||||
}
|
||||
up.f.putBuf(nil, true)
|
||||
}
|
||||
g.Go(func() error {
|
||||
for part := int64(1); part <= up.parts; part++ {
|
||||
// Get a block of memory from the pool and token which limits concurrency.
|
||||
buf := up.f.getBuf(up.doCopy)
|
||||
buf := getBuf()
|
||||
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in uploading all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
up.f.putBuf(buf, up.doCopy)
|
||||
putBuf(buf)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -453,14 +484,14 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
|
||||
buf = buf[:reqSize]
|
||||
_, err = io.ReadFull(up.in, buf)
|
||||
if err != nil {
|
||||
up.f.putBuf(buf, up.doCopy)
|
||||
putBuf(buf)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
part := part // for the closure
|
||||
g.Go(func() (err error) {
|
||||
defer up.f.putBuf(buf, up.doCopy)
|
||||
defer putBuf(buf)
|
||||
if !up.doCopy {
|
||||
err = up.transferChunk(gCtx, part, buf)
|
||||
} else {
|
||||
|
||||
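The design choice above (reuse the shared buffer pool for default-sized chunks, create a short-lived pool for oversized ones) is easier to see stripped of the b2 specifics. A self-contained Go sketch of the pattern; names and sizes are illustrative, not rclone's:

```go
package main

import "fmt"

// bufPool is a stand-in for rclone's lib/pool buffer pool.
type bufPool struct{ size int }

func newPool(size int) *bufPool { return &bufPool{size: size} }
func (p *bufPool) Get() []byte  { return make([]byte, p.size) }
func (p *bufPool) Put(b []byte) { /* hand the buffer back for reuse */ }

func main() {
	const defaultChunk = 96 << 20 // illustrative default chunk size
	shared := newPool(defaultChunk)

	upload := func(chunkSize int) {
		pool := shared
		if chunkSize > defaultChunk {
			// Oversized chunks get a throwaway pool so the long-lived shared
			// pool never holds on to unusually large buffers.
			pool = newPool(chunkSize)
		}
		buf := pool.Get()
		defer pool.Put(buf)
		fmt.Printf("uploading with %d byte buffers\n", len(buf))
	}

	upload(defaultChunk)     // served from the shared pool
	upload(4 * defaultChunk) // served from a temporary pool
}
```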
@@ -17,9 +17,9 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -27,6 +27,7 @@ import (
|
	"sync/atomic"
	"time"

+	"github.com/golang-jwt/jwt/v4"
	"github.com/rclone/rclone/backend/box/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
@@ -45,7 +46,6 @@ import (
	"github.com/rclone/rclone/lib/rest"
	"github.com/youmark/pkcs8"
	"golang.org/x/oauth2"
-	"golang.org/x/oauth2/jws"
 )

 const (
@@ -76,6 +76,11 @@ var (
	}
 )

+type boxCustomClaims struct {
+	jwt.StandardClaims
+	BoxSubType string `json:"box_sub_type,omitempty"`
+}
+
 // Register with Fs
 func init() {
	fs.Register(&fs.RegInfo{
@@ -102,16 +107,18 @@ func init() {
			return nil, nil
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
-			Name:     "root_folder_id",
-			Help:     "Fill in for rclone to use a non root folder as its starting point.",
-			Default:  "0",
-			Advanced: true,
+			Name:      "root_folder_id",
+			Help:      "Fill in for rclone to use a non root folder as its starting point.",
+			Default:   "0",
+			Advanced:  true,
+			Sensitive: true,
		}, {
			Name: "box_config_file",
			Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
		}, {
-			Name: "access_token",
-			Help: "Box App Primary Access Token\n\nLeave blank normally.",
+			Name:      "access_token",
+			Help:      "Box App Primary Access Token\n\nLeave blank normally.",
+			Sensitive: true,
		}, {
			Name:    "box_sub_type",
			Default: "user",
@@ -178,12 +185,12 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
	signingHeaders := getSigningHeaders(boxConfig)
	queryParams := getQueryParams(boxConfig)
	client := fshttp.NewClient(ctx)
-	err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
+	err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
	return err
 }

 func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
-	file, err := ioutil.ReadFile(configFile)
+	file, err := os.ReadFile(configFile)
	if err != nil {
		return nil, fmt.Errorf("box: failed to read Box config: %w", err)
	}
@@ -194,34 +201,31 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
	return boxConfig, nil
 }

-func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
+func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
	val, err := jwtutil.RandomHex(20)
	if err != nil {
		return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
	}

-	claims = &jws.ClaimSet{
-		Iss: boxConfig.BoxAppSettings.ClientID,
-		Sub: boxConfig.EnterpriseID,
-		Aud: tokenURL,
-		Exp: time.Now().Add(time.Second * 45).Unix(),
-		PrivateClaims: map[string]interface{}{
-			"box_sub_type": boxSubType,
-			"aud":          tokenURL,
-			"jti":          val,
+	claims = &boxCustomClaims{
+		//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
+		//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
+		StandardClaims: jwt.StandardClaims{
+			Id:        val,
+			Issuer:    boxConfig.BoxAppSettings.ClientID,
+			Subject:   boxConfig.EnterpriseID,
+			Audience:  tokenURL,
+			ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
		},
+		BoxSubType: boxSubType,
	}

	return claims, nil
 }

-func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
-	signingHeaders := &jws.Header{
-		Algorithm: "RS256",
-		Typ:       "JWT",
-		KeyID:     boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
+func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
+	signingHeaders := map[string]interface{}{
+		"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
	}

	return signingHeaders
 }
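The getClaims/getSigningHeaders rewrite above moves Box's JWT grant from golang.org/x/oauth2/jws to github.com/golang-jwt/jwt/v4. As a rough, hedged sketch of how such a token could be assembled and signed with that library (all identifiers and values below are placeholders, not rclone's actual wiring):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

// boxCustomClaims mirrors the shape used in the diff above: the deprecated
// jwt.StandardClaims plus Box's box_sub_type private claim.
type boxCustomClaims struct {
	jwt.StandardClaims
	BoxSubType string `json:"box_sub_type,omitempty"`
}

func main() {
	// Placeholder key; a real Box app would load its configured private key.
	key, _ := rsa.GenerateKey(rand.Reader, 2048)

	claims := &boxCustomClaims{
		StandardClaims: jwt.StandardClaims{
			Id:        "random-jti",                       // jti should be unique per request
			Issuer:    "client-id",                        // placeholder client ID
			Subject:   "enterprise-id",                    // placeholder enterprise or user ID
			Audience:  "https://api.box.com/oauth2/token", // placeholder token endpoint
			ExpiresAt: time.Now().Add(45 * time.Second).Unix(),
		},
		BoxSubType: "user",
	}

	token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
	token.Header["kid"] = "public-key-id" // matches the "kid" signing header built above

	signed, err := token.SignedString(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(signed), "byte assertion produced")
}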
18  backend/cache/cache.go  vendored

@@ -76,17 +76,19 @@ func init() {
			Name: "plex_url",
			Help: "The URL of the Plex server.",
		}, {
-			Name: "plex_username",
-			Help: "The username of the Plex user.",
+			Name:      "plex_username",
+			Help:      "The username of the Plex user.",
+			Sensitive: true,
		}, {
			Name:       "plex_password",
			Help:       "The password of the Plex user.",
			IsPassword: true,
		}, {
-			Name:     "plex_token",
-			Help:     "The plex token for authentication - auto set normally.",
-			Hide:     fs.OptionHideBoth,
-			Advanced: true,
+			Name:      "plex_token",
+			Help:      "The plex token for authentication - auto set normally.",
+			Hide:      fs.OptionHideBoth,
+			Advanced:  true,
+			Sensitive: true,
		}, {
			Name: "plex_insecure",
			Help: "Skip all certificate verification when connecting to the Plex server.",
@@ -1038,7 +1040,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		}
		fs.Debugf(dir, "list: remove entry: %v", entryRemote)
	}
-	entries = nil
+	entries = nil //nolint:ineffassign

	// and then iterate over the ones from source (temp Objects will override source ones)
	var batchDirectories []*Directory
@@ -1787,7 +1789,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
	}
 }

-// StopBackgroundRunners will signall all the runners to stop their work
+// StopBackgroundRunners will signal all the runners to stop their work
 // can be triggered from a terminate signal or from testing between runs
 func (f *Fs) StopBackgroundRunners() {
	f.cleanupChan <- false
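The same pattern, adding Sensitive: true alongside Hide, Advanced or IsPassword, recurs across the backends in this changeset. A minimal illustration with a hypothetical option table (the Sensitive semantics noted in the comment are an assumption, not taken from this diff):

package example

import "github.com/rclone/rclone/fs"

// exampleOptions is a hypothetical backend option table; the field names
// mirror the fs.Option fields used in the hunks above.
var exampleOptions = []fs.Option{{
	Name:      "api_token",
	Help:      "API token for the service.\n\nLeave blank normally.",
	Sensitive: true, // assumption: flags the value so it can be redacted from config output
}, {
	Name:       "password",
	Help:       "Password for the service.",
	IsPassword: true, // stored obscured in the config file
}, {
	Name:     "endpoint",
	Help:     "Endpoint URL.",
	Advanced: true,
}}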
|
||||
89
backend/cache/cache_internal_test.go
vendored
89
backend/cache/cache_internal_test.go
vendored
@@ -11,7 +11,6 @@ import (
|
||||
goflag "flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
@@ -102,14 +101,12 @@ func TestMain(m *testing.M) {
|
||||
|
||||
func TestInternalListRootAndInnerRemotes(t *testing.T) {
|
||||
id := fmt.Sprintf("tilrair%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||
|
||||
// Instantiate inner fs
|
||||
innerFolder := "inner"
|
||||
runInstance.mkdir(t, rootFs, innerFolder)
|
||||
rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs2, boltDb2)
|
||||
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
|
||||
|
||||
runInstance.writeObjectString(t, rootFs2, "one", "content")
|
||||
listRoot, err := runInstance.list(t, rootFs, "")
|
||||
@@ -167,7 +164,7 @@ func TestInternalVfsCache(t *testing.T) {
|
||||
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
|
||||
for _, r := range li2 {
|
||||
var err error
|
||||
ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
|
||||
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
|
||||
if err != nil || len(ci) == 0 {
|
||||
log.Printf("========== '%v' not in cache", r)
|
||||
} else {
|
||||
@@ -226,8 +223,7 @@ func TestInternalVfsCache(t *testing.T) {
|
||||
|
||||
func TestInternalObjWrapFsFound(t *testing.T) {
|
||||
id := fmt.Sprintf("tiowff%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -259,8 +255,7 @@ func TestInternalObjWrapFsFound(t *testing.T) {
|
||||
|
||||
func TestInternalObjNotFound(t *testing.T) {
|
||||
id := fmt.Sprintf("tionf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
|
||||
obj, err := rootFs.NewObject(context.Background(), "404")
|
||||
require.Error(t, err)
|
||||
@@ -270,8 +265,7 @@ func TestInternalObjNotFound(t *testing.T) {
|
||||
func TestInternalCachedWrittenContentMatches(t *testing.T) {
|
||||
testy.SkipUnreliable(t)
|
||||
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -298,8 +292,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
|
||||
t.Skip("Skip test on windows/386")
|
||||
}
|
||||
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
|
||||
// write the object
|
||||
runInstance.writeRemoteString(t, rootFs, "one", "one content")
|
||||
@@ -317,8 +310,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
|
||||
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
|
||||
testy.SkipUnreliable(t)
|
||||
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
var err error
|
||||
|
||||
// create some rand test data
|
||||
@@ -347,8 +339,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
|
||||
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
|
||||
vfsflags.Opt.DirCacheTime = time.Second
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||
if runInstance.rootIsCrypt {
|
||||
t.Skip("test skipped with crypt remote")
|
||||
}
|
||||
@@ -378,8 +369,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||
func TestInternalLargeWrittenContentMatches(t *testing.T) {
|
||||
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
|
||||
vfsflags.Opt.DirCacheTime = time.Second
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||
if runInstance.rootIsCrypt {
|
||||
t.Skip("test skipped with crypt remote")
|
||||
}
|
||||
@@ -405,8 +395,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
|
||||
|
||||
func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
||||
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -460,8 +449,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
||||
|
||||
func TestInternalMoveWithNotify(t *testing.T) {
|
||||
id := fmt.Sprintf("timwn%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
if !runInstance.wrappedIsExternal {
|
||||
t.Skipf("Not external")
|
||||
}
|
||||
@@ -547,8 +535,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
|
||||
|
||||
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
||||
id := fmt.Sprintf("tincep%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
if !runInstance.wrappedIsExternal {
|
||||
t.Skipf("Not external")
|
||||
}
|
||||
@@ -634,8 +621,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
||||
|
||||
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
||||
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -667,8 +653,7 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
||||
|
||||
func TestInternalCacheWrites(t *testing.T) {
|
||||
id := "ticw"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -689,8 +674,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||
t.Skip("Skip test on windows/386")
|
||||
}
|
||||
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -725,8 +709,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||
func TestInternalExpiredEntriesRemoved(t *testing.T) {
|
||||
id := fmt.Sprintf("tieer%v", time.Now().Unix())
|
||||
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -763,9 +746,7 @@ func TestInternalBug2117(t *testing.T) {
|
||||
vfsflags.Opt.DirCacheTime = time.Second * 10
|
||||
|
||||
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
|
||||
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
|
||||
|
||||
if runInstance.rootIsCrypt {
|
||||
t.Skipf("skipping crypt")
|
||||
@@ -841,7 +822,7 @@ func newRun() *run {
|
||||
}
|
||||
|
||||
if uploadDir == "" {
|
||||
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
|
||||
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
|
||||
}
|
||||
@@ -866,7 +847,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
|
||||
return enc
|
||||
}
|
||||
|
||||
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
|
||||
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
|
||||
fstest.Initialise()
|
||||
remoteExists := false
|
||||
for _, s := range config.FileSections() {
|
||||
@@ -959,10 +940,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
}
|
||||
err = f.Mkdir(context.Background(), "")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
runInstance.cleanupFs(t, f)
|
||||
})
|
||||
|
||||
return f, boltDb
|
||||
}
|
||||
|
||||
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
||||
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
|
||||
err := f.Features().Purge(context.Background(), "")
|
||||
require.NoError(t, err)
|
||||
cfs, err := r.getCacheFs(f)
|
||||
@@ -984,7 +970,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
|
||||
chunk := int64(1024)
|
||||
cnt := size / chunk
|
||||
left := size % chunk
|
||||
f, err := ioutil.TempFile("", "rclonecache-tempfile")
|
||||
f, err := os.CreateTemp("", "rclonecache-tempfile")
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < int(cnt); i++ {
|
||||
@@ -1112,27 +1098,6 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
|
||||
return l, err
|
||||
}
|
||||
|
||||
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
|
||||
in, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = in.Close()
|
||||
}()
|
||||
|
||||
out, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = out.Close()
|
||||
}()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
||||
var err error
|
||||
|
||||
|
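The repeated test change above drops the explicit `defer runInstance.cleanupFs(t, rootFs, boltDb)` in favour of a t.Cleanup hook registered inside newCacheFs, so callers only bind the values they actually use. A minimal sketch of that fixture pattern with hypothetical names:

package example

import "testing"

type fixture struct{ name string }

// newFixture builds a test fixture and registers its teardown with t.Cleanup,
// so individual tests no longer need their own defer for cleanup.
func newFixture(t *testing.T, name string) *fixture {
	t.Helper()
	f := &fixture{name: name}
	t.Cleanup(func() {
		// runs after the test (and its subtests) finish
		f.close()
	})
	return f
}

func (f *fixture) close() {}

func TestSomething(t *testing.T) {
	f := newFixture(t, "demo") // no defer needed
	_ = f
}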
||||
28
backend/cache/cache_upload_test.go
vendored
28
backend/cache/cache_upload_test.go
vendored
@@ -21,10 +21,8 @@ import (
|
||||
|
||||
func TestInternalUploadTempDirCreated(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
nil,
|
||||
runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
||||
require.NoError(t, err)
|
||||
@@ -63,9 +61,7 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
|
||||
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
@@ -73,19 +69,15 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir(context.Background(), "one")
|
||||
require.NoError(t, err)
|
||||
@@ -119,10 +111,8 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
|
||||
func TestInternalUploadTempPathCleaned(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir(context.Background(), "one")
|
||||
require.NoError(t, err)
|
||||
@@ -162,21 +152,19 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
|
||||
|
||||
func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir(context.Background(), "test")
|
||||
require.NoError(t, err)
|
||||
minSize := 5242880
|
||||
maxSize := 10485760
|
||||
totalFiles := 10
|
||||
rand.Seed(time.Now().Unix())
|
||||
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
|
||||
|
||||
lastFile := ""
|
||||
for i := 0; i < totalFiles; i++ {
|
||||
size := int64(rand.Intn(maxSize-minSize) + minSize)
|
||||
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
|
||||
testReader := runInstance.randomReader(t, size)
|
||||
remote := "test/" + strconv.Itoa(i) + ".bin"
|
||||
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
|
||||
@@ -213,9 +201,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||
id := "tiutfo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
@@ -343,9 +329,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||
func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
|
||||
4  backend/cache/plex.go  vendored

@@ -8,7 +8,7 @@ import (
	"crypto/tls"
	"encoding/json"
	"fmt"
-	"io/ioutil"
+	"io"
	"net/http"
	"net/url"
	"strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
			continue
		}
		var data []byte
-		data, err = ioutil.ReadAll(resp.Body)
+		data, err = io.ReadAll(resp.Body)
		if err != nil {
			continue
		}
||||
|
||||
5  backend/cache/storage_persistent.go  vendored

@@ -9,7 +9,6 @@ import (
	"encoding/binary"
	"encoding/json"
	"fmt"
-	"io/ioutil"
	"os"
	"path"
	"strconv"
@@ -473,7 +472,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
	var data []byte

	fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
-	data, err := ioutil.ReadFile(fp)
+	data, err := os.ReadFile(fp)
	if err != nil {
		return nil, err
	}
@@ -486,7 +485,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
	_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)

	filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
-	err := ioutil.WriteFile(filePath, data, os.ModePerm)
+	err := os.WriteFile(filePath, data, os.ModePerm)
	if err != nil {
		return err
	}
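Much of the churn in these files is the io/ioutil deprecation: since Go 1.16 the same helpers live in io and os. A small reference sketch of the replacements used throughout this changeset:

package example

import (
	"io"
	"os"
	"strings"
)

func example() error {
	// ioutil.ReadAll(r)       -> io.ReadAll(r)
	if _, err := io.ReadAll(strings.NewReader("data")); err != nil {
		return err
	}
	// ioutil.WriteFile(...)   -> os.WriteFile(...)
	// ioutil.ReadFile(path)   -> os.ReadFile(path)
	if err := os.WriteFile("example.txt", []byte("data"), 0o600); err != nil {
		return err
	}
	if _, err := os.ReadFile("example.txt"); err != nil {
		return err
	}
	// ioutil.TempFile/TempDir -> os.CreateTemp / os.MkdirTemp
	// ioutil.NopCloser(r)     -> io.NopCloser(r)
	// ioutil.Discard          -> io.Discard
	f, err := os.CreateTemp("", "example-")
	if err != nil {
		return err
	}
	defer os.Remove(f.Name())
	return f.Close()
}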
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"fmt"
|
||||
gohash "hash"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"path"
|
||||
"regexp"
|
||||
@@ -1038,7 +1037,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metadata, err := ioutil.ReadAll(reader)
|
||||
metadata, err := io.ReadAll(reader)
|
||||
_ = reader.Close() // ensure file handle is freed on windows
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1097,7 +1096,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
data, err := ioutil.ReadAll(reader)
|
||||
data, err := io.ReadAll(reader)
|
||||
_ = reader.Close() // ensure file handle is freed on windows
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
data, err := ioutil.ReadAll(r)
|
||||
data, err := io.ReadAll(r)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, contents, string(data))
|
||||
_ = r.Close()
|
||||
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
||||
assert.NoError(t, err)
|
||||
var chunkContents []byte
|
||||
assert.NotPanics(t, func() {
|
||||
chunkContents, err = ioutil.ReadAll(r)
|
||||
chunkContents, err = io.ReadAll(r)
|
||||
_ = r.Close()
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
||||
r, err = willyChunk.Open(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.NotPanics(t, func() {
|
||||
_, err = ioutil.ReadAll(r)
|
||||
_, err = io.ReadAll(r)
|
||||
_ = r.Close()
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
||||
assert.NoError(t, err, "open "+description)
|
||||
assert.NotNil(t, r, "open stream of "+description)
|
||||
if err == nil && r != nil {
|
||||
data, err := ioutil.ReadAll(r)
|
||||
data, err := io.ReadAll(r)
|
||||
assert.NoError(t, err, "read all of "+description)
|
||||
assert.Equal(t, contents, string(data), description+" contents is ok")
|
||||
_ = r.Close()
|
||||
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
|
||||
assert.Error(t, err)
|
||||
|
||||
// Rcat must fail
|
||||
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
|
||||
robj, err := operations.Rcat(ctx, f, file, in, modTime)
|
||||
in := io.NopCloser(bytes.NewBufferString("abc"))
|
||||
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
|
||||
assert.Nil(t, robj)
|
||||
assert.NotNil(t, err)
|
||||
if err != nil {
|
||||
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
|
||||
r, err := dstFile.Open(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, r)
|
||||
data, err := ioutil.ReadAll(r)
|
||||
data, err := io.ReadAll(r)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, contents, string(data))
|
||||
_ = r.Close()
|
||||
|
||||
@@ -1,4 +1,4 @@
-// Package combine implents a backend to combine multiple remotes in a directory tree
+// Package combine implements a backend to combine multiple remotes in a directory tree
 package combine
||||
|
||||
/*
|
||||
@@ -233,6 +233,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
canMove := true
|
||||
for _, u := range f.upstreams {
|
||||
@@ -289,6 +290,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
}
|
||||
}
|
||||
|
||||
// Enable CleanUp when any upstreams support it
|
||||
if features.CleanUp == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().CleanUp != nil {
|
||||
features.CleanUp = f.CleanUp
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable ChangeNotify when any upstreams support it
|
||||
if features.ChangeNotify == nil {
|
||||
for _, u := range f.upstreams {
|
||||
@@ -299,6 +310,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
}
|
||||
}
|
||||
|
||||
// show that we wrap other backends
|
||||
features.Overlay = true
|
||||
|
||||
f.features = features
|
||||
|
||||
// Get common intersection of hashes
|
||||
@@ -351,7 +365,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
|
||||
return g.Wait()
|
||||
}
|
||||
|
||||
// join the elements together but unline path.Join return empty string
|
||||
// join the elements together but unlike path.Join return empty string
|
||||
func join(elem ...string) string {
|
||||
result := path.Join(elem...)
|
||||
if result == "." {
|
||||
@@ -631,7 +645,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uSrc := operations.NewOverrideRemote(src, uRemote)
|
||||
uSrc := fs.NewOverrideRemote(src, uRemote)
|
||||
var o fs.Object
|
||||
if stream {
|
||||
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
|
||||
@@ -887,6 +901,100 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
})
|
||||
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
||||
u, uRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
do := u.f.Features().PublicLink
|
||||
if do == nil {
|
||||
return "", fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx, uRemote, expire, unlink)
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
//
|
||||
// May create duplicates or return errors if src already
|
||||
// exists.
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
srcPath := src.Remote()
|
||||
u, uRemote, err := f.findUpstream(srcPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
do := u.f.Features().PutUnchecked
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
uSrc := fs.NewOverrideRemote(src, uRemote)
|
||||
return do(ctx, in, uSrc, options...)
|
||||
}
|
||||
|
||||
// MergeDirs merges the contents of all the directories passed
|
||||
// in into the first one and rmdirs the other directories.
|
||||
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||
if len(dirs) == 0 {
|
||||
return nil
|
||||
}
|
||||
var (
|
||||
u *upstream
|
||||
uDirs []fs.Directory
|
||||
)
|
||||
for _, dir := range dirs {
|
||||
uNew, uDir, err := f.findUpstream(dir.Remote())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if u == nil {
|
||||
u = uNew
|
||||
} else if u != uNew {
|
||||
return fmt.Errorf("can't merge directories from different upstreams")
|
||||
}
|
||||
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
|
||||
}
|
||||
do := u.f.Features().MergeDirs
|
||||
if do == nil {
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx, uDirs)
|
||||
}
|
||||
|
||||
// CleanUp the trash in the Fs
|
||||
//
|
||||
// Implement this if you have a way of emptying the trash or
|
||||
// otherwise cleaning up old versions of files.
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
if do := u.f.Features().CleanUp; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// OpenWriterAt opens with a handle for random access writes
|
||||
//
|
||||
// Pass in the remote desired and the size if known.
|
||||
//
|
||||
// It truncates any existing object
|
||||
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
|
||||
u, uRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
do := u.f.Features().OpenWriterAt
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx, uRemote, size)
|
||||
}
|
||||
|
||||
// Object describes a wrapped Object
|
||||
//
|
||||
// This is a wrapped Object which knows its path prefix
|
||||
@@ -916,7 +1024,7 @@ func (o *Object) String() string {
|
||||
func (o *Object) Remote() string {
|
||||
newPath, err := o.u.pathAdjustment.do(o.Object.String())
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Bad object: %v", err)
|
||||
fs.Errorf(o.Object, "Bad object: %v", err)
|
||||
return err.Error()
|
||||
}
|
||||
return newPath
|
||||
@@ -988,5 +1096,10 @@ var (
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.MergeDirser = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.OpenWriterAter = (*Fs)(nil)
|
||||
_ fs.FullObject = (*Object)(nil)
|
||||
)
|
||||
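The new combine methods above (PublicLink, PutUnchecked, OpenWriterAt, MergeDirs) all follow one shape: find the upstream that owns the path, check whether it implements the optional feature, and delegate. A stripped-down sketch of that dispatch, with hypothetical types standing in for the real upstream bookkeeping:

package example

import (
	"context"
	"errors"
	"strings"
)

var errNotImplemented = errors.New("optional feature not implemented")

// upstream is a stand-in for combine's real upstream struct.
type upstream struct {
	publicLink func(ctx context.Context, remote string) (string, error) // nil if unsupported
}

type combineFs struct {
	upstreams map[string]*upstream // keyed by top-level directory
}

// findUpstream is a placeholder for the real path adjustment logic.
func (f *combineFs) findUpstream(remote string) (*upstream, string, error) {
	for prefix, u := range f.upstreams {
		if strings.HasPrefix(remote, prefix+"/") {
			return u, strings.TrimPrefix(remote, prefix+"/"), nil
		}
	}
	return nil, "", errors.New("no upstream for " + remote)
}

// PublicLink delegates to the upstream that owns the path, if it supports links.
func (f *combineFs) PublicLink(ctx context.Context, remote string) (string, error) {
	u, uRemote, err := f.findUpstream(remote)
	if err != nil {
		return "", err
	}
	if u.publicLink == nil {
		return "", errNotImplemented
	}
	return u.publicLink(ctx, uRemote)
}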
|
||||
@@ -10,6 +10,11 @@ import (
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
var (
|
||||
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
|
||||
unimplementableObjectMethods = []string{}
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
if *fstest.RemoteName == "" {
|
||||
@@ -17,8 +22,8 @@ func TestIntegration(t *testing.T) {
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -35,7 +40,9 @@ func TestLocal(t *testing.T) {
|
||||
{Name: name, Key: "type", Value: "combine"},
|
||||
{Name: name, Key: "upstreams", Value: upstreams},
|
||||
},
|
||||
QuickTestOK: true,
|
||||
QuickTestOK: true,
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -51,7 +58,9 @@ func TestMemory(t *testing.T) {
|
||||
{Name: name, Key: "type", Value: "combine"},
|
||||
{Name: name, Key: "upstreams", Value: upstreams},
|
||||
},
|
||||
QuickTestOK: true,
|
||||
QuickTestOK: true,
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -68,6 +77,8 @@ func TestMixed(t *testing.T) {
|
||||
{Name: name, Key: "type", Value: "combine"},
|
||||
{Name: name, Key: "upstreams", Value: upstreams},
|
||||
},
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
@@ -187,6 +186,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
// We support reading MIME types no matter the wrapped fs
|
||||
f.features.ReadMimeType = true
|
||||
@@ -468,7 +468,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
|
||||
}
|
||||
|
||||
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
|
||||
tempFile, err := ioutil.TempFile("", "rclone-press-")
|
||||
tempFile, err := os.CreateTemp("", "rclone-press-")
|
||||
defer func() {
|
||||
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
|
||||
// to ignore them
|
||||
@@ -546,8 +546,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
|
||||
}
|
||||
|
||||
// Transfer the data
|
||||
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
|
||||
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
|
||||
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
|
||||
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
|
||||
if err != nil {
|
||||
if o != nil {
|
||||
removeErr := o.Remove(ctx)
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
"github.com/rfjakob/eme"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
@@ -37,7 +38,6 @@ const (
|
||||
blockHeaderSize = secretbox.Overhead
|
||||
blockDataSize = 64 * 1024
|
||||
blockSize = blockHeaderSize + blockDataSize
|
||||
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
|
||||
)
|
||||
|
||||
// Errors returned by cipher
|
||||
@@ -53,8 +53,9 @@ var (
|
||||
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
|
||||
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
|
||||
ErrorFileClosed = errors.New("file already closed")
|
||||
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
|
||||
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
|
||||
ErrorBadSeek = errors.New("Seek beyond end of file")
|
||||
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
|
||||
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
|
||||
obfuscQuoteRune = '!'
|
||||
)
|
||||
@@ -169,27 +170,30 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {

 // Cipher defines an encoding and decoding cipher for the crypt backend
 type Cipher struct {
-	dataKey        [32]byte                  // Key for secretbox
-	nameKey        [32]byte                  // 16,24 or 32 bytes
-	nameTweak      [nameCipherBlockSize]byte // used to tweak the name crypto
-	block          gocipher.Block
-	mode           NameEncryptionMode
-	fileNameEnc    fileNameEncoding
-	buffers        sync.Pool // encrypt/decrypt buffers
-	cryptoRand     io.Reader // read crypto random numbers from here
-	dirNameEncrypt bool
+	dataKey         [32]byte                  // Key for secretbox
+	nameKey         [32]byte                  // 16,24 or 32 bytes
+	nameTweak       [nameCipherBlockSize]byte // used to tweak the name crypto
+	block           gocipher.Block
+	mode            NameEncryptionMode
+	fileNameEnc     fileNameEncoding
+	buffers         sync.Pool // encrypt/decrypt buffers
+	cryptoRand      io.Reader // read crypto random numbers from here
+	dirNameEncrypt  bool
+	passBadBlocks   bool // if set passed bad blocks as zeroed blocks
+	encryptedSuffix string
 }

 // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
 func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
	c := &Cipher{
-		mode:           mode,
-		fileNameEnc:    enc,
-		cryptoRand:     rand.Reader,
-		dirNameEncrypt: dirNameEncrypt,
+		mode:            mode,
+		fileNameEnc:     enc,
+		cryptoRand:      rand.Reader,
+		dirNameEncrypt:  dirNameEncrypt,
+		encryptedSuffix: ".bin",
	}
	c.buffers.New = func() interface{} {
-		return make([]byte, blockSize)
+		return new([blockSize]byte)
	}
	err := c.Key(password, salt)
	if err != nil {
@@ -198,11 +202,29 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
	return c, nil
 }

+// setEncryptedSuffix set suffix, or an empty string
+func (c *Cipher) setEncryptedSuffix(suffix string) {
+	if strings.EqualFold(suffix, "none") {
+		c.encryptedSuffix = ""
+		return
+	}
+	if !strings.HasPrefix(suffix, ".") {
+		fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
+		suffix = "." + suffix
+	}
+	c.encryptedSuffix = suffix
+}
+
+// Call to set bad block pass through
+func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
+	c.passBadBlocks = passBadBlocks
+}
+
 // Key creates all the internal keys from the password passed in using
 // scrypt.
 //
 // If salt is "" we use a fixed salt just to make attackers lives
-// slighty harder than using no salt.
+// slightly harder than using no salt.
 //
 // Note that empty password makes all 0x00 keys which is used in the
 // tests.
@@ -230,15 +252,12 @@ func (c *Cipher) Key(password, salt string) (err error) {
 }

 // getBlock gets a block from the pool of size blockSize
-func (c *Cipher) getBlock() []byte {
-	return c.buffers.Get().([]byte)
+func (c *Cipher) getBlock() *[blockSize]byte {
+	return c.buffers.Get().(*[blockSize]byte)
 }

 // putBlock returns a block to the pool of size blockSize
-func (c *Cipher) putBlock(buf []byte) {
-	if len(buf) != blockSize {
-		panic("bad blocksize returned to pool")
-	}
+func (c *Cipher) putBlock(buf *[blockSize]byte) {
	c.buffers.Put(buf)
 }
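Moving the buffer pool from []byte to *[blockSize]byte is the idiomatic sync.Pool shape: a pointer-to-array goes into the interface{} without an extra allocation (the staticcheck SA6002 complaint), and the fixed-size type makes the old length check in putBlock unnecessary. A standalone sketch of the pattern, with the sizes assumed from the constants above:

package example

import "sync"

const blockSize = 64*1024 + 16 // payload plus per-block header, sizes assumed for the sketch

var pool = sync.Pool{
	// Store *[blockSize]byte rather than []byte: a pointer converts to
	// interface{} without allocating, and the length is fixed by the type.
	New: func() interface{} { return new([blockSize]byte) },
}

func getBlock() *[blockSize]byte  { return pool.Get().(*[blockSize]byte) }
func putBlock(b *[blockSize]byte) { pool.Put(b) }

func example() {
	buf := getBlock()
	defer putBlock(buf)
	// Slice the array when a []byte view is needed.
	view := (*buf)[:1024]
	_ = view
}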
|
||||
@@ -508,7 +527,7 @@ func (c *Cipher) encryptFileName(in string) string {
|
||||
// EncryptFileName encrypts a file path
|
||||
func (c *Cipher) EncryptFileName(in string) string {
|
||||
if c.mode == NameEncryptionOff {
|
||||
return in + encryptedSuffix
|
||||
return in + c.encryptedSuffix
|
||||
}
|
||||
return c.encryptFileName(in)
|
||||
}
|
||||
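With the suffix stored on the Cipher, the NameEncryptionOff path simply appends or strips c.encryptedSuffix, and a suffix configured as "none" becomes the empty string. A toy version of that add/strip logic, outside the Cipher type:

package example

import (
	"errors"
	"strings"
)

var errNotEncrypted = errors.New("not an encrypted file - does not match suffix")

// encryptName and decryptName sketch the NameEncryptionOff behaviour with a
// configurable suffix; an empty suffix means names pass through untouched.
func encryptName(name, suffix string) string {
	return name + suffix
}

func decryptName(name, suffix string) (string, error) {
	remaining := len(name) - len(suffix)
	if remaining == 0 || !strings.HasSuffix(name, suffix) {
		return "", errNotEncrypted
	}
	return name[:remaining], nil
}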
@@ -568,8 +587,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
||||
// DecryptFileName decrypts a file path
|
||||
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
||||
if c.mode == NameEncryptionOff {
|
||||
remainingLength := len(in) - len(encryptedSuffix)
|
||||
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
|
||||
remainingLength := len(in) - len(c.encryptedSuffix)
|
||||
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
|
||||
return "", ErrorNotAnEncryptedFile
|
||||
}
|
||||
decrypted := in[:remainingLength]
|
||||
@@ -609,7 +628,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
|
||||
// fromReader fills the nonce from an io.Reader - normally the OSes
|
||||
// crypto random number generator
|
||||
func (n *nonce) fromReader(in io.Reader) error {
|
||||
read, err := io.ReadFull(in, (*n)[:])
|
||||
read, err := readers.ReadFill(in, (*n)[:])
|
||||
if read != fileNonceSize {
|
||||
return fmt.Errorf("short read of nonce: %w", err)
|
||||
}
|
||||
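readers.ReadFill, which replaces io.ReadFull here and in the encrypter/decrypter below, keeps reading until the buffer is full or the reader is exhausted, and returns the byte count together with the terminating error, so a short final block is usable data instead of io.ErrUnexpectedEOF. A simplified equivalent (an assumption about its behaviour, not the rclone implementation itself):

package example

import "io"

// readFill reads from r into buf until buf is full or r returns an error.
// It returns the number of bytes read; err is io.EOF (or another error)
// once r is exhausted, even if some bytes were read first.
func readFill(r io.Reader, buf []byte) (n int, err error) {
	for n < len(buf) && err == nil {
		var nn int
		nn, err = r.Read(buf[n:])
		n += nn
	}
	return n, err
}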
@@ -664,8 +683,8 @@ type encrypter struct {
|
||||
in io.Reader
|
||||
c *Cipher
|
||||
nonce nonce
|
||||
buf []byte
|
||||
readBuf []byte
|
||||
buf *[blockSize]byte
|
||||
readBuf *[blockSize]byte
|
||||
bufIndex int
|
||||
bufSize int
|
||||
err error
|
||||
@@ -690,9 +709,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
|
||||
}
|
||||
}
|
||||
// Copy magic into buffer
|
||||
copy(fh.buf, fileMagicBytes)
|
||||
copy((*fh.buf)[:], fileMagicBytes)
|
||||
// Copy nonce into buffer
|
||||
copy(fh.buf[fileMagicSize:], fh.nonce[:])
|
||||
copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
@@ -707,22 +726,20 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
|
||||
if fh.bufIndex >= fh.bufSize {
|
||||
// Read data
|
||||
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
||||
readBuf := fh.readBuf[:blockDataSize]
|
||||
n, err = io.ReadFull(fh.in, readBuf)
|
||||
readBuf := (*fh.readBuf)[:blockDataSize]
|
||||
n, err = readers.ReadFill(fh.in, readBuf)
|
||||
if n == 0 {
|
||||
// err can't be nil since:
|
||||
// n == len(buf) if and only if err == nil.
|
||||
return fh.finish(err)
|
||||
}
|
||||
// possibly err != nil here, but we will process the
|
||||
// data and the next call to ReadFull will return 0, err
|
||||
// data and the next call to ReadFill will return 0, err
|
||||
// Encrypt the block using the nonce
|
||||
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
fh.bufIndex = 0
|
||||
fh.bufSize = blockHeaderSize + n
|
||||
fh.nonce.increment()
|
||||
}
|
||||
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
|
||||
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
|
||||
fh.bufIndex += n
|
||||
return n, nil
|
||||
}
|
||||
@@ -763,8 +780,8 @@ type decrypter struct {
|
||||
nonce nonce
|
||||
initialNonce nonce
|
||||
c *Cipher
|
||||
buf []byte
|
||||
readBuf []byte
|
||||
buf *[blockSize]byte
|
||||
readBuf *[blockSize]byte
|
||||
bufIndex int
|
||||
bufSize int
|
||||
err error
|
||||
@@ -782,12 +799,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
|
||||
limit: -1,
|
||||
}
|
||||
// Read file header (magic + nonce)
|
||||
readBuf := fh.readBuf[:fileHeaderSize]
|
||||
_, err := io.ReadFull(fh.rc, readBuf)
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
readBuf := (*fh.readBuf)[:fileHeaderSize]
|
||||
n, err := readers.ReadFill(fh.rc, readBuf)
|
||||
if n < fileHeaderSize && err == io.EOF {
|
||||
// This read from 0..fileHeaderSize-1 bytes
|
||||
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
|
||||
} else if err != nil {
|
||||
} else if err != io.EOF && err != nil {
|
||||
return nil, fh.finishAndClose(err)
|
||||
}
|
||||
// check the magic
|
||||
@@ -845,10 +862,8 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
|
||||
func (fh *decrypter) fillBuffer() (err error) {
|
||||
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
||||
readBuf := fh.readBuf
|
||||
n, err := io.ReadFull(fh.rc, readBuf)
|
||||
n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
|
||||
if n == 0 {
|
||||
// err can't be nil since:
|
||||
// n == len(buf) if and only if err == nil.
|
||||
return err
|
||||
}
|
||||
// possibly err != nil here, but we will process the data and
|
||||
@@ -856,18 +871,25 @@ func (fh *decrypter) fillBuffer() (err error) {
|
||||
|
||||
// Check header + 1 byte exists
|
||||
if n <= blockHeaderSize {
|
||||
if err != nil {
|
||||
if err != nil && err != io.EOF {
|
||||
return err // return pending error as it is likely more accurate
|
||||
}
|
||||
return ErrorEncryptedFileBadHeader
|
||||
}
|
||||
// Decrypt the block using the nonce
|
||||
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
if !ok {
|
||||
if err != nil {
|
||||
if err != nil && err != io.EOF {
|
||||
return err // return pending error as it is likely more accurate
|
||||
}
|
||||
return ErrorEncryptedBadBlock
|
||||
if !fh.c.passBadBlocks {
|
||||
return ErrorEncryptedBadBlock
|
||||
}
|
||||
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
|
||||
// Zero out the bad block and continue
|
||||
for i := range (*fh.buf)[:n] {
|
||||
(*fh.buf)[i] = 0
|
||||
}
|
||||
}
|
||||
fh.bufIndex = 0
|
||||
fh.bufSize = n - blockHeaderSize
|
||||
@@ -893,7 +915,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
|
||||
if fh.limit >= 0 && fh.limit < int64(toCopy) {
|
||||
toCopy = int(fh.limit)
|
||||
}
|
||||
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
|
||||
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
|
||||
fh.bufIndex += n
|
||||
if fh.limit >= 0 {
|
||||
fh.limit -= int64(n)
|
||||
@@ -904,9 +926,8 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// calculateUnderlying converts an (offset, limit) in a crypted file
|
||||
// into an (underlyingOffset, underlyingLimit) for the underlying
|
||||
// file.
|
||||
// calculateUnderlying converts an (offset, limit) in an encrypted file
|
||||
// into an (underlyingOffset, underlyingLimit) for the underlying file.
|
||||
//
|
||||
// It also returns number of bytes to discard after reading the first
|
||||
// block and number of blocks this is from the start so the nonce can
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -28,14 +27,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
|
||||
{"off", NameEncryptionOff, ""},
|
||||
{"standard", NameEncryptionStandard, ""},
|
||||
{"obfuscate", NameEncryptionObfuscated, ""},
|
||||
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
|
||||
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
|
||||
} {
|
||||
actual, actualErr := NewNameEncryptionMode(test.in)
|
||||
assert.Equal(t, actual, test.expected)
|
||||
if test.expectedErr == "" {
|
||||
assert.NoError(t, actualErr)
|
||||
} else {
|
||||
assert.Error(t, actualErr, test.expectedErr)
|
||||
assert.EqualError(t, actualErr, test.expectedErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -406,6 +405,13 @@ func TestNonStandardEncryptFileName(t *testing.T) {
|
||||
// Off mode
|
||||
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
|
||||
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
|
||||
// Off mode with custom suffix
|
||||
c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
|
||||
c.setEncryptedSuffix(".jpg")
|
||||
assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
|
||||
// Off mode with empty suffix
|
||||
c.setEncryptedSuffix("none")
|
||||
assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
|
||||
// Obfuscation mode
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
|
||||
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||
@@ -484,21 +490,27 @@ func TestNonStandardDecryptFileName(t *testing.T) {
|
||||
in string
|
||||
expected string
|
||||
expectedErr error
|
||||
customSuffix string
|
||||
}{
|
||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
|
||||
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
|
||||
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
|
||||
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""},
|
||||
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""},
|
||||
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""},
|
||||
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""},
|
||||
{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"},
|
||||
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"},
|
||||
{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""},
|
||||
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""},
|
||||
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""},
|
||||
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
|
||||
if test.customSuffix != "" {
|
||||
c.setEncryptedSuffix(test.customSuffix)
|
||||
}
|
||||
actual, actualErr := c.DecryptFileName(test.in)
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
@@ -727,7 +739,7 @@ func TestNonceFromReader(t *testing.T) {
|
||||
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
|
||||
buf = bytes.NewBufferString("123456789abcdefghijklmn")
|
||||
err = x.fromReader(buf)
|
||||
assert.Error(t, err, "short read of nonce")
|
||||
assert.EqualError(t, err, "short read of nonce: EOF")
|
||||
}
|
||||
|
||||
func TestNonceFromBuf(t *testing.T) {
|
||||
@@ -1051,7 +1063,7 @@ func TestRandomSource(t *testing.T) {
|
||||
_, _ = source.Read(buf)
|
||||
sink = newRandomSource(1e8)
|
||||
_, err = io.Copy(sink, source)
|
||||
assert.Error(t, err, "Error in stream")
|
||||
assert.EqualError(t, err, "Error in stream at 1")
|
||||
}
|
||||
|
||||
type zeroes struct{}
|
||||
@@ -1073,7 +1085,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
|
||||
source := newRandomSource(copySize)
|
||||
encrypted, err := c.newEncrypter(source, nil)
|
||||
assert.NoError(t, err)
|
||||
decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
|
||||
decrypted, err := c.newDecrypter(io.NopCloser(encrypted))
|
||||
assert.NoError(t, err)
|
||||
sink := newRandomSource(copySize)
|
||||
n, err := io.CopyBuffer(sink, decrypted, buf)
|
||||
@@ -1144,15 +1156,15 @@ func TestEncryptData(t *testing.T) {
|
||||
buf := bytes.NewBuffer(test.in)
|
||||
encrypted, err := c.EncryptData(buf)
|
||||
assert.NoError(t, err)
|
||||
out, err := ioutil.ReadAll(encrypted)
|
||||
out, err := io.ReadAll(encrypted)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, test.expected, out)
|
||||
|
||||
// Check we can decode the data properly too...
|
||||
buf = bytes.NewBuffer(out)
|
||||
decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
|
||||
decrypted, err := c.DecryptData(io.NopCloser(buf))
|
||||
assert.NoError(t, err)
|
||||
out, err = ioutil.ReadAll(decrypted)
|
||||
out, err = io.ReadAll(decrypted)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, test.in, out)
|
||||
}
|
||||
@@ -1168,13 +1180,13 @@ func TestNewEncrypter(t *testing.T) {
|
||||
fh, err := c.newEncrypter(z, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
|
||||
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
|
||||
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])
|
||||
|
||||
// Test error path
|
||||
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
|
||||
fh, err = c.newEncrypter(z, nil)
|
||||
assert.Nil(t, fh)
|
||||
assert.Error(t, err, "short read of nonce")
|
||||
assert.EqualError(t, err, "short read of nonce: EOF")
|
||||
}
|
||||
|
||||
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
|
||||
@@ -1187,7 +1199,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
|
||||
fh, err := c.newEncrypter(in, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
|
||||
n, err := io.CopyN(io.Discard, fh, 1e6)
|
||||
assert.Equal(t, io.ErrUnexpectedEOF, err)
|
||||
assert.Equal(t, int64(32), n)
|
||||
}
|
||||
@@ -1225,7 +1237,7 @@ func TestNewDecrypter(t *testing.T) {
|
||||
cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
|
||||
fh, err = c.newDecrypter(cd)
|
||||
assert.Nil(t, fh)
|
||||
assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
|
||||
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
}
|
||||
|
||||
@@ -1233,7 +1245,7 @@ func TestNewDecrypter(t *testing.T) {
|
||||
cd = newCloseDetector(er)
|
||||
fh, err = c.newDecrypter(cd)
|
||||
assert.Nil(t, fh)
|
||||
assert.Error(t, err, "potato")
|
||||
assert.EqualError(t, err, "potato")
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
|
||||
// bad magic
|
||||
@@ -1244,7 +1256,7 @@ func TestNewDecrypter(t *testing.T) {
|
||||
cd := newCloseDetector(bytes.NewBuffer(file0copy))
|
||||
fh, err := c.newDecrypter(cd)
|
||||
assert.Nil(t, fh)
|
||||
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
|
||||
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
|
||||
file0copy[i] ^= 0x1
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
}
|
||||
@@ -1257,12 +1269,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
|
||||
|
||||
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
|
||||
in1 := bytes.NewBuffer(file16)
|
||||
in := ioutil.NopCloser(io.MultiReader(in1, in2))
|
||||
in := io.NopCloser(io.MultiReader(in1, in2))
|
||||
|
||||
fh, err := c.newDecrypter(in)
|
||||
assert.NoError(t, err)
|
||||
|
||||
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
|
||||
n, err := io.CopyN(io.Discard, fh, 1e6)
|
||||
assert.Equal(t, io.ErrUnexpectedEOF, err)
|
||||
assert.Equal(t, int64(16), n)
|
||||
}
|
||||
@@ -1274,14 +1286,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
||||
|
||||
// Make random data
|
||||
const dataSize = 150000
|
||||
plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
|
||||
plaintext, err := io.ReadAll(newRandomSource(dataSize))
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Encrypt the data
|
||||
buf := bytes.NewBuffer(plaintext)
|
||||
encrypted, err := c.EncryptData(buf)
|
||||
assert.NoError(t, err)
|
||||
ciphertext, err := ioutil.ReadAll(encrypted)
|
||||
ciphertext, err := io.ReadAll(encrypted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
|
||||
@@ -1300,7 +1312,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
||||
end = len(ciphertext)
|
||||
}
|
||||
}
|
||||
reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
|
||||
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
@@ -1490,14 +1502,16 @@ func TestDecrypterRead(t *testing.T) {
|
||||
assert.NoError(t, err, what)
|
||||
continue
|
||||
}
|
||||
_, err = ioutil.ReadAll(fh)
|
||||
_, err = io.ReadAll(fh)
|
||||
var expectedErr error
|
||||
switch {
|
||||
case i == fileHeaderSize:
|
||||
// This would normally produce an error *except* on the first block
|
||||
expectedErr = nil
|
||||
case i <= fileHeaderSize+blockHeaderSize:
|
||||
expectedErr = ErrorEncryptedFileBadHeader
|
||||
default:
|
||||
expectedErr = io.ErrUnexpectedEOF
|
||||
expectedErr = ErrorEncryptedBadBlock
|
||||
}
|
||||
if expectedErr != nil {
|
||||
assert.EqualError(t, err, expectedErr.Error(), what)
|
||||
@@ -1514,8 +1528,8 @@ func TestDecrypterRead(t *testing.T) {
|
||||
cd := newCloseDetector(in)
|
||||
fh, err := c.newDecrypter(cd)
|
||||
assert.NoError(t, err)
|
||||
_, err = ioutil.ReadAll(fh)
|
||||
assert.Error(t, err, "potato")
|
||||
_, err = io.ReadAll(fh)
|
||||
assert.EqualError(t, err, "potato")
|
||||
assert.Equal(t, 0, cd.closed)
|
||||
|
||||
// Test corrupting the input
|
||||
@@ -1524,17 +1538,28 @@ func TestDecrypterRead(t *testing.T) {
copy(file16copy, file16)
for i := range file16copy {
file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize {
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh)
} else {
assert.NoError(t, err)
_, err = ioutil.ReadAll(fh)
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
_, err = io.ReadAll(fh)
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
}
file16copy[i] ^= 0xFF
}

// Test that we can corrupt a byte and read zeroes if
// passBadBlocks is set
copy(file16copy, file16)
file16copy[len(file16copy)-1] ^= 0xFF
c.passBadBlocks = true
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
assert.NoError(t, err)
buf, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, make([]byte, 16), buf)
}

func TestDecrypterClose(t *testing.T) {
|
||||
@@ -1555,7 +1580,7 @@ func TestDecrypterClose(t *testing.T) {
|
||||
|
||||
// double close
|
||||
err = fh.Close()
|
||||
assert.Error(t, err, ErrorFileClosed.Error())
|
||||
assert.EqualError(t, err, ErrorFileClosed.Error())
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
|
||||
// try again reading the file this time
|
||||
@@ -1565,7 +1590,7 @@ func TestDecrypterClose(t *testing.T) {
|
||||
assert.Equal(t, 0, cd.closed)
|
||||
|
||||
// close after reading
|
||||
out, err := ioutil.ReadAll(fh)
|
||||
out, err := io.ReadAll(fh)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte{1}, out)
|
||||
assert.Equal(t, io.EOF, fh.err)
|
||||
@@ -1582,8 +1607,6 @@ func TestPutGetBlock(t *testing.T) {
|
||||
block := c.getBlock()
|
||||
c.putBlock(block)
|
||||
c.putBlock(block)
|
||||
|
||||
assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
|
||||
}
|
||||
|
||||
func TestKey(t *testing.T) {
|
||||
|
||||
@@ -48,7 +48,7 @@ func init() {
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
},
},
}, {
@@ -79,7 +79,9 @@ NB If filename_encryption is "off" then this option will do nothing.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Help: `Deprecated: use --server-side-across-configs instead.

Allow server-side operations (e.g. copy) to work across different crypt configs.

Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
@@ -119,6 +121,15 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.",
},
},
}, {
Name: "pass_bad_blocks",
Help: `If set this will pass bad blocks through as all 0.

This should not be set in normal operation, it should only be set if
trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.
@@ -138,10 +149,18 @@ length and if it's case sensitive.`,
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
},
},
Advanced: true,
}, {
Name: "suffix",
Help: `If this is set it will override the default suffix of ".bin".

Setting suffix to "none" will result in an empty suffix. This may be useful
when the path length is critical.`,
Default: ".bin",
Advanced: true,
}},
})
}
@@ -174,6 +193,8 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
}
cipher.setEncryptedSuffix(opt.Suffix)
cipher.setPassBadBlocks(opt.PassBadBlocks)
return cipher, nil
}

@@ -235,7 +256,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: false, // MimeTypes not supported with crypt
|
||||
WriteMimeType: false,
|
||||
@@ -247,6 +268,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
|
||||
return f, err
|
||||
@@ -262,7 +284,9 @@ type Options struct {
|
||||
Password2 string `config:"password2"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
ShowMapping bool `config:"show_mapping"`
|
||||
PassBadBlocks bool `config:"pass_bad_blocks"`
|
||||
FilenameEncoding string `config:"filename_encoding"`
|
||||
Suffix string `config:"suffix"`
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
@@ -396,6 +420,8 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
|
||||
|
||||
// put implements Put or PutStream
|
||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
|
||||
if f.opt.NoDataEncryption {
|
||||
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
|
||||
if err == nil && o != nil {
|
||||
@@ -413,6 +439,9 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
||||
// Find a hash the destination supports to compute a hash of
|
||||
// the encrypted data
|
||||
ht := f.Fs.Hashes().GetOne()
|
||||
if ci.IgnoreChecksum {
|
||||
ht = hash.None
|
||||
}
|
||||
var hasher *hash.MultiHasher
|
||||
if ht != hash.None {
|
||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
||||
@@ -449,7 +478,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||
}
|
||||
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
|
||||
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
|
||||
}
|
||||
fs.Debugf(src, "%v = %s OK", ht, srcHash)
|
||||
}
|
||||
@@ -1047,10 +1076,11 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is an operations.OverrideRemote
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
// Unwrap if it is an operations.OverrideRemote
srcObj = do.UnWrap()
} else {
// Otherwise don't unwrap any further
return "", nil
}
// if this is wrapping a local object then we work out the hash

@@ -17,41 +17,28 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type testWrapper struct {
|
||||
fs.ObjectInfo
|
||||
}
|
||||
|
||||
// UnWrap returns the Object that this Object is wrapping or nil if it
|
||||
// isn't wrapping anything
|
||||
func (o testWrapper) UnWrap() fs.Object {
|
||||
if o, ok := o.ObjectInfo.(fs.Object); ok {
|
||||
return o
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create a temporary local fs to upload things from
|
||||
|
||||
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
|
||||
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
|
||||
localFs, err := fs.TemporaryLocalFs(context.Background())
|
||||
require.NoError(t, err)
|
||||
cleanup = func() {
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, localFs.Rmdir(context.Background(), ""))
|
||||
}
|
||||
return localFs, cleanup
|
||||
})
|
||||
return localFs
|
||||
}
|
||||
|
||||
// Upload a file to a remote
|
||||
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
|
||||
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
|
||||
inBuf := bytes.NewBufferString(contents)
|
||||
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
|
||||
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
|
||||
obj, err := f.Put(context.Background(), inBuf, upSrc)
|
||||
require.NoError(t, err)
|
||||
cleanup = func() {
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, obj.Remove(context.Background()))
|
||||
}
|
||||
return obj, cleanup
|
||||
})
|
||||
return obj
|
||||
}
|
||||
|
||||
// Test the ObjectInfo
|
||||
@@ -65,11 +52,9 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
|
||||
path = "_wrap"
|
||||
}
|
||||
|
||||
localFs, cleanupLocalFs := makeTempLocalFs(t)
|
||||
defer cleanupLocalFs()
|
||||
localFs := makeTempLocalFs(t)
|
||||
|
||||
obj, cleanupObj := uploadFile(t, localFs, path, contents)
|
||||
defer cleanupObj()
|
||||
obj := uploadFile(t, localFs, path, contents)
|
||||
|
||||
// encrypt the data
|
||||
inBuf := bytes.NewBufferString(contents)
|
||||
@@ -83,7 +68,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
|
||||
var oi fs.ObjectInfo = obj
|
||||
if wrap {
|
||||
// wrap the object in an fs.ObjectUnwrapper if required
|
||||
oi = testWrapper{oi}
|
||||
oi = fs.NewOverrideRemote(oi, "new_remote")
|
||||
}
|
||||
|
||||
// wrap the object in a crypt for upload using the nonce we
|
||||
@@ -116,16 +101,13 @@ func testComputeHash(t *testing.T, f *Fs) {
|
||||
t.Skipf("%v: does not support hashes", f.Fs)
|
||||
}
|
||||
|
||||
localFs, cleanupLocalFs := makeTempLocalFs(t)
|
||||
defer cleanupLocalFs()
|
||||
localFs := makeTempLocalFs(t)
|
||||
|
||||
// Upload a file to localFs as a test object
|
||||
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
|
||||
defer cleanupLocalObj()
|
||||
localObj := uploadFile(t, localFs, path, contents)
|
||||
|
||||
// Upload the same data to the remote Fs also
|
||||
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
|
||||
defer cleanupRemoteObj()
|
||||
remoteObj := uploadFile(t, f, path, contents)
|
||||
|
||||
// Calculate the expected Hash of the remote object
|
||||
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
|
||||
|
||||
@@ -14,11 +14,10 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"os"
"path"
"regexp"
"sort"
"strconv"
"strings"
@@ -203,7 +202,7 @@ func init() {
m.Set("root_folder_id", "appDataFolder")
}

if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig,
})
@@ -278,20 +277,23 @@ Leave blank normally.
|
||||
Fill in to access "Computers" folders (see docs), or for rclone to use
|
||||
a non root folder as its starting point.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
}, {
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "team_drive",
|
||||
Help: "ID of the Shared Drive (Team Drive).",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Name: "team_drive",
|
||||
Help: "ID of the Shared Drive (Team Drive).",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "auth_owner_only",
|
||||
Default: false,
|
||||
@@ -417,10 +419,11 @@ date is used.`,
|
||||
Help: "Size of listing chunk 100-1000, 0 to disable.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "impersonate",
|
||||
Default: "",
|
||||
Help: `Impersonate this user when using a service account.`,
|
||||
Advanced: true,
|
||||
Name: "impersonate",
|
||||
Default: "",
|
||||
Help: `Impersonate this user when using a service account.`,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "alternate_export",
|
||||
Default: false,
|
||||
@@ -452,7 +455,11 @@ If downloading a file returns the error "This file has been identified
|
||||
as malware or spam and cannot be downloaded" with the error code
|
||||
"cannotDownloadAbusiveFile" then supply this flag to rclone to
|
||||
indicate you acknowledge the risks of downloading the file and rclone
|
||||
will download it anyway.`,
|
||||
will download it anyway.
|
||||
|
||||
Note that if you are using service account it will need Manager
|
||||
permission (not Content Manager) for this flag to work. If the SA
|
||||
does not have the right permission, Google will just ignore the flag.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "keep_revision_forever",
|
||||
@@ -496,7 +503,9 @@ need to use --ignore size also.`,
|
||||
}, {
|
||||
Name: "server_side_across_configs",
|
||||
Default: false,
|
||||
Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
|
||||
Help: `Deprecated: use --server-side-across-configs instead.
|
||||
|
||||
Allow server-side operations (e.g. copy) to work across different drive configs.
|
||||
|
||||
This can be useful if you wish to do a server-side copy between two
|
||||
different Google drives. Note that this isn't enabled by default
|
||||
@@ -587,7 +596,8 @@ Note also that opening the folder once in the web interface (with the
|
||||
user you've authenticated rclone with) seems to be enough so that the
|
||||
resource key is no needed.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -595,6 +605,18 @@ resource key is no needed.
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8,
}, {
Name: "env_auth",
Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})

@@ -651,6 +673,7 @@ type Options struct {
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
}

// Fs represents a remote drive server
@@ -758,7 +781,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
|
||||
fs.Errorf(f, "Received download limit error: %v", err)
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
|
||||
} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
|
||||
fs.Errorf(f, "Received upload limit error: %v", err)
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
|
||||
@@ -1108,7 +1131,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm

// try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
@@ -1119,6 +1142,12 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if err != nil {
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
}
} else if opt.EnvAuth {
scopes := driveScopes(opt.Scope)
oAuthClient, err = google.DefaultClient(ctx, scopes...)
if err != nil {
return nil, fmt.Errorf("failed to create client from environment: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
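The new env_auth branch above leans on Google's Application Default Credentials. A minimal sketch of that lookup outside rclone, assuming the standard Drive scope string (illustrative only, not part of this diff):

```go
package driveauth

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/oauth2/google"
)

// newEnvAuthClient shows the Application Default Credentials lookup:
// GOOGLE_APPLICATION_CREDENTIALS, the gcloud credentials file, then the
// compute metadata server. The Drive scope string is assumed here; rclone
// derives the scopes from the configured "scope" option instead.
func newEnvAuthClient(ctx context.Context) (*http.Client, error) {
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/drive")
	if err != nil {
		return nil, fmt.Errorf("failed to create client from environment: %w", err)
	}
	return client, nil
}
```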
@@ -1490,6 +1519,9 @@ func (f *Fs) newObjectWithExportInfo(
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
if strings.HasSuffix(remote, "/") {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -2877,6 +2909,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
||||
if f.rootFolderID == "appDataFolder" {
|
||||
changesCall.Spaces("appDataFolder")
|
||||
}
|
||||
changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
|
||||
changeList, err = changesCall.Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
@@ -3323,9 +3356,9 @@ This takes an optional directory to trash which make this easier to
use via the API.

rclone backend untrash drive:directory
rclone backend -i untrash drive:directory subdir
rclone backend --interactive untrash drive:directory subdir

Use the -i flag to see what would be restored before restoring it.
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.

Result:

@@ -3355,7 +3388,7 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be
attempted if possible.

Use the -i flag to see what would be copied before copying.
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
`,
}, {
Name: "exportformats",
@@ -3431,13 +3464,12 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
re := regexp.MustCompile(`[^\w_. -]+`)
|
||||
if _, ok := opt["config"]; ok {
|
||||
lines := []string{}
|
||||
upstreams := []string{}
|
||||
names := make(map[string]struct{}, len(drives))
|
||||
for i, drive := range drives {
|
||||
name := re.ReplaceAllString(drive.Name, "_")
|
||||
name := fspath.MakeConfigName(drive.Name)
|
||||
for {
|
||||
if _, found := names[name]; !found {
|
||||
break
|
||||
@@ -3800,7 +3832,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
|
||||
data = data[:limit]
|
||||
}
|
||||
|
||||
return ioutil.NopCloser(bytes.NewReader(data)), nil
|
||||
return io.NopCloser(bytes.NewReader(data)), nil
|
||||
}
|
||||
|
||||
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
|
||||
@@ -3859,7 +3891,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
|
||||
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"os"
|
||||
"path"
|
||||
@@ -78,7 +77,7 @@ var additionalMimeTypes = map[string]string{
|
||||
// Load the example export formats into exportFormats for testing
|
||||
func TestInternalLoadExampleFormats(t *testing.T) {
|
||||
fetchFormatsOnce.Do(func() {})
|
||||
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
|
||||
buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
|
||||
var about struct {
|
||||
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
|
||||
ImportFormats map[string][]string `json:"importFormats,omitempty"`
|
||||
@@ -244,6 +243,15 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
|
||||
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
|
||||
assert.False(t, quotaExceededRetry)
|
||||
assert.Equal(t, quotaExceededError, expectedQuotaError)
|
||||
|
||||
sqEItem := googleapi.ErrorItem{
|
||||
Reason: "storageQuotaExceeded",
|
||||
}
|
||||
generic403.Errors[0] = sqEItem
|
||||
expectedStorageQuotaError := fserrors.FatalError(&generic403)
|
||||
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
|
||||
assert.False(t, storageQuotaExceededRetry)
|
||||
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
@@ -140,55 +139,12 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
|
||||
return complete, nil
|
||||
}
|
||||
|
||||
// finishBatchJobStatus waits for the batch to complete returning completed entries
|
||||
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
|
||||
if launchBatchStatus.AsyncJobId == "" {
|
||||
return nil, errors.New("wait for batch completion: empty job ID")
|
||||
}
|
||||
var batchStatus *files.UploadSessionFinishBatchJobStatus
|
||||
sleepTime := 100 * time.Millisecond
|
||||
const maxSleepTime = 1 * time.Second
|
||||
startTime := time.Now()
|
||||
try := 1
|
||||
for {
|
||||
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
|
||||
if remaining < 0 {
|
||||
break
|
||||
}
|
||||
err = b.f.pacer.Call(func() (bool, error) {
|
||||
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
|
||||
AsyncJobId: launchBatchStatus.AsyncJobId,
|
||||
})
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
|
||||
} else {
|
||||
if batchStatus.Tag == "complete" {
|
||||
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
|
||||
return batchStatus.Complete, nil
|
||||
}
|
||||
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
|
||||
}
|
||||
time.Sleep(sleepTime)
|
||||
sleepTime *= 2
|
||||
if sleepTime > maxSleepTime {
|
||||
sleepTime = maxSleepTime
|
||||
}
|
||||
try++
|
||||
}
|
||||
if err == nil {
|
||||
err = errors.New("batch didn't complete")
|
||||
}
|
||||
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
|
||||
}
|
||||
|
||||
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
if err != nil && !signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
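The inverted condition above is the actual fix: synchronous callers are still blocked on their result channels, so a commit error must be delivered to them precisely when they have not already been signalled. A simplified sketch of that behaviour, with hypothetical types rather than rclone's batcherResponse:

```go
package batchexample

// commitBatch sketches the corrected error signalling: only synchronous
// callers, which are still waiting, receive a commit failure.
func commitBatch(async bool, results []chan<- error, commit func() error) (err error) {
	signalled := async // async callers never wait, so they count as already signalled
	defer func() {
		if err != nil && !signalled {
			for _, result := range results {
				result <- err // unblock each waiting caller with the failure
			}
		}
	}()
	// On success the per-file results are delivered elsewhere (omitted here).
	return commit()
}
```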
|
||||
@@ -58,7 +58,7 @@ import (
|
||||
const (
|
||||
rcloneClientID = "5jcck7diasz0rqy"
|
||||
rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
|
||||
minSleep = 10 * time.Millisecond
|
||||
defaultMinSleep = fs.Duration(10 * time.Millisecond)
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
// Upload chunk size - setting too small makes uploads slow.
|
||||
@@ -182,8 +182,9 @@ client_secret) to use this option as currently rclone's default set of
|
||||
permissions doesn't include "members.read". This can be added once
|
||||
v1.55 or later is in use everywhere.
|
||||
`,
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "shared_files",
|
||||
Help: `Instructs rclone to work on individual shared files.
|
||||
@@ -260,8 +261,8 @@ uploaded.
|
||||
The default for this is 0 which means rclone will choose a sensible
|
||||
default based on the batch_mode in use.
|
||||
|
||||
- batch_mode: async - default batch_timeout is 500ms
|
||||
- batch_mode: sync - default batch_timeout is 10s
|
||||
- batch_mode: async - default batch_timeout is 10s
|
||||
- batch_mode: sync - default batch_timeout is 500ms
|
||||
- batch_mode: off - not in use
|
||||
`,
|
||||
Default: fs.Duration(0),
|
||||
@@ -271,6 +272,11 @@ default based on the batch_mode in use.
|
||||
Help: `Max time to wait for a batch to finish committing`,
|
||||
Default: fs.Duration(10 * time.Minute),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "pacer_min_sleep",
|
||||
Default: defaultMinSleep,
|
||||
Help: "Minimum time to sleep between API calls.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -299,6 +305,7 @@ type Options struct {
|
||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
|
||||
AsyncBatch bool `config:"async_batch"`
|
||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -442,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
name: name,
|
||||
opt: *opt,
|
||||
ci: ci,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
|
||||
if err != nil {
|
||||
@@ -536,7 +543,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
// if the moint failed we have to abort here
|
||||
// if the mount failed we have to abort here
|
||||
}
|
||||
// if the mount succeeded it's now a normal folder in the users root namespace
|
||||
// we disable shared folder mode and proceed normally
|
||||
@@ -719,7 +726,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
leaf := f.opt.Enc.ToStandardName(entry.Name)
|
||||
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
|
||||
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
|
||||
entries = append(entries, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -906,7 +913,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
|
||||
remote := path.Join(dir, leaf)
|
||||
if folderInfo != nil {
|
||||
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
|
||||
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
|
||||
entries = append(entries, d)
|
||||
} else if fileInfo != nil {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
|
||||
|
||||
@@ -118,6 +118,9 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
|
||||
Single: 1,
|
||||
Pass: f.opt.FilePassword,
|
||||
}
|
||||
if f.opt.CDN {
|
||||
request.CDN = 1
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/download/get_token.cgi",
|
||||
@@ -405,6 +408,32 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
|
||||
request := &MoveDirRequest{
|
||||
FolderID: folderID,
|
||||
DestinationFolderID: destinationFolderID,
|
||||
Rename: newLeaf,
|
||||
// DestinationUser: destinationUser,
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/folder/mv.cgi",
|
||||
}
|
||||
|
||||
response = &MoveDirResponse{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't move dir: %w", err)
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
|
||||
request := &CopyFileRequest{
|
||||
URLs: []string{url},
|
||||
@@ -473,7 +502,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("didnt got an upload node: %w", err)
|
||||
return nil, fmt.Errorf("didn't get an upload node: %w", err)
|
||||
}
|
||||
|
||||
// fs.Debugf(f, "Got Upload node")
|
||||
|
||||
@@ -38,8 +38,9 @@ func init() {
|
||||
Description: "1Fichier",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
|
||||
Name: "api_key",
|
||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
|
||||
Name: "api_key",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Help: "If you want to download a shared folder, add this parameter.",
|
||||
Name: "shared_folder",
|
||||
@@ -54,6 +55,11 @@ func init() {
|
||||
Name: "folder_password",
|
||||
Advanced: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Help: "Set if you wish to use CDN download links.",
|
||||
Name: "cdn",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -89,6 +95,7 @@ type Options struct {
|
||||
SharedFolder string `config:"shared_folder"`
|
||||
FilePassword string `config:"file_password"`
|
||||
FolderPassword string `config:"folder_password"`
|
||||
CDN bool `config:"cdn"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -333,7 +340,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if size > int64(300e9) {
|
||||
return nil, errors.New("File too big, cant upload")
|
||||
return nil, errors.New("File too big, can't upload")
|
||||
} else if size == 0 {
|
||||
return nil, fs.ErrorCantUploadEmptyFiles
|
||||
}
|
||||
@@ -481,6 +488,51 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove.
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists.
|
||||
//
|
||||
// This is complicated by the fact that we can't use moveDir to move
|
||||
// to a different directory AND rename at the same time as it can
|
||||
// overwrite files in the source directory.
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcIDnumeric, err := strconv.Atoi(srcID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var resp *MoveDirResponse
|
||||
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't rename leaf: %w", err)
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
|
||||
}
|
||||
|
||||
srcFs.dirCache.FlushDir(srcRemote)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side move operations.
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
@@ -554,6 +606,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
|
||||
@@ -20,6 +20,7 @@ type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
CDN int `json:"cdn,omitempty"`
}

// RemoveFolderRequest is the request structure of the corresponding request
@@ -69,6 +70,22 @@ type MoveFileResponse struct {
URLs []string `json:"urls"`
}

// MoveDirRequest is the request structure of the corresponding request
type MoveDirRequest struct {
FolderID int `json:"folder_id"`
DestinationFolderID int `json:"destination_folder_id,omitempty"`
DestinationUser string `json:"destination_user"`
Rename string `json:"rename,omitempty"`
}

// MoveDirResponse is the response structure of the corresponding request
type MoveDirResponse struct {
Status string `json:"status"`
Message string `json:"message"`
OldName string `json:"old_name"`
NewName string `json:"new_name"`
}

// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`
||||
@@ -20,7 +20,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
@@ -85,6 +84,7 @@ Leave blank normally.
|
||||
|
||||
Fill in to make rclone start with directory of a given ID.
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "permanent_token",
|
||||
Help: `Permanent Authentication Token.
|
||||
@@ -98,6 +98,7 @@ These tokens are normally valid for several years.
|
||||
|
||||
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "token",
|
||||
Help: `Session Token.
|
||||
@@ -107,7 +108,8 @@ usually valid for 1 hour.
|
||||
|
||||
Don't set this value - rclone will set it automatically.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "token_expiry",
|
||||
Help: `Token expiry time.
|
||||
@@ -1186,7 +1188,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id")
}
if o.contentType == emptyMimeType {
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
return io.NopCloser(bytes.NewReader([]byte{})), nil
}
fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jlaffaye/ftp"
|
||||
"github.com/rclone/ftp"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/proxy"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
)
|
||||
|
||||
@@ -48,13 +49,15 @@ func init() {
|
||||
Description: "FTP",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
||||
Required: true,
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "FTP username.",
|
||||
Default: currentUser,
|
||||
Name: "user",
|
||||
Help: "FTP username.",
|
||||
Default: currentUser,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "FTP port number.",
|
||||
@@ -70,7 +73,7 @@ func init() {
|
||||
When using implicit FTP over TLS the client connects using TLS
|
||||
right from the start which breaks compatibility with
|
||||
non-TLS-aware servers. This is usually served over port 990 rather
|
||||
than port 21. Cannot be used in combination with explicit FTP.`,
|
||||
than port 21. Cannot be used in combination with explicit FTPS.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "explicit_tls",
|
||||
@@ -78,7 +81,7 @@ than port 21. Cannot be used in combination with explicit FTP.`,
|
||||
|
||||
When using explicit FTP over TLS the client explicitly requests
|
||||
security from the server in order to upgrade a plain text connection
|
||||
to an encrypted one. Cannot be used in combination with implicit FTP.`,
|
||||
to an encrypted one. Cannot be used in combination with implicit FTPS.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "concurrency",
|
||||
@@ -172,6 +175,18 @@ Enabled by default. Use 0 to disable.`,
|
||||
If this is set and no password is supplied then rclone will ask for a password
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "socks_proxy",
|
||||
Default: "",
|
||||
Help: `Socks 5 proxy host.
|
||||
|
||||
Supports the format user:pass@host:port, user@host:port, host:port.
|
||||
|
||||
Example:
|
||||
|
||||
myUser:myPass@localhost:9005
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -216,6 +231,7 @@ type Options struct {
|
||||
ShutTimeout fs.Duration `config:"shut_timeout"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
SocksProxy string `config:"socks_proxy"`
|
||||
}
|
||||
|
||||
// Fs represents a remote FTP server
|
||||
@@ -315,18 +331,33 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}

// Return a *textproto.Error if err contains one or nil otherwise
func textprotoError(err error) (errX *textproto.Error) {
if errors.As(err, &errX) {
return errX
}
return nil
}

// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}

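The new textprotoError helper swaps the type switches used elsewhere in this file for errors.As, which also matches a *textproto.Error that has been wrapped with %w. A standalone illustration of the same pattern (assumed example, not taken from the diff):

```go
package ftperrs

import (
	"errors"
	"net/textproto"
)

// ftpStatusCode extracts the FTP reply code from err, even when the
// *textproto.Error has been wrapped with fmt.Errorf("...: %w", err),
// which a plain type switch on err would miss.
func ftpStatusCode(err error) (code int, ok bool) {
	var tpErr *textproto.Error
	if errors.As(err, &tpErr) {
		return tpErr.Code, true
	}
	return 0, false
}
```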
// shouldRetry returns a boolean as to whether this err deserve to be
|
||||
// retried. It returns the err as a convenience
|
||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusNotAvailable:
|
||||
return true, err
|
||||
}
|
||||
if isRetriableFtpError(err) {
|
||||
return true, err
|
||||
}
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
@@ -342,7 +373,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||
defer func() {
|
||||
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
|
||||
}()
|
||||
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
|
||||
baseDialer := fshttp.NewDialer(ctx)
|
||||
if f.opt.SocksProxy != "" {
|
||||
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
|
||||
} else {
|
||||
conn, err = baseDialer.Dial(network, address)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -463,8 +499,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
||||
*pc = nil
|
||||
if err != nil {
|
||||
// If not a regular FTP error code then check the connection
|
||||
var tpErr *textproto.Error
|
||||
if !errors.As(err, &tpErr) {
|
||||
if tpErr := textprotoError(err); tpErr != nil {
|
||||
nopErr := c.NoOp()
|
||||
if nopErr != nil {
|
||||
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
|
||||
@@ -566,6 +601,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
// set the pool drainer timer going
|
||||
if f.opt.IdleTimeout > 0 {
|
||||
@@ -613,8 +649,7 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
|
||||
// translateErrorFile turns FTP errors into rclone errors if possible for a file
|
||||
func translateErrorFile(err error) error {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||
err = fs.ErrorObjectNotFound
|
||||
@@ -625,8 +660,7 @@ func translateErrorFile(err error) error {
|
||||
|
||||
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
|
||||
func translateErrorDir(err error) error {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||
err = fs.ErrorDirNotFound
|
||||
@@ -657,8 +691,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
|
||||
// findItem finds a directory entry for the name in its parent directory
|
||||
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
|
||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||
fullPath := path.Join(f.root, remote)
|
||||
if fullPath == "" || fullPath == "." || fullPath == "/" {
|
||||
if remote == "" || remote == "." || remote == "/" {
|
||||
// if root, assume exists and synthesize an entry
|
||||
return &ftp.Entry{
|
||||
Name: "",
|
||||
@@ -666,13 +699,38 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
|
||||
Time: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
dir := path.Dir(fullPath)
|
||||
base := path.Base(fullPath)
|
||||
|
||||
c, err := f.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("findItem: %w", err)
|
||||
}
|
||||
|
||||
// returns TRUE if MLST is supported which is required to call GetEntry
|
||||
if c.IsTimePreciseInList() {
|
||||
entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
err = translateErrorFile(err)
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
return nil, nil
|
||||
}
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusBadArguments:
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if entry != nil {
|
||||
f.entryToStandard(entry)
|
||||
}
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
dir := path.Dir(remote)
|
||||
base := path.Base(remote)
|
||||
|
||||
files, err := c.List(f.dirFromStandardPath(dir))
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
@@ -691,7 +749,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||
entry, err := f.findItem(ctx, remote)
|
||||
entry, err := f.findItem(ctx, path.Join(f.root, remote))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -713,7 +771,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
|
||||
|
||||
// dirExists checks the directory pointed to by remote exists or not
|
||||
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
|
||||
entry, err := f.findItem(ctx, remote)
|
||||
entry, err := f.findItem(ctx, path.Join(f.root, remote))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("dirExists: %w", err)
|
||||
}
|
||||
@@ -857,32 +915,18 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
// getInfo reads the FileInfo for a path
|
||||
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
|
||||
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
|
||||
dir := path.Dir(remote)
|
||||
base := path.Base(remote)
|
||||
|
||||
c, err := f.getFtpConnection(ctx)
|
||||
file, err := f.findItem(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getInfo: %w", err)
|
||||
}
|
||||
files, err := c.List(f.dirFromStandardPath(dir))
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, translateErrorFile(err)
|
||||
}
|
||||
|
||||
for i := range files {
|
||||
file := files[i]
|
||||
f.entryToStandard(file)
|
||||
if file.Name == base {
|
||||
info := &FileInfo{
|
||||
Name: remote,
|
||||
Size: file.Size,
|
||||
ModTime: file.Time,
|
||||
precise: f.fLstTime,
|
||||
IsDir: file.Type == ftp.EntryTypeFolder,
|
||||
}
|
||||
return info, nil
|
||||
return nil, err
|
||||
} else if file != nil {
|
||||
info := &FileInfo{
|
||||
Name: remote,
|
||||
Size: file.Size,
|
||||
ModTime: file.Time,
|
||||
precise: f.fLstTime,
|
||||
IsDir: file.Type == ftp.EntryTypeFolder,
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
@@ -913,8 +957,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
|
||||
}
|
||||
err = c.MakeDir(f.dirFromStandardPath(abspath))
|
||||
f.putFtpConnection(&c, err)
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
|
||||
err = nil
|
||||
@@ -1083,7 +1126,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
// SetModTime sets the modification time of the object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
if !o.fs.fSetTime {
|
||||
fs.Errorf(o.fs, "SetModTime is not supported")
|
||||
fs.Debugf(o.fs, "SetModTime is not supported")
|
||||
return nil
|
||||
}
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
@@ -1155,8 +1198,7 @@ func (f *ftpReadCloser) Close() error {
|
||||
// mask the error if it was caused by a premature close
|
||||
// NB StatusAboutToSend is to work around a bug in pureftpd
|
||||
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
|
||||
err = nil
|
||||
@@ -1182,15 +1224,26 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
||||
}
|
||||
}
|
||||
}
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
|
||||
var (
|
||||
fd *ftp.Response
|
||||
c *ftp.ServerConn
|
||||
)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
c, err = o.fs.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return false, err // getFtpConnection has retries already
|
||||
}
|
||||
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
|
||||
if err != nil {
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
}
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open: %w", err)
|
||||
}
|
||||
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
|
||||
if err != nil {
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
return nil, fmt.Errorf("open: %w", err)
|
||||
}
|
||||
|
||||
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
|
||||
return rc, nil
|
||||
}
|
||||
@@ -1223,13 +1276,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
|
||||
// Ignore error 250 here - send by some servers
|
||||
if err != nil {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusRequestedFileActionOK:
|
||||
err = nil
|
||||
}
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusRequestedFileActionOK:
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
|
||||
@@ -19,8 +19,8 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -82,7 +82,8 @@ func init() {
|
||||
saFile, _ := m.Get("service_account_file")
|
||||
saCreds, _ := m.Get("service_account_credentials")
|
||||
anonymous, _ := m.Get("anonymous")
|
||||
if saFile != "" || saCreds != "" || anonymous == "true" {
|
||||
envAuth, _ := m.Get("env_auth")
|
||||
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
|
||||
return nil, nil
|
||||
}
|
||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||
@@ -90,15 +91,21 @@ func init() {
|
||||
})
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "project_number",
|
||||
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||
Name: "project_number",
|
||||
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user_project",
|
||||
Help: "User project.\n\nOptional - needed only for requester pays.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
}, {
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "anonymous",
|
||||
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
|
||||
@@ -297,6 +304,15 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
Value: "DURABLE_REDUCED_AVAILABILITY",
|
||||
Help: "Durable reduced availability storage class",
|
||||
}},
|
||||
}, {
|
||||
Name: "directory_markers",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Help: `Upload an empty object with a trailing slash when a new directory is created
|
||||
|
||||
Empty folders are unsupported for bucket based remotes, this option creates an empty
|
||||
object ending with "/", to persist the folder.
|
||||
`,
|
||||
}, {
|
||||
Name: "no_check_bucket",
|
||||
Help: `If set, don't attempt to check the bucket exists or create it.
|
||||
@@ -330,6 +346,17 @@ can't check the size and hash but the file contents will be decompressed.
|
||||
Default: (encoder.Base |
|
||||
encoder.EncodeCrLf |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}, {
|
||||
Name: "env_auth",
|
||||
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
|
||||
Default: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "false",
|
||||
Help: "Enter credentials in the next step.",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
@@ -337,6 +364,7 @@ can't check the size and hash but the file contents will be decompressed.
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ProjectNumber string `config:"project_number"`
|
||||
UserProject string `config:"user_project"`
|
||||
ServiceAccountFile string `config:"service_account_file"`
|
||||
ServiceAccountCredentials string `config:"service_account_credentials"`
|
||||
Anonymous bool `config:"anonymous"`
|
||||
@@ -349,6 +377,8 @@ type Options struct {
|
||||
Decompress bool `config:"decompress"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
DirectoryMarkers bool `config:"directory_markers"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
@@ -444,7 +474,7 @@ func parsePath(path string) (root string) {
|
||||
// split returns bucket and bucketPath from the rootRelativePath
|
||||
// relative to f.root
|
||||
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
|
||||
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
|
||||
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
|
||||
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
|
||||
}
@@ -487,7 +517,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
|
||||
// try loading service account credentials from env variable, then from a file
|
||||
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
|
||||
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
|
||||
}
|
||||
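// The loadedCreds change above is one instance of the io/ioutil retirement
// (the package is deprecated since Go 1.16) that runs through this whole
// change set. The replacements are drop-in:
//
//	ioutil.ReadFile  -> os.ReadFile
//	ioutil.ReadAll   -> io.ReadAll
//	ioutil.ReadDir   -> os.ReadDir   (returns []os.DirEntry rather than []os.FileInfo)
//	ioutil.TempFile  -> os.CreateTemp
//	ioutil.NopCloser -> io.NopCloser
//	ioutil.Discard   -> io.Discard
//
// Illustrative only, with a hypothetical path:
data, err := os.ReadFile("/etc/rclone/sa.json")
if err != nil {
	fs.Errorf(nil, "reading credentials: %v", err)
	return
}
fs.Debugf(nil, "read %d bytes of credentials", len(data))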
@@ -500,6 +530,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
|
||||
}
|
||||
} else if opt.EnvAuth {
|
||||
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
|
||||
if err != nil {
|
||||
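// env_auth wires the else-if branch above to Google Application Default
// Credentials: roughly GOOGLE_APPLICATION_CREDENTIALS, then gcloud user
// credentials, then the instance metadata service, in that order. A
// standalone sketch of the same lookup (the scope constant is the one used
// in the hunk above):
ctx := context.Background()
client, err := google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
	return fmt.Errorf("failed to get default GCP credentials: %w", err)
}
_ = client // handed to the storage service constructor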
@@ -525,6 +560,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
BucketBased: true,
|
||||
BucketBasedRootOK: true,
|
||||
}).Fill(ctx, f)
|
||||
if opt.DirectoryMarkers {
|
||||
f.features.CanHaveEmptyDirectories = true
|
||||
}
|
||||
|
||||
// Create a new authorized Drive client.
|
||||
f.client = oAuthClient
|
||||
@@ -541,7 +579,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// Check to see if the object exists
|
||||
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
|
||||
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
get = get.UserProject(f.opt.UserProject)
|
||||
}
|
||||
_, err = get.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err == nil {
|
||||
@@ -601,9 +643,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
directory += "/"
|
||||
}
|
||||
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
|
||||
if f.opt.UserProject != "" {
|
||||
list = list.UserProject(f.opt.UserProject)
|
||||
}
|
||||
if !recurse {
|
||||
list = list.Delimiter("/")
|
||||
}
|
||||
foundItems := 0
|
||||
for {
|
||||
var objects *storage.Objects
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -619,6 +665,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
return err
|
||||
}
|
||||
if !recurse {
|
||||
foundItems += len(objects.Prefixes)
|
||||
var object storage.Object
|
||||
for _, remote := range objects.Prefixes {
|
||||
if !strings.HasSuffix(remote, "/") {
|
||||
@@ -639,22 +686,29 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
}
|
||||
}
|
||||
}
|
||||
foundItems += len(objects.Items)
|
||||
for _, object := range objects.Items {
|
||||
remote := f.opt.Enc.ToStandardPath(object.Name)
|
||||
if !strings.HasPrefix(remote, prefix) {
|
||||
fs.Logf(f, "Odd name received %q", object.Name)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
|
||||
// is this a directory marker?
|
||||
if isDirectory {
|
||||
// Don't insert the root directory
|
||||
if remote == directory {
|
||||
continue
|
||||
}
|
||||
// process directory markers as directories
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
if addBucket {
|
||||
remote = path.Join(bucket, remote)
|
||||
}
|
||||
// is this a directory marker?
|
||||
if isDirectory {
|
||||
continue // skip directory marker
|
||||
}
|
||||
err = fn(remote, object, false)
|
||||
|
||||
err = fn(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -664,6 +718,17 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
}
|
||||
list.PageToken(objects.NextPageToken)
|
||||
}
|
||||
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
|
||||
// Determine whether the directory exists or not by whether it has a marker
|
||||
_, err := f.readObjectInfo(ctx, bucket, directory)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
return nil
|
||||
}
|
||||
|
||||
@@ -707,6 +772,9 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
|
||||
return nil, errors.New("can't list buckets without project number")
|
||||
}
|
||||
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
|
||||
if f.opt.UserProject != "" {
|
||||
listBuckets = listBuckets.UserProject(f.opt.UserProject)
|
||||
}
|
||||
for {
|
||||
var buckets *storage.Buckets
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -824,10 +892,69 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
return f.Put(ctx, in, src, options...)
|
||||
}
// Create directory marker file and parents
|
||||
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
|
||||
if !f.opt.DirectoryMarkers || bucket == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Object to be uploaded
|
||||
o := &Object{
|
||||
fs: f,
|
||||
modTime: time.Now(),
|
||||
}
|
||||
|
||||
for {
|
||||
_, bucketPath := f.split(dir)
|
||||
// Don't create the directory marker if it is the bucket or at the very root
|
||||
if bucketPath == "" {
|
||||
break
|
||||
}
|
||||
o.remote = dir + "/"
|
||||
|
||||
// Check to see if object already exists
|
||||
_, err := o.readObjectInfo(ctx)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Upload it if not
|
||||
fs.Debugf(o, "Creating directory marker")
|
||||
content := io.Reader(strings.NewReader(""))
|
||||
err = o.Update(ctx, content, o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating directory marker failed: %w", err)
|
||||
}
|
||||
|
||||
// Now check parent directory exists
|
||||
dir = path.Dir(dir)
|
||||
if dir == "/" || dir == "." {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
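// Behavioural note on directory_markers, assuming the loop above: creating
// remote:bucket/a/b uploads zero-length objects "a/b/" and then "a/", walking
// up until the bucket root, and Rmdir later deletes the matching marker (see
// the Rmdir hunk below). The per-level upload boils down to:
o := &Object{fs: f, remote: dir + "/", modTime: time.Now()}
if _, err := o.readObjectInfo(ctx); err != nil { // only upload a marker that is missing
	if err := o.Update(ctx, strings.NewReader(""), o); err != nil {
		return fmt.Errorf("creating directory marker failed: %w", err)
	}
}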
// Mkdir creates the bucket if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
bucket, _ := f.split(dir)
|
||||
return f.makeBucket(ctx, bucket)
|
||||
e := f.checkBucket(ctx, bucket)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
return f.createDirectoryMarker(ctx, bucket, dir)
|
||||
|
||||
}
|
||||
|
||||
// mkdirParent creates the parent bucket/directory if it doesn't exist
|
||||
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
dir := path.Dir(remote)
|
||||
if dir == "/" || dir == "." {
|
||||
dir = ""
|
||||
}
|
||||
return f.Mkdir(ctx, dir)
|
||||
}
|
||||
|
||||
// makeBucket creates the bucket if it doesn't exist
|
||||
@@ -836,7 +963,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||
// List something from the bucket to see if it exists. Doing it like this enables the use of a
|
||||
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
|
||||
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
list = list.UserProject(f.opt.UserProject)
|
||||
}
|
||||
_, err = list.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err == nil {
|
||||
@@ -871,7 +1002,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||
if !f.opt.BucketPolicyOnly {
|
||||
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
||||
}
|
||||
_, err = insertBucket.Context(ctx).Do()
|
||||
insertBucket = insertBucket.Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
insertBucket = insertBucket.UserProject(f.opt.UserProject)
|
||||
}
|
||||
_, err = insertBucket.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}, nil)
|
||||
@@ -891,12 +1026,28 @@ func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
|
||||
// to delete was not empty.
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
||||
bucket, directory := f.split(dir)
|
||||
// Remove directory marker file
|
||||
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: dir + "/",
|
||||
}
|
||||
fs.Debugf(o, "Removing directory marker")
|
||||
err := o.Remove(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("removing directory marker failed: %w", err)
|
||||
}
|
||||
}
|
||||
if bucket == "" || directory != "" {
|
||||
return nil
|
||||
}
|
||||
return f.cache.Remove(bucket, func() error {
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
|
||||
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
|
||||
}
|
||||
err = deleteBucket.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
})
|
||||
@@ -918,7 +1069,7 @@ func (f *Fs) Precision() time.Duration {
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
dstBucket, dstPath := f.split(remote)
|
||||
err := f.checkBucket(ctx, dstBucket)
|
||||
err := f.mkdirParent(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
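// Copy now goes through mkdirParent rather than a bare checkBucket so that,
// with directory_markers enabled, a server-side copy into a fresh folder also
// creates the "folder/" marker chain; with markers disabled mkdirParent
// degrades to the old bucket-existence check, so behaviour is unchanged.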
@@ -942,7 +1093,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
var rewriteResponse *storage.RewriteResponse
|
||||
for {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
|
||||
rewriteRequest = rewriteRequest.Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
rewriteRequest.UserProject(f.opt.UserProject)
|
||||
}
|
||||
rewriteResponse, err = rewriteRequest.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1052,8 +1207,17 @@ func (o *Object) setMetaData(info *storage.Object) {
|
||||
// readObjectInfo reads the definition for an object
|
||||
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
|
||||
return o.fs.readObjectInfo(ctx, bucket, bucketPath)
|
||||
}
|
||||
|
||||
// readObjectInfo reads the definition for an object
|
||||
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
get = get.UserProject(f.opt.UserProject)
|
||||
}
|
||||
object, err = get.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
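// Every storage API call in these hunks gains the same user_project guard so
// that requester-pays buckets are billed to the configured project. The
// generic shape, with Get standing in for List/Insert/Delete/Rewrite/Copy:
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
if f.opt.UserProject != "" {
	get = get.UserProject(f.opt.UserProject)
}
object, err := get.Do()
// The download path (Object.Open below) achieves the same thing by appending
// a userProject query parameter to the media URL.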
@@ -1125,7 +1289,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
|
||||
if !o.fs.opt.BucketPolicyOnly {
|
||||
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
|
||||
}
|
||||
newObject, err = copyObject.Context(ctx).Do()
|
||||
copyObject = copyObject.Context(ctx)
|
||||
if o.fs.opt.UserProject != "" {
|
||||
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
|
||||
}
|
||||
newObject, err = copyObject.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1142,6 +1310,9 @@ func (o *Object) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
if o.fs.opt.UserProject != "" {
|
||||
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1185,11 +1356,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
err := o.fs.checkBucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
// Create parent dir/bucket if not saving directory marker
|
||||
if !strings.HasSuffix(o.remote, "/") {
|
||||
err = o.fs.mkdirParent(ctx, o.remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
modTime := src.ModTime(ctx)
|
||||
|
||||
@@ -1234,7 +1408,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if !o.fs.opt.BucketPolicyOnly {
|
||||
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
|
||||
}
|
||||
newObject, err = insertObject.Context(ctx).Do()
|
||||
insertObject = insertObject.Context(ctx)
|
||||
if o.fs.opt.UserProject != "" {
|
||||
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
|
||||
}
|
||||
newObject, err = insertObject.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1249,7 +1427,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
|
||||
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
|
||||
if o.fs.opt.UserProject != "" {
|
||||
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
|
||||
}
|
||||
err = deleteBucket.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
return err
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/googlecloudstorage"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -16,3 +17,17 @@ func TestIntegration(t *testing.T) {
|
||||
NilObject: (*googlecloudstorage.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
func TestIntegration2(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
name := "TestGoogleCloudStorage"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*googlecloudstorage.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "directory_markers", Value: "true"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ package googlephotos
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"testing"
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -56,7 +55,7 @@ func TestIntegration(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
in, err := srcObj.Open(ctx)
|
||||
require.NoError(t, err)
|
||||
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
|
||||
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, remote, dstObj.Remote())
|
||||
_ = in.Close()
|
||||
@@ -99,7 +98,7 @@ func TestIntegration(t *testing.T) {
|
||||
t.Run("ObjectOpen", func(t *testing.T) {
|
||||
in, err := dstObj.Open(ctx)
|
||||
require.NoError(t, err)
|
||||
buf, err := ioutil.ReadAll(in)
|
||||
buf, err := io.ReadAll(in)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, in.Close())
|
||||
assert.True(t, len(buf) > 1000)
|
||||
@@ -221,7 +220,7 @@ func TestIntegration(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
in, err := srcObj.Open(ctx)
|
||||
require.NoError(t, err)
|
||||
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
|
||||
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, remote, dstObj.Remote())
|
||||
_ = in.Close()
|
||||
|
||||
@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
|
||||
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
|
||||
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
|
||||
}
|
||||
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
|
||||
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
|
||||
doneCount++
|
||||
}
|
||||
})
|
||||
|
||||
@@ -166,6 +166,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
}
|
||||
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
@@ -118,7 +117,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
|
||||
defer func() {
|
||||
_ = r.Close()
|
||||
}()
|
||||
if _, err = io.Copy(ioutil.Discard, r); err != nil {
|
||||
if _, err = io.Copy(io.Discard, r); err != nil {
|
||||
fs.Infof(o, "update failed (copy): %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -19,9 +19,10 @@ func init() {
|
||||
Description: "Hadoop distributed file system",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "namenode",
|
||||
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
|
||||
Required: true,
|
||||
Name: "namenode",
|
||||
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "username",
|
||||
Help: "Hadoop user name.",
|
||||
@@ -29,6 +30,7 @@ func init() {
|
||||
Value: "root",
|
||||
Help: "Connect to hdfs as root.",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "service_principal_name",
|
||||
Help: `Kerberos service principal name for the namenode.
|
||||
@@ -36,15 +38,16 @@ func init() {
|
||||
Enables KERBEROS authentication. Specifies the Service Principal Name
|
||||
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
|
||||
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "data_transfer_protection",
|
||||
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
|
||||
|
||||
Specifies whether or not authentication, data signature integrity
|
||||
checks, and wire encryption is required when communicating the the
|
||||
datanodes. Possible values are 'authentication', 'integrity' and
|
||||
'privacy'. Used only with KERBEROS enabled.`,
|
||||
checks, and wire encryption are required when communicating with
|
||||
the datanodes. Possible values are 'authentication', 'integrity'
|
||||
and 'privacy'. Used only with KERBEROS enabled.`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "privacy",
|
||||
Help: "Ensure authentication, integrity and encryption enabled.",
|
||||
|
||||
@@ -294,15 +294,6 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
|
||||
return &result, nil
|
||||
}
// copyDirectory moves the directory at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
|
||||
}
|
||||
|
||||
// moveDirectory moves the directory at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
package hidrive
|
||||
|
||||
// FIXME HiDrive only supports file or folder names of 255 characters or less.
|
||||
// Operations that create files oder folder with longer names will throw a HTTP error:
|
||||
// Operations that create files or folders with longer names will throw an HTTP error:
|
||||
// - 422 Unprocessable Entity
|
||||
// A more graceful way for rclone to handle this may be desirable.
|
||||
|
||||
@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, fmt.Errorf("could not access root-prefix: %w", err)
|
||||
}
|
||||
if item.Type != api.HiDriveObjectTypeDirectory {
|
||||
return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
|
||||
return nil, errors.New("the root-prefix needs to point to a valid directory or be empty")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -305,7 +304,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
err := o.stat(ctx)
|
||||
err := o.head(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -317,15 +316,6 @@ func (f *Fs) url(remote string) string {
|
||||
return f.endpointURL + rest.URLPathEscape(remote)
|
||||
}
|
||||
|
||||
// parse s into an int64, on failure return def
|
||||
func parseInt64(s string, def int64) int64 {
|
||||
n, e := strconv.ParseInt(s, 10, 64)
|
||||
if e != nil {
|
||||
return def
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Errors returned by parseName
|
||||
var (
|
||||
errURLJoinFailed = errors.New("URLJoin failed")
|
||||
@@ -500,12 +490,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
switch err := file.stat(ctx); err {
|
||||
switch err := file.head(ctx); err {
|
||||
case nil:
|
||||
add(file)
|
||||
case fs.ErrorNotAFile:
|
||||
// ...found a directory not a file
|
||||
add(fs.NewDir(remote, timeUnset))
|
||||
add(fs.NewDir(remote, time.Time{}))
|
||||
default:
|
||||
fs.Debugf(remote, "skipping because of error: %v", err)
|
||||
}
|
||||
@@ -517,7 +507,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
name = strings.TrimRight(name, "/")
|
||||
remote := path.Join(dir, name)
|
||||
if isDir {
|
||||
add(fs.NewDir(remote, timeUnset))
|
||||
add(fs.NewDir(remote, time.Time{}))
|
||||
} else {
|
||||
in <- remote
|
||||
}
|
||||
@@ -579,8 +569,8 @@ func (o *Object) url() string {
|
||||
return o.fs.url(o.remote)
|
||||
}
|
||||
|
||||
// stat updates the info field in the Object
|
||||
func (o *Object) stat(ctx context.Context) error {
|
||||
// head sends a HEAD request to update info fields in the Object
|
||||
func (o *Object) head(ctx context.Context) error {
|
||||
if o.fs.opt.NoHead {
|
||||
o.size = -1
|
||||
o.modTime = timeUnset
|
||||
@@ -601,13 +591,19 @@ func (o *Object) stat(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stat: %w", err)
|
||||
}
|
||||
return o.decodeMetadata(ctx, res)
|
||||
}
|
||||
|
||||
// decodeMetadata updates info fields in the Object according to HTTP response headers
|
||||
func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
|
||||
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
|
||||
if err != nil {
|
||||
t = timeUnset
|
||||
}
|
||||
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
|
||||
o.modTime = t
|
||||
o.contentType = res.Header.Get("Content-Type")
|
||||
o.size = rest.ParseSizeFromHeaders(res.Header)
|
||||
|
||||
// If NoSlash is set then check ContentType to see if it is a directory
|
||||
if o.fs.opt.NoSlash {
|
||||
mediaType, _, err := mime.ParseMediaType(o.contentType)
|
||||
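// head above and Open (next hunk) now share decodeMetadata, so with no_head
// set the size, modtime and content type are filled in lazily from the first
// GET response instead of a separate HEAD request. rest.ParseSizeFromHeaders
// is assumed here to derive the size from Content-Length (and, for ranged
// responses, Content-Range) in place of the hand-rolled parseInt64 that was
// removed. A condensed sketch of the shared header handling:
o.contentType = res.Header.Get("Content-Type")
o.size = rest.ParseSizeFromHeaders(res.Header)
if t, err := http.ParseTime(res.Header.Get("Last-Modified")); err == nil {
	o.modTime = t
}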
@@ -653,6 +649,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Open failed: %w", err)
|
||||
}
|
||||
if err = o.decodeMetadata(ctx, res); err != nil {
|
||||
return nil, fmt.Errorf("decodeMetadata failed: %w", err)
|
||||
}
|
||||
return res.Body, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ package http
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
@@ -33,20 +33,21 @@ var (
|
||||
lineEndSize = 1
|
||||
)
|
||||
|
||||
// prepareServer the test server and return a function to tidy it up afterwards
|
||||
func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
||||
// prepareServer prepares the test server and shuts it down automatically
|
||||
// when the test completes.
|
||||
func prepareServer(t *testing.T) configmap.Simple {
|
||||
// file server for test/files
|
||||
fileServer := http.FileServer(http.Dir(filesPath))
|
||||
|
||||
// verify the file path is correct, and also check which line endings
|
||||
// are used to get sizes right ("\n" except on Windows, but even there
|
||||
// we may have "\n" or "\r\n" depending on git crlf setting)
|
||||
fileList, err := ioutil.ReadDir(filesPath)
|
||||
fileList, err := os.ReadDir(filesPath)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, len(fileList), 0)
|
||||
for _, file := range fileList {
|
||||
if !file.IsDir() {
|
||||
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
|
||||
data, _ := os.ReadFile(filepath.Join(filesPath, file.Name()))
|
||||
if strings.HasSuffix(string(data), "\r\n") {
|
||||
lineEndSize = 2
|
||||
}
|
||||
@@ -78,20 +79,21 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
||||
"url": ts.URL,
|
||||
"headers": strings.Join(headers, ","),
|
||||
}
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
// return a function to tidy up
|
||||
return m, ts.Close
|
||||
return m
|
||||
}
|
||||
|
||||
// prepare the test server and return a function to tidy it up afterwards
|
||||
func prepare(t *testing.T) (fs.Fs, func()) {
|
||||
m, tidy := prepareServer(t)
|
||||
// prepare prepares the test server and shuts it down automatically
|
||||
// when the test completes.
|
||||
func prepare(t *testing.T) fs.Fs {
|
||||
m := prepareServer(t)
|
||||
|
||||
// Instantiate it
|
||||
f, err := NewFs(context.Background(), remoteName, "", m)
|
||||
require.NoError(t, err)
|
||||
|
||||
return f, tidy
|
||||
return f
|
||||
}
|
||||
|
||||
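// The test helpers above now register shutdown with t.Cleanup instead of
// returning a tidy function, so the server is closed even if a helper fails
// part-way and callers cannot forget the defer. The general pattern, with a
// hypothetical helper name:
func newTestServer(t *testing.T) *httptest.Server {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	}))
	t.Cleanup(ts.Close) // runs when the test (or subtest) that called this finishes
	return ts
}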
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
|
||||
@@ -134,22 +136,19 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
|
||||
}
|
||||
|
||||
func TestListRoot(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
f := prepare(t)
|
||||
testListRoot(t, f, false)
|
||||
}
|
||||
|
||||
func TestListRootNoSlash(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
f := prepare(t)
|
||||
f.(*Fs).opt.NoSlash = true
|
||||
defer tidy()
|
||||
|
||||
testListRoot(t, f, true)
|
||||
}
|
||||
|
||||
func TestListSubDir(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
f := prepare(t)
|
||||
|
||||
entries, err := f.List(context.Background(), "three")
|
||||
require.NoError(t, err)
|
||||
@@ -166,8 +165,7 @@ func TestListSubDir(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewObject(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
f := prepare(t)
|
||||
|
||||
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
@@ -194,36 +192,69 @@ func TestNewObject(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestOpen(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
m := prepareServer(t)
|
||||
|
||||
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
for _, head := range []bool{false, true} {
|
||||
if !head {
|
||||
m.Set("no_head", "true")
|
||||
}
|
||||
f, err := NewFs(context.Background(), remoteName, "", m)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test normal read
|
||||
fd, err := o.Open(context.Background())
|
||||
require.NoError(t, err)
|
||||
data, err := ioutil.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
if lineEndSize == 2 {
|
||||
assert.Equal(t, "beetroot\r\n", string(data))
|
||||
} else {
|
||||
assert.Equal(t, "beetroot\n", string(data))
|
||||
for _, rangeRead := range []bool{false, true} {
|
||||
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
if !head {
|
||||
// Test mod time is still indeterminate
|
||||
tObj := o.ModTime(context.Background())
|
||||
assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj))
|
||||
|
||||
// Test file size is still indeterminate
|
||||
assert.Equal(t, int64(-1), o.Size())
|
||||
}
|
||||
|
||||
var data []byte
|
||||
if !rangeRead {
|
||||
// Test normal read
|
||||
fd, err := o.Open(context.Background())
|
||||
require.NoError(t, err)
|
||||
data, err = io.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
if lineEndSize == 2 {
|
||||
assert.Equal(t, "beetroot\r\n", string(data))
|
||||
} else {
|
||||
assert.Equal(t, "beetroot\n", string(data))
|
||||
}
|
||||
} else {
|
||||
// Test with range request
|
||||
fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
|
||||
require.NoError(t, err)
|
||||
data, err = io.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
assert.Equal(t, "eetro", string(data))
|
||||
}
|
||||
|
||||
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
|
||||
require.NoError(t, err)
|
||||
tFile := fi.ModTime()
|
||||
|
||||
// Test the time is always correct on the object after file open
|
||||
tObj := o.ModTime(context.Background())
|
||||
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
|
||||
|
||||
if !rangeRead {
|
||||
// Test the file size
|
||||
assert.Equal(t, int64(len(data)), o.Size())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test with range request
|
||||
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
|
||||
require.NoError(t, err)
|
||||
data, err = ioutil.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
assert.Equal(t, "eetro", string(data))
|
||||
}
|
||||
|
||||
func TestMimeType(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
f := prepare(t)
|
||||
|
||||
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
@@ -234,8 +265,7 @@ func TestMimeType(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIsAFileRoot(t *testing.T) {
|
||||
m, tidy := prepareServer(t)
|
||||
defer tidy()
|
||||
m := prepareServer(t)
|
||||
|
||||
f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
|
||||
@@ -244,8 +274,7 @@ func TestIsAFileRoot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIsAFileSubDir(t *testing.T) {
|
||||
m, tidy := prepareServer(t)
|
||||
defer tidy()
|
||||
m := prepareServer(t)
|
||||
|
||||
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
@@ -1,62 +0,0 @@
|
||||
package hubic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/swift/v2"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// auth is an authenticator for swift
|
||||
type auth struct {
|
||||
f *Fs
|
||||
}
|
||||
|
||||
// newAuth creates a swift authenticator
|
||||
func newAuth(f *Fs) *auth {
|
||||
return &auth{
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
|
||||
// Request constructs an http.Request for authentication
|
||||
//
|
||||
// returns nil for not needed
|
||||
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
|
||||
const retries = 10
|
||||
for try := 1; try <= retries; try++ {
|
||||
err = a.f.getCredentials(context.TODO())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Response parses the result of an http request
|
||||
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// The public storage URL - set Internal to true to read
|
||||
// internal/service net URL
|
||||
func (a *auth) StorageUrl(Internal bool) string { // nolint
|
||||
return a.f.credentials.Endpoint
|
||||
}
|
||||
|
||||
// The access token
|
||||
func (a *auth) Token() string {
|
||||
return a.f.credentials.Token
|
||||
}
|
||||
|
||||
// The CDN url if available
|
||||
func (a *auth) CdnUrl() string { // nolint
|
||||
return ""
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var _ swift.Authenticator = (*auth)(nil)
|
||||
@@ -1,200 +0,0 @@
|
||||
// Package hubic provides an interface to the Hubic object storage
|
||||
// system.
|
||||
package hubic
|
||||
|
||||
// This uses the normal swift mechanism to update the credentials and
|
||||
// ignores the expires field returned by the Hubic API. This may need
|
||||
// to be revisited after some actual experience.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
swiftLib "github.com/ncw/swift/v2"
|
||||
"github.com/rclone/rclone/backend/swift"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
|
||||
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: []string{
|
||||
"credentials.r", // Read OpenStack credentials
|
||||
},
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://api.hubic.com/oauth/auth/",
|
||||
TokenURL: "https://api.hubic.com/oauth/token/",
|
||||
},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "hubic",
|
||||
Description: "Hubic",
|
||||
NewFs: NewFs,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||
OAuth2Config: oauthConfig,
|
||||
})
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
|
||||
})
|
||||
}
|
||||
|
||||
// credentials is the JSON returned from the Hubic API to read the
|
||||
// OpenStack credentials
|
||||
type credentials struct {
|
||||
Token string `json:"token"` // OpenStack token
|
||||
Endpoint string `json:"endpoint"` // OpenStack endpoint
|
||||
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
|
||||
}
|
||||
|
||||
// Fs represents a remote hubic
|
||||
type Fs struct {
|
||||
fs.Fs // wrapped Fs
|
||||
features *fs.Features // optional features
|
||||
client *http.Client // client for oauth api
|
||||
credentials credentials // returned from the Hubic API
|
||||
expires time.Time // time credentials expire
|
||||
}
|
||||
|
||||
// Object describes a swift object
|
||||
type Object struct {
|
||||
*swift.Object
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Object.String()
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
if f.Fs == nil {
|
||||
return "Hubic"
|
||||
}
|
||||
return fmt.Sprintf("Hubic %s", f.Fs.String())
|
||||
}
|
||||
|
||||
// getCredentials reads the OpenStack Credentials using the Hubic API
|
||||
//
|
||||
// The credentials are read into the Fs
|
||||
func (f *Fs) getCredentials(ctx context.Context) (err error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
if resp.StatusCode < 200 || resp.StatusCode > 299 {
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
|
||||
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
|
||||
}
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
var result credentials
|
||||
err = decoder.Decode(&result)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// fs.Debugf(f, "Got credentials %+v", result)
|
||||
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
|
||||
return errors.New("couldn't read token, result and expired from credentials")
|
||||
}
|
||||
f.credentials = result
|
||||
expires, err := time.Parse(time.RFC3339, result.Expires)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.expires = expires
|
||||
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
client: client,
|
||||
}
|
||||
|
||||
// Make the swift Connection
|
||||
ci := fs.GetConfig(ctx)
|
||||
c := &swiftLib.Connection{
|
||||
Auth: newAuth(f),
|
||||
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
|
||||
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
|
||||
Transport: fshttp.NewTransport(ctx),
|
||||
}
|
||||
err = c.Authenticate(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
|
||||
}
|
||||
|
||||
// Parse config into swift.Options struct
|
||||
opt := new(swift.Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Make inner swift Fs from the connection
|
||||
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return nil, err
|
||||
}
|
||||
f.Fs = swiftFs
|
||||
f.features = f.Fs.Features().Wrap(f)
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
func (f *Fs) UnWrap() fs.Fs {
|
||||
return f.Fs
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.UnWrapper = (*Fs)(nil)
|
||||
)
|
||||
@@ -1,19 +0,0 @@
|
||||
// Test Hubic filesystem interface
|
||||
package hubic_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/hubic"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestHubic:",
|
||||
NilObject: (*hubic.Object)(nil),
|
||||
SkipFsCheckWrap: true,
|
||||
SkipObjectCheckWrap: true,
|
||||
})
|
||||
}
|
||||
@@ -133,11 +133,13 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
|
||||
},
|
||||
|
||||
Options: []fs.Option{{
|
||||
Name: "access_key_id",
|
||||
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
|
||||
Name: "access_key_id",
|
||||
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
|
||||
Name: "secret_access_key",
|
||||
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
// their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
|
||||
Name: "endpoint",
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -75,6 +74,10 @@ const (
|
||||
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
|
||||
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
|
||||
tele2CloudClientID = "desktop"
|
||||
|
||||
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
|
||||
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
|
||||
onlimeCloudClientID = "desktop"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -85,7 +88,7 @@ func init() {
|
||||
Description: "Jottacloud",
|
||||
NewFs: NewFs,
|
||||
Config: Config,
|
||||
Options: []fs.Option{{
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "md5_memory_limit",
|
||||
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
@@ -120,7 +123,7 @@ func init() {
|
||||
Default: (encoder.Display |
|
||||
encoder.EncodeWin | // :?"*<>|
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -140,6 +143,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
}, {
|
||||
Value: "tele2",
|
||||
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
|
||||
}, {
|
||||
Value: "onlime",
|
||||
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
|
||||
}})
|
||||
case "auth_type_done":
|
||||
// Jump to next state according to config chosen
|
||||
@@ -262,6 +268,21 @@ machines.`)
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "onlime": // onlime cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, onlimeCloudClientID)
|
||||
m.Set(configTokenURL, onlimeCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: onlimeCloudAuthURL,
|
||||
TokenURL: onlimeCloudTokenURL,
|
||||
},
|
||||
ClientID: onlimeCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "choose_device":
|
||||
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
|
||||
Choosing no, the default, will let you access the storage used for the archive
|
||||
@@ -822,7 +843,7 @@ func (f *Fs) allocatePathRaw(file string, absolute bool) string {
|
||||
func grantTypeFilter(req *http.Request) {
|
||||
if legacyTokenURL == req.URL.String() {
|
||||
// read the entire body
|
||||
refreshBody, err := ioutil.ReadAll(req.Body)
|
||||
refreshBody, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -832,7 +853,7 @@ func grantTypeFilter(req *http.Request) {
|
||||
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
|
||||
|
||||
// set the new ReadCloser (with a dummy Close())
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
|
||||
req.Body = io.NopCloser(bytes.NewReader(refreshBody))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1789,7 +1810,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
|
||||
var tempFile *os.File
|
||||
|
||||
// create the cache file
|
||||
tempFile, err = ioutil.TempFile("", cachePrefix)
|
||||
tempFile, err = os.CreateTemp("", cachePrefix)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -1817,7 +1838,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
|
||||
} else {
|
||||
// that's a small file, just read it into memory
|
||||
var inData []byte
|
||||
inData, err = ioutil.ReadAll(teeReader)
|
||||
inData, err = io.ReadAll(teeReader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -1839,12 +1860,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err == nil {
|
||||
// if the object exists delete it
|
||||
err = o.remove(ctx, true)
|
||||
if err != nil {
|
||||
if err != nil && err != fs.ErrorObjectNotFound {
|
||||
// if delete failed then report that, unless it was because the file did not exist after all
|
||||
return fmt.Errorf("failed to remove old object: %w", err)
|
||||
}
|
||||
}
|
||||
// if the object does not exist we can just continue but if the error is something different we should report that
|
||||
if err != fs.ErrorObjectNotFound {
|
||||
} else if err != fs.ErrorObjectNotFound {
|
||||
// if the object does not exist we can just continue but if the error is something different we should report that
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -1914,7 +1935,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// copy the already uploaded bytes into the trash :)
|
||||
var result api.UploadResponse
|
||||
_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
|
||||
_, err = io.CopyN(io.Discard, in, response.ResumePos)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1931,7 +1952,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
o.md5 = result.Md5
|
||||
o.modTime = time.Unix(result.Modified/1000, 0)
|
||||
} else {
|
||||
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
|
||||
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
|
||||
return o.readMetaData(ctx, true)
|
||||
}
|
||||
|
||||
@@ -1952,10 +1973,17 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
|
||||
opts.Parameters.Set("dl", "true")
|
||||
}
|
||||
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// attempting to hard delete will fail if path does not exist, but standard delete will succeed
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
|
||||
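// Translating a 404 from the hard-delete endpoint into fs.ErrorObjectNotFound
// matters because the Update hunk earlier now tolerates exactly that error
// when clearing out an old object, so "already gone" no longer fails an
// upload. The mapping itself is just:
if apiErr, ok := err.(*api.Error); ok && apiErr.StatusCode == http.StatusNotFound {
	return fs.ErrorObjectNotFound
}
return err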
@@ -61,9 +61,10 @@ func init() {
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "Your user name.",
|
||||
Required: true,
|
||||
Name: "user",
|
||||
Help: "Your user name.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
|
||||
@@ -376,7 +377,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
for i, file := range files {
|
||||
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
|
||||
if file.Type == "dir" {
|
||||
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
|
||||
entries[i] = fs.NewDir(remote, time.Time{})
|
||||
} else {
|
||||
entries[i] = &Object{
|
||||
fs: f,
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
@@ -124,8 +123,8 @@ routine so this flag shouldn't normally be used.`,
|
||||
Help: `Don't check to see if the files change during upload.
|
||||
|
||||
Normally rclone checks the size and modification time of files as they
|
||||
are being uploaded and aborts with a message which starts "can't copy
|
||||
- source file is being updated" if the file changes during upload.
|
||||
are being uploaded and aborts with a message which starts "can't copy -
|
||||
source file is being updated" if the file changes during upload.
|
||||
|
||||
However on some file systems this modification time check may fail (e.g.
|
||||
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
|
||||
@@ -267,7 +266,10 @@ type Object struct {
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||
var (
|
||||
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
|
||||
)
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
@@ -301,6 +303,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
WriteMetadata: true,
|
||||
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
|
||||
FilterAware: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
if opt.FollowSymlinks {
|
||||
f.lstat = os.Stat
|
||||
@@ -311,7 +314,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err == nil {
|
||||
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||
}
|
||||
// Check to see if this is a .rclonelink if not found
|
||||
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
|
||||
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
|
||||
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
|
||||
}
|
||||
if err == nil && f.isRegular(fi.Mode()) {
|
||||
// Handle the odd case, that a symlink was specified by name without the link suffix
|
||||
if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
|
||||
return nil, errLinksNeedsSuffix
|
||||
}
|
||||
// It is a file, so use the parent as the root
|
||||
f.root = filepath.Dir(f.root)
|
||||
// return an error with an fs which points to the parent
|
||||
@@ -504,7 +516,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
continue
|
||||
}
|
||||
}
|
||||
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
|
||||
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr)
|
||||
fs.Errorf(dir, "%v", fierr)
|
||||
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
|
||||
continue
|
||||
@@ -521,15 +533,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
name := fi.Name()
|
||||
mode := fi.Mode()
|
||||
newRemote := f.cleanRemote(dir, name)
|
||||
// Don't include non directory if not included
|
||||
// we leave directory filtering to the layer above
|
||||
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
|
||||
continue
|
||||
}
|
||||
// Follow symlinks if required
|
||||
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||
localPath := filepath.Join(fsDirPath, name)
|
||||
fi, err = os.Stat(localPath)
|
||||
// Quietly skip errors on excluded files and directories
|
||||
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
|
||||
continue
|
||||
}
|
||||
if os.IsNotExist(err) || isCircularSymlinkError(err) {
|
||||
// Skip bad symlinks and circular symlinks
|
||||
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
|
||||
@@ -554,6 +565,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
|
||||
newRemote += linkSuffix
|
||||
}
|
||||
// Don't include non directory if not included
|
||||
// we leave directory filtering to the layer above
|
||||
if useFilter && !filter.IncludeRemote(newRemote) {
|
||||
continue
|
||||
}
|
||||
fso, err := f.newObjectWithInfo(newRemote, fi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -646,7 +662,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
|
||||
precision = time.Second
|
||||
|
||||
// Create temporary file and test it
|
||||
fd, err := ioutil.TempFile("", "rclone")
|
||||
fd, err := os.CreateTemp("", "rclone")
|
||||
if err != nil {
|
||||
// If failed return 1s
|
||||
// fmt.Println("Failed to create temp file", err)
|
||||
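Many of the hunks in this compare make the same mechanical change: the deprecated io/ioutil helpers are swapped for their Go 1.16+ equivalents in io and os. A minimal standalone sketch of the mapping (not taken from the rclone source, standard library only):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.TempFile -> os.CreateTemp
	fd, err := os.CreateTemp("", "rclone-example")
	if err != nil {
		panic(err)
	}
	defer os.Remove(fd.Name())
	defer fd.Close()

	// ioutil.NopCloser -> io.NopCloser
	rc := io.NopCloser(strings.NewReader("hello"))

	// ioutil.ReadAll -> io.ReadAll
	data, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}

	// ioutil.Discard -> io.Discard
	_, _ = io.CopyN(io.Discard, strings.NewReader("skip me"), 4)

	fmt.Println(fd.Name(), string(data))
}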
@@ -1073,7 +1089,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
|
||||
return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
@@ -1400,30 +1416,27 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
|
||||
}
|
||||
|
||||
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
|
||||
if runtime.GOOS == "windows" {
|
||||
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
|
||||
if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
|
||||
if !filepath.IsAbs(s) {
|
||||
s2, err := filepath.Abs(s)
|
||||
if err == nil {
|
||||
s = s2
|
||||
}
|
||||
} else {
|
||||
s = filepath.Clean(s)
|
||||
}
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
s = filepath.ToSlash(s)
|
||||
vol := filepath.VolumeName(s)
|
||||
s = vol + enc.FromStandardPath(s[len(vol):])
|
||||
s = filepath.FromSlash(s)
|
||||
|
||||
if !noUNC {
|
||||
// Convert to UNC
|
||||
s = file.UNCPath(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
if !filepath.IsAbs(s) {
|
||||
s2, err := filepath.Abs(s)
|
||||
if err == nil {
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
s = enc.FromStandardPath(s)
|
||||
return s
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
@@ -14,10 +14,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/file"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
@@ -33,7 +35,6 @@ func TestMain(m *testing.M) {
|
||||
// Test copy with source file that's updating
|
||||
func TestUpdatingCheck(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
filePath := "sub dir/local test"
|
||||
r.WriteFile(filePath, "content", time.Now())
|
||||
|
||||
@@ -78,7 +79,6 @@ func TestUpdatingCheck(t *testing.T) {
|
||||
func TestSymlink(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
f := r.Flocal.(*Fs)
|
||||
dir := f.root
|
||||
|
||||
@@ -147,10 +147,24 @@ func TestSymlink(t *testing.T) {
|
||||
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
|
||||
require.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
|
||||
// Check that NewFs works with the suffixed version and --links
|
||||
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
|
||||
"links": "true",
|
||||
})
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
require.Equal(t, dir, f2.(*Fs).root)
|
||||
|
||||
// Check that NewFs doesn't see the non suffixed version with --links
|
||||
f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
|
||||
"links": "true",
|
||||
})
|
||||
require.Equal(t, errLinksNeedsSuffix, err)
|
||||
require.Nil(t, f2)
|
||||
|
||||
// Check reading the object
|
||||
in, err := o.Open(ctx)
|
||||
require.NoError(t, err)
|
||||
contents, err := ioutil.ReadAll(in)
|
||||
contents, err := io.ReadAll(in)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "file.txt", string(contents))
|
||||
require.NoError(t, in.Close())
|
||||
@@ -158,7 +172,7 @@ func TestSymlink(t *testing.T) {
|
||||
// Check reading the object with range
|
||||
in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
|
||||
require.NoError(t, err)
|
||||
contents, err = ioutil.ReadAll(in)
|
||||
contents, err = io.ReadAll(in)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "file.txt"[2:5+1], string(contents))
|
||||
require.NoError(t, in.Close())
|
||||
@@ -177,7 +191,6 @@ func TestSymlinkError(t *testing.T) {
|
||||
func TestHashOnUpdate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
const filePath = "file.txt"
|
||||
when := time.Now()
|
||||
r.WriteFile(filePath, "content", when)
|
||||
@@ -208,7 +221,6 @@ func TestHashOnUpdate(t *testing.T) {
|
||||
func TestHashOnDelete(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
const filePath = "file.txt"
|
||||
when := time.Now()
|
||||
r.WriteFile(filePath, "content", when)
|
||||
@@ -237,7 +249,6 @@ func TestHashOnDelete(t *testing.T) {
|
||||
func TestMetadata(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
const filePath = "metafile.txt"
|
||||
when := time.Now()
|
||||
const dayLength = len("2001-01-01")
|
||||
@@ -372,7 +383,6 @@ func TestMetadata(t *testing.T) {
|
||||
func TestFilter(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
when := time.Now()
|
||||
r.WriteFile("included", "included file", when)
|
||||
r.WriteFile("excluded", "excluded file", when)
|
||||
@@ -401,3 +411,147 @@ func TestFilter(t *testing.T) {
|
||||
sort.Sort(entries)
|
||||
require.Equal(t, "[included]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
func testFilterSymlink(t *testing.T, copyLinks bool) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
when := time.Now()
|
||||
f := r.Flocal.(*Fs)
|
||||
|
||||
// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
|
||||
r.WriteFile("included.file", "included file", when)
|
||||
r.WriteFile("included.dir/included.sub.file", "included sub file", when)
|
||||
require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
|
||||
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
|
||||
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
|
||||
|
||||
defer func() {
|
||||
// Reset -L/-l mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = false
|
||||
f.lstat = os.Lstat
|
||||
}()
|
||||
if copyLinks {
|
||||
// Set fs into "-L" mode
|
||||
f.opt.FollowSymlinks = true
|
||||
f.opt.TranslateSymlinks = false
|
||||
f.lstat = os.Stat
|
||||
} else {
|
||||
// Set fs into "-l" mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = true
|
||||
f.lstat = os.Lstat
|
||||
}
|
||||
|
||||
// Check set up for filtering
|
||||
assert.True(t, f.Features().FilterAware)
|
||||
|
||||
// Reset global error count
|
||||
accounting.Stats(ctx).ResetErrors()
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
|
||||
// Add a filter
|
||||
ctx, fi := filter.AddConfig(ctx)
|
||||
require.NoError(t, fi.AddRule("+ included.file"))
|
||||
require.NoError(t, fi.AddRule("+ included.dir/**"))
|
||||
if copyLinks {
|
||||
require.NoError(t, fi.AddRule("+ included.file.link"))
|
||||
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
|
||||
} else {
|
||||
require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
|
||||
require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
|
||||
}
|
||||
require.NoError(t, fi.AddRule("- *"))
|
||||
|
||||
// Check listing without use filter flag
|
||||
entries, err := f.List(ctx, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
if copyLinks {
|
||||
// Check 1 global error for the dangling symlink
|
||||
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
} else {
|
||||
// Check 0 global errors as the dangling symlink is handled properly
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
}
|
||||
accounting.Stats(ctx).ResetErrors()
|
||||
|
||||
sort.Sort(entries)
|
||||
if copyLinks {
|
||||
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
|
||||
} else {
|
||||
require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
// Add user filter flag
|
||||
ctx = filter.SetUseFilter(ctx, true)
|
||||
|
||||
// Check listing with use filter flag
|
||||
entries, err = f.List(ctx, "")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
|
||||
sort.Sort(entries)
|
||||
if copyLinks {
|
||||
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
|
||||
} else {
|
||||
require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
// Check listing through a symlink still works
|
||||
entries, err = f.List(ctx, "included.dir")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
|
||||
sort.Sort(entries)
|
||||
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
func TestFilterSymlinkCopyLinks(t *testing.T) {
|
||||
testFilterSymlink(t, true)
|
||||
}
|
||||
|
||||
func TestFilterSymlinkLinks(t *testing.T) {
|
||||
testFilterSymlink(t, false)
|
||||
}
|
||||
|
||||
func TestCopySymlink(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
when := time.Now()
|
||||
f := r.Flocal.(*Fs)
|
||||
|
||||
// Create a file and a symlink to it
|
||||
r.WriteFile("src/file.txt", "hello world", when)
|
||||
require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
|
||||
defer func() {
|
||||
// Reset -L/-l mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = false
|
||||
f.lstat = os.Lstat
|
||||
}()
|
||||
|
||||
// Set fs into "-l/--links" mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = true
|
||||
f.lstat = os.Lstat
|
||||
|
||||
// Create dst
|
||||
require.NoError(t, f.Mkdir(ctx, "dst"))
|
||||
|
||||
// Do copy from src into dst
|
||||
src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, src)
|
||||
dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, dst)
|
||||
|
||||
// Test that we made a symlink and it has the right contents
|
||||
dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
|
||||
linkContents, err := os.Readlink(dstPath)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "file.txt", linkContents)
|
||||
}
|
||||
|
||||
@@ -5,19 +5,42 @@ package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
statxCheckOnce sync.Once
|
||||
readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
|
||||
)
|
||||
|
||||
// Read the metadata from the file into metadata where possible
|
||||
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
|
||||
statxCheckOnce.Do(func() {
|
||||
// Check statx() is available as it was only introduced in kernel 4.11
|
||||
// If not, fall back to fstatat(), which was introduced in kernel 2.6.16 and is available on all kernels Go supports
|
||||
var stat unix.Statx_t
|
||||
if runtime.GOOS != "android" && unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
|
||||
readMetadataFromFileFn = readMetadataFromFileStatx
|
||||
} else {
|
||||
readMetadataFromFileFn = readMetadataFromFileFstatat
|
||||
}
|
||||
})
|
||||
return readMetadataFromFileFn(o, m)
|
||||
}
|
||||
|
||||
// Read the metadata from the file into metadata where possible
|
||||
func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
|
||||
flags := unix.AT_SYMLINK_NOFOLLOW
|
||||
if o.fs.opt.FollowSymlinks {
|
||||
flags = 0
|
||||
}
|
||||
var stat unix.Statx_t
|
||||
// statx() was added to Linux in kernel 4.11
|
||||
err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
|
||||
unix.STATX_TYPE | // Want stx_mode & S_IFMT
|
||||
unix.STATX_MODE | // Want stx_mode & ~S_IFMT
|
||||
@@ -45,3 +68,36 @@ func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
|
||||
setTime("btime", stat.Btime)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read the metadata from the file into metadata where possible
|
||||
func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
|
||||
flags := unix.AT_SYMLINK_NOFOLLOW
|
||||
if o.fs.opt.FollowSymlinks {
|
||||
flags = 0
|
||||
}
|
||||
var stat unix.Stat_t
|
||||
// fstatat() was added to Linux in kernel 2.6.16
|
||||
// Go only supports 2.6.32 or later
|
||||
err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
|
||||
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
|
||||
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
|
||||
if stat.Rdev != 0 {
|
||||
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
|
||||
}
|
||||
setTime := func(key string, t unix.Timespec) {
|
||||
// The types of t.Sec and t.Nsec vary from int32 to int64 on
|
||||
// different Linux architectures so we need to cast them to
|
||||
// int64 here and hence need to quiet the linter about
|
||||
// unnecessary casts.
|
||||
//
|
||||
// nolint: unconvert
|
||||
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
|
||||
}
|
||||
setTime("atime", stat.Atim)
|
||||
setTime("mtime", stat.Mtim)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -13,7 +12,7 @@ import (
|
||||
|
||||
// Check we can remove an open file
|
||||
func TestRemove(t *testing.T) {
|
||||
fd, err := ioutil.TempFile("", "rclone-remove-test")
|
||||
fd, err := os.CreateTemp("", "rclone-remove-test")
|
||||
require.NoError(t, err)
|
||||
name := fd.Name()
|
||||
defer func() {
|
||||
|
||||
@@ -69,6 +69,11 @@ func (w *BinWriter) WritePu64(val int64) {
|
||||
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
|
||||
}
|
||||
|
||||
// WriteP64 writes a signed long as an unsigned varint
|
||||
func (w *BinWriter) WriteP64(val int64) {
|
||||
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
|
||||
}
|
||||
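Both WritePu64 and the new WriteP64 pass uint64(val) to binary.PutUvarint, i.e. a signed value is reinterpreted as its two's-complement bit pattern before varint encoding, so negative values only round-trip if the reader applies the same cast. A small standard-library sketch (not rclone code) showing the effect:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var buf [binary.MaxVarintLen64]byte

	// uint64(int64(-1)) == 0xFFFFFFFFFFFFFFFF, so the unsigned varint is 10 bytes long.
	n := binary.PutUvarint(buf[:], uint64(int64(-1)))
	fmt.Println("encoded length:", n) // 10

	// Decoding and casting back recovers the original signed value.
	u, _ := binary.Uvarint(buf[:n])
	fmt.Println("decoded:", int64(u)) // -1
}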
|
||||
// WriteString writes a zero-terminated string
|
||||
func (w *BinWriter) WriteString(str string) {
|
||||
buf := []byte(str)
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
@@ -86,13 +85,19 @@ func init() {
|
||||
Name: "mailru",
|
||||
Description: "Mail.ru Cloud",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name (usually email).",
|
||||
Required: true,
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name (usually email).",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
Name: "pass",
|
||||
Help: `Password.
|
||||
|
||||
This must be an app password - rclone will not work with your normal
|
||||
password. See the Configuration section in the docs for how to make an
|
||||
app password.
|
||||
`,
|
||||
Required: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
@@ -209,7 +214,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
|
||||
encoder.EncodeWin | // :?"*<>|
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -641,12 +646,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
|
||||
return nil, -1, err
|
||||
}
|
||||
|
||||
mTime := int64(item.Mtime)
|
||||
if mTime < 0 {
|
||||
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
|
||||
mTime = 0
|
||||
}
|
||||
modTime := time.Unix(mTime, 0)
|
||||
modTime := time.Unix(int64(item.Mtime), 0)
|
||||
|
||||
isDir, err := f.isDir(item.Kind, remote)
|
||||
if err != nil {
|
||||
@@ -1660,7 +1660,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// Attempt to put by calculating hash in memory
|
||||
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
|
||||
fileBuf, err = ioutil.ReadAll(in)
|
||||
fileBuf, err = io.ReadAll(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1703,7 +1703,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if size <= mrhash.Size {
|
||||
// Optimize upload: skip extra request if data fits in the hash buffer.
|
||||
if fileBuf == nil {
|
||||
fileBuf, err = ioutil.ReadAll(wrapIn)
|
||||
fileBuf, err = io.ReadAll(wrapIn)
|
||||
}
|
||||
if fileHash == nil && err == nil {
|
||||
fileHash = mrhash.Sum(fileBuf)
|
||||
@@ -2058,7 +2058,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
|
||||
req.WritePu16(0) // revision
|
||||
req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
|
||||
req.WritePu64(o.size)
|
||||
req.WritePu64(o.modTime.Unix())
|
||||
req.WriteP64(o.modTime.Unix())
|
||||
req.WritePu32(0)
|
||||
req.Write(o.mrHash)
|
||||
|
||||
@@ -2214,7 +2214,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
fs.Debugf(o, "Server returned full content instead of range")
|
||||
if start > 0 {
|
||||
// Discard the beginning of the data
|
||||
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
|
||||
_, err = io.CopyN(io.Discard, wrapStream, start)
|
||||
if err != nil {
|
||||
closeBody(res)
|
||||
return nil, err
|
||||
|
||||
@@ -58,9 +58,10 @@ func init() {
|
||||
Description: "Mega",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name.",
|
||||
Required: true,
|
||||
Name: "user",
|
||||
Help: "User name.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
@@ -83,6 +84,17 @@ than permanently deleting them. If you specify this then rclone will
|
||||
permanently delete objects instead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_https",
|
||||
Help: `Use HTTPS for transfers.
|
||||
|
||||
MEGA uses plain text HTTP connections by default.
|
||||
Some ISPs throttle HTTP connections, which causes transfers to become very slow.
|
||||
Enabling this will force MEGA to use HTTPS for all transfers.
|
||||
HTTPS is normally not necessary since all data is already encrypted anyway.
|
||||
Enabling it will increase CPU usage and add network overhead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -100,6 +112,7 @@ type Options struct {
|
||||
Pass string `config:"pass"`
|
||||
Debug bool `config:"debug"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
UseHTTPS bool `config:"use_https"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -204,6 +217,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if srv == nil {
|
||||
srv = mega.New().SetClient(fshttp.NewClient(ctx))
|
||||
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
|
||||
srv.SetHTTPS(opt.UseHTTPS)
|
||||
srv.SetLogger(func(format string, v ...interface{}) {
|
||||
fs.Infof("*go-mega*", format, v...)
|
||||
})
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -575,7 +574,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
data = data[:limit]
|
||||
}
|
||||
return ioutil.NopCloser(bytes.NewBuffer(data)), nil
|
||||
return io.NopCloser(bytes.NewBuffer(data)), nil
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
@@ -583,7 +582,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
data, err := ioutil.ReadAll(in)
|
||||
data, err := io.ReadAll(in)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update memory object: %w", err)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"fmt"
|
||||
gohash "hash"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -66,11 +65,13 @@ HTTP is provided primarily for debugging purposes.`,
|
||||
Help: `Domain+path of NetStorage host to connect to.
|
||||
|
||||
Format should be ` + "`<domain>/<internal folders>`",
|
||||
Required: true,
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "account",
|
||||
Help: "Set the NetStorage account name",
|
||||
Required: true,
|
||||
Name: "account",
|
||||
Help: "Set the NetStorage account name",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "secret",
|
||||
Help: `Set the NetStorage account secret/G2O key for authentication.
|
||||
@@ -820,6 +821,8 @@ func (f *Fs) getAuth(req *http.Request) error {
|
||||
// Set Authorization header
|
||||
dataHeader := generateDataHeader(f)
|
||||
path := req.URL.RequestURI()
|
||||
//lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
|
||||
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
|
||||
actionHeader := req.Header["X-Akamai-ACS-Action"][0]
|
||||
fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
|
||||
req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)
|
||||
@@ -972,7 +975,7 @@ func (o *Object) netStorageUploadRequest(ctx context.Context, in io.Reader, src
|
||||
URL = o.fs.url(src.Remote())
|
||||
}
|
||||
if strings.HasSuffix(URL, ".rclonelink") {
|
||||
bits, err := ioutil.ReadAll(in)
|
||||
bits, err := io.ReadAll(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1058,7 +1061,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
|
||||
if strings.HasSuffix(URL, ".rclonelink") && o.target != "" {
|
||||
fs.Infof(nil, "Converting a symlink to the rclonelink file on download %q", URL)
|
||||
reader := strings.NewReader(o.target)
|
||||
readcloser := ioutil.NopCloser(reader)
|
||||
readcloser := io.NopCloser(reader)
|
||||
return readcloser, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -126,6 +126,7 @@ type HashesType struct {
|
||||
Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available)
|
||||
Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available)
|
||||
QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
|
||||
Sha256Hash string `json:"sha256Hash"` // hex encoded SHA256 value of the file (if available)
|
||||
}
|
||||
|
||||
// FileFacet groups file-related data on OneDrive into a single structure.
|
||||
|
||||
@@ -131,10 +131,11 @@ Note that the chunks will be buffered into memory.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "drive_id",
|
||||
Help: "The ID of the drive to use.",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Name: "drive_id",
|
||||
Help: "The ID of the drive to use.",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "drive_type",
|
||||
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
|
||||
@@ -148,7 +149,8 @@ This isn't normally needed, but in special circumstances you might
|
||||
know the folder ID that you wish to access but not be able to get
|
||||
there through a path traversal.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "access_scopes",
|
||||
Help: `Set scopes to be requested by rclone.
|
||||
@@ -196,7 +198,9 @@ listing, set this option.`,
|
||||
}, {
|
||||
Name: "server_side_across_configs",
|
||||
Default: false,
|
||||
Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.
|
||||
Help: `Deprecated: use --server-side-across-configs instead.
|
||||
|
||||
Allow server-side operations (e.g. copy) to work across different onedrive configs.
|
||||
|
||||
This will only work if you are copying between two OneDrive *Personal* drives AND
|
||||
the files to copy are already shared between them. In other cases, rclone will
|
||||
@@ -257,6 +261,67 @@ this flag there.
|
||||
Help: `Set the password for links created by the link command.
|
||||
|
||||
At the time of writing this only works with OneDrive personal paid accounts.
|
||||
`,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "hash_type",
|
||||
Default: "auto",
|
||||
Help: `Specify the hash in use for the backend.
|
||||
|
||||
This specifies the hash type in use. If set to "auto" it will use the
|
||||
default hash which is QuickXorHash.
|
||||
|
||||
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
|
||||
Personal. For 1.62 and later the default is to use a QuickXorHash for
|
||||
all onedrive types. If an SHA1 hash is desired then set this option
|
||||
accordingly.
|
||||
|
||||
From July 2023 QuickXorHash will be the only available hash for
|
||||
both OneDrive for Business and OneDrive Personal.
|
||||
|
||||
This can be set to "none" to not use any hashes.
|
||||
|
||||
If the hash requested does not exist on the object, it will be
|
||||
returned as an empty string which is treated as a missing hash by
|
||||
rclone.
|
||||
`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "auto",
|
||||
Help: "Rclone chooses the best hash",
|
||||
}, {
|
||||
Value: "quickxor",
|
||||
Help: "QuickXor",
|
||||
}, {
|
||||
Value: "sha1",
|
||||
Help: "SHA1",
|
||||
}, {
|
||||
Value: "sha256",
|
||||
Help: "SHA256",
|
||||
}, {
|
||||
Value: "crc32",
|
||||
Help: "CRC32",
|
||||
}, {
|
||||
Value: "none",
|
||||
Help: "None - don't use any hashes",
|
||||
}},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "av_override",
|
||||
Default: false,
|
||||
Help: `Allows download of files the server thinks have a virus.
|
||||
|
||||
The onedrive/sharepoint server may check files uploaded with an Anti
|
||||
Virus checker. If it detects any potential viruses or malware it will
|
||||
block download of the file.
|
||||
|
||||
In this case you will see a message like this
|
||||
|
||||
server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
|
||||
|
||||
If you are 100% sure you want to download this file anyway then use
|
||||
the --onedrive-av-override flag, or av_override = true in the config
|
||||
file.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -511,7 +576,7 @@ Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
|
||||
`)
|
||||
case "url_end":
|
||||
siteURL := config.Result
|
||||
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
|
||||
re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
|
||||
match := re.FindStringSubmatch(siteURL)
|
||||
if len(match) == 2 {
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
@@ -597,6 +662,8 @@ type Options struct {
|
||||
LinkScope string `config:"link_scope"`
|
||||
LinkType string `config:"link_type"`
|
||||
LinkPassword string `config:"link_password"`
|
||||
HashType string `config:"hash_type"`
|
||||
AVOverride bool `config:"av_override"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -613,6 +680,7 @@ type Fs struct {
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
driveID string // ID to use for querying Microsoft Graph
|
||||
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
|
||||
hashType hash.Type // type of the hash we are using
|
||||
}
|
||||
|
||||
// Object describes a OneDrive object
|
||||
@@ -626,8 +694,7 @@ type Object struct {
|
||||
size int64 // size of the object
|
||||
modTime time.Time // modification time of the object
|
||||
id string // ID of the object
|
||||
sha1 string // SHA-1 of the object content
|
||||
quickxorhash string // QuickXorHash of the object content
|
||||
hash string // Hash of the content, usually QuickXorHash but set as hash_type
|
||||
mimeType string // Content-Type of object from server (may not be as uploaded)
|
||||
}
|
||||
|
||||
@@ -882,6 +949,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
driveType: opt.DriveType,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
hashType: QuickXorHashType,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
@@ -891,6 +959,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}).Fill(ctx, f)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
|
||||
// Set the user defined hash
|
||||
if opt.HashType == "auto" || opt.HashType == "" {
|
||||
opt.HashType = QuickXorHashType.String()
|
||||
}
|
||||
err = f.hashType.Set(opt.HashType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Disable change polling in China region
|
||||
// See: https://github.com/rclone/rclone/issues/6444
|
||||
if f.opt.Region == regionCN {
|
||||
@@ -1556,10 +1633,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
if f.driveType == driveTypePersonal {
|
||||
return hash.Set(hash.SHA1)
|
||||
}
|
||||
return hash.Set(QuickXorHashType)
|
||||
return hash.Set(f.hashType)
|
||||
}
|
||||
|
||||
// PublicLink returns a link for downloading without account.
|
||||
@@ -1674,6 +1748,10 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
token := make(chan struct{}, f.ci.Checkers)
|
||||
var wg sync.WaitGroup
|
||||
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
|
||||
if err != nil {
|
||||
fs.Errorf(f, "Failed to list %q: %v", path, err)
|
||||
return nil
|
||||
}
|
||||
err = entries.ForObjectError(func(obj fs.Object) error {
|
||||
o, ok := obj.(*Object)
|
||||
if !ok {
|
||||
@@ -1768,14 +1846,8 @@ func (o *Object) rootPath() string {
|
||||
|
||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if o.fs.driveType == driveTypePersonal {
|
||||
if t == hash.SHA1 {
|
||||
return o.sha1, nil
|
||||
}
|
||||
} else {
|
||||
if t == QuickXorHashType {
|
||||
return o.quickxorhash, nil
|
||||
}
|
||||
if t == o.fs.hashType {
|
||||
return o.hash, nil
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
@@ -1806,16 +1878,23 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
file := info.GetFile()
|
||||
if file != nil {
|
||||
o.mimeType = file.MimeType
|
||||
if file.Hashes.Sha1Hash != "" {
|
||||
o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
|
||||
}
|
||||
if file.Hashes.QuickXorHash != "" {
|
||||
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
|
||||
} else {
|
||||
o.quickxorhash = hex.EncodeToString(h)
|
||||
o.hash = ""
|
||||
switch o.fs.hashType {
|
||||
case QuickXorHashType:
|
||||
if file.Hashes.QuickXorHash != "" {
|
||||
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
|
||||
} else {
|
||||
o.hash = hex.EncodeToString(h)
|
||||
}
|
||||
}
|
||||
case hash.SHA1:
|
||||
o.hash = strings.ToLower(file.Hashes.Sha1Hash)
|
||||
case hash.SHA256:
|
||||
o.hash = strings.ToLower(file.Hashes.Sha256Hash)
|
||||
case hash.CRC32:
|
||||
o.hash = strings.ToLower(file.Hashes.Crc32Hash)
|
||||
}
|
||||
}
|
||||
fileSystemInfo := info.GetFileSystemInfo()
|
||||
@@ -1911,12 +1990,20 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
var resp *http.Response
|
||||
opts := o.fs.newOptsCall(o.id, "GET", "/content")
|
||||
opts.Options = options
|
||||
if o.fs.opt.AVOverride {
|
||||
opts.Parameters = url.Values{"AVOverride": {"1"}}
|
||||
}
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
|
||||
err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -7,51 +7,40 @@
|
||||
// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
|
||||
package quickxorhash
|
||||
|
||||
// This code was ported from the code snippet linked from
|
||||
// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
|
||||
// Which has the copyright
|
||||
// This code was ported from a fast C-implementation from
|
||||
// https://github.com/namazso/QuickXorHash
|
||||
// which is licensed under the BSD Zero Clause License
|
||||
//
|
||||
// BSD Zero Clause License
|
||||
//
|
||||
// Copyright (c) 2022 namazso <admin@namazso.eu>
|
||||
//
|
||||
// Permission to use, copy, modify, and/or distribute this software for any
|
||||
// purpose with or without fee is hereby granted.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
// AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
// PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// ------------------------------------------------------------------------------
|
||||
// Copyright (c) 2016 Microsoft Corporation
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
// ------------------------------------------------------------------------------
|
||||
|
||||
import (
|
||||
"hash"
|
||||
)
|
||||
import "hash"
|
||||
|
||||
const (
|
||||
// BlockSize is the preferred size for hashing
|
||||
BlockSize = 64
|
||||
// Size of the output checksum
|
||||
Size = 20
|
||||
bitsInLastCell = 32
|
||||
shift = 11
|
||||
widthInBits = 8 * Size
|
||||
dataSize = (widthInBits-1)/64 + 1
|
||||
Size = 20
|
||||
shift = 11
|
||||
widthInBits = 8 * Size
|
||||
dataSize = shift * widthInBits
|
||||
)
|
||||
|
||||
type quickXorHash struct {
|
||||
data [dataSize]uint64
|
||||
lengthSoFar uint64
|
||||
shiftSoFar int
|
||||
data [dataSize]byte
|
||||
size uint64
|
||||
}
|
||||
|
||||
// New returns a new hash.Hash computing the quickXorHash checksum.
|
||||
@@ -70,94 +59,37 @@ func New() hash.Hash {
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
func (q *quickXorHash) Write(p []byte) (n int, err error) {
|
||||
currentshift := q.shiftSoFar
|
||||
|
||||
// The bitvector where we'll start xoring
|
||||
vectorArrayIndex := currentshift / 64
|
||||
|
||||
// The position within the bit vector at which we begin xoring
|
||||
vectorOffset := currentshift % 64
|
||||
iterations := len(p)
|
||||
if iterations > widthInBits {
|
||||
iterations = widthInBits
|
||||
var i int
|
||||
// fill last remain
|
||||
lastRemain := q.size % dataSize
|
||||
if lastRemain != 0 {
|
||||
i += xorBytes(q.data[lastRemain:], p)
|
||||
}
|
||||
|
||||
for i := 0; i < iterations; i++ {
|
||||
isLastCell := vectorArrayIndex == len(q.data)-1
|
||||
var bitsInVectorCell int
|
||||
if isLastCell {
|
||||
bitsInVectorCell = bitsInLastCell
|
||||
} else {
|
||||
bitsInVectorCell = 64
|
||||
}
|
||||
|
||||
// There's at least 2 bitvectors before we reach the end of the array
|
||||
if vectorOffset <= bitsInVectorCell-8 {
|
||||
for j := i; j < len(p); j += widthInBits {
|
||||
q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
|
||||
}
|
||||
} else {
|
||||
index1 := vectorArrayIndex
|
||||
var index2 int
|
||||
if isLastCell {
|
||||
index2 = 0
|
||||
} else {
|
||||
index2 = vectorArrayIndex + 1
|
||||
}
|
||||
low := byte(bitsInVectorCell - vectorOffset)
|
||||
|
||||
xoredByte := byte(0)
|
||||
for j := i; j < len(p); j += widthInBits {
|
||||
xoredByte ^= p[j]
|
||||
}
|
||||
q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
|
||||
q.data[index2] ^= uint64(xoredByte) >> low
|
||||
}
|
||||
vectorOffset += shift
|
||||
for vectorOffset >= bitsInVectorCell {
|
||||
if isLastCell {
|
||||
vectorArrayIndex = 0
|
||||
} else {
|
||||
vectorArrayIndex = vectorArrayIndex + 1
|
||||
}
|
||||
vectorOffset -= bitsInVectorCell
|
||||
if i != len(p) {
|
||||
for len(p)-i >= dataSize {
|
||||
i += xorBytes(q.data[:], p[i:])
|
||||
}
|
||||
xorBytes(q.data[:], p[i:])
|
||||
}
|
||||
|
||||
// Update the starting position in a circular shift pattern
|
||||
q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
|
||||
|
||||
q.lengthSoFar += uint64(len(p))
|
||||
|
||||
q.size += uint64(len(p))
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Calculate the current checksum
|
||||
func (q *quickXorHash) checkSum() (h [Size]byte) {
|
||||
// Output the data as little endian bytes
|
||||
ph := 0
|
||||
for i := 0; i < len(q.data)-1; i++ {
|
||||
d := q.data[i]
|
||||
_ = h[ph+7] // bounds check
|
||||
h[ph+0] = byte(d >> (8 * 0))
|
||||
h[ph+1] = byte(d >> (8 * 1))
|
||||
h[ph+2] = byte(d >> (8 * 2))
|
||||
h[ph+3] = byte(d >> (8 * 3))
|
||||
h[ph+4] = byte(d >> (8 * 4))
|
||||
h[ph+5] = byte(d >> (8 * 5))
|
||||
h[ph+6] = byte(d >> (8 * 6))
|
||||
h[ph+7] = byte(d >> (8 * 7))
|
||||
ph += 8
|
||||
func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
|
||||
for i := 0; i < dataSize; i++ {
|
||||
shift := (i * 11) % 160
|
||||
shiftBytes := shift / 8
|
||||
shiftBits := shift % 8
|
||||
shifted := int(q.data[i]) << shiftBits
|
||||
h[shiftBytes] ^= byte(shifted)
|
||||
h[shiftBytes+1] ^= byte(shifted >> 8)
|
||||
}
|
||||
// remaining 32 bits
|
||||
d := q.data[len(q.data)-1]
|
||||
h[Size-4] = byte(d >> (8 * 0))
|
||||
h[Size-3] = byte(d >> (8 * 1))
|
||||
h[Size-2] = byte(d >> (8 * 2))
|
||||
h[Size-1] = byte(d >> (8 * 3))
|
||||
h[0] ^= h[20]
|
||||
|
||||
// XOR the file length with the least significant bits in little endian format
|
||||
d = q.lengthSoFar
|
||||
d := q.size
|
||||
h[Size-8] ^= byte(d >> (8 * 0))
|
||||
h[Size-7] ^= byte(d >> (8 * 1))
|
||||
h[Size-6] ^= byte(d >> (8 * 2))
|
||||
@@ -174,7 +106,7 @@ func (q *quickXorHash) checkSum() (h [Size]byte) {
|
||||
// It does not change the underlying hash state.
|
||||
func (q *quickXorHash) Sum(b []byte) []byte {
|
||||
hash := q.checkSum()
|
||||
return append(b, hash[:]...)
|
||||
return append(b, hash[:Size]...)
|
||||
}
|
||||
|
||||
// Reset resets the Hash to its initial state.
|
||||
@@ -196,8 +128,10 @@ func (q *quickXorHash) BlockSize() int {
|
||||
}
|
||||
|
||||
// Sum returns the quickXorHash checksum of the data.
|
||||
func Sum(data []byte) [Size]byte {
|
||||
func Sum(data []byte) (h [Size]byte) {
|
||||
var d quickXorHash
|
||||
_, _ = d.Write(data)
|
||||
return d.checkSum()
|
||||
s := d.checkSum()
|
||||
copy(h[:], s[:])
|
||||
return h
|
||||
}
|
||||
|
||||
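The exported surface of the package (New, Sum, Size, BlockSize) is unchanged by this rewrite, so existing callers keep working. A short usage sketch, assuming the import path shown; note OneDrive reports this digest base64 encoded, which is why setMetaData above decodes base64 and re-encodes as hex:

package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"

	"github.com/rclone/rclone/backend/onedrive/quickxorhash"
)

func main() {
	data := []byte("The quick brown fox jumps over the lazy dog")

	// One-shot helper.
	sum := quickxorhash.Sum(data)

	// Streaming interface, equivalent result.
	h := quickxorhash.New()
	_, _ = h.Write(data)
	streamed := h.Sum(nil)

	fmt.Println(hex.EncodeToString(sum[:]))
	fmt.Println(hex.EncodeToString(streamed))
	// The Graph API reports this value base64 encoded.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}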
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"hash"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -166,3 +167,16 @@ func TestReset(t *testing.T) {
|
||||
|
||||
// check interface
|
||||
var _ hash.Hash = (*quickXorHash)(nil)
|
||||
|
||||
func BenchmarkQuickXorHash(b *testing.B) {
|
||||
b.SetBytes(1 << 20)
|
||||
buf := make([]byte, 1<<20)
|
||||
rand.Read(buf)
|
||||
h := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
h.Reset()
|
||||
h.Write(buf)
|
||||
h.Sum(nil)
|
||||
}
|
||||
}
|
||||
|
||||
backend/onedrive/quickxorhash/xor.go (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
//go:build !go1.20
|
||||
|
||||
package quickxorhash
|
||||
|
||||
func xorBytes(dst, src []byte) int {
|
||||
n := len(dst)
|
||||
if len(src) < n {
|
||||
n = len(src)
|
||||
}
|
||||
if n == 0 {
|
||||
return 0
|
||||
}
|
||||
dst = dst[:n]
|
||||
//src = src[:n]
|
||||
src = src[:len(dst)] // remove bounds check in loop
|
||||
for i := range dst {
|
||||
dst[i] ^= src[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
backend/onedrive/quickxorhash/xor_1.20.go (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
//go:build go1.20
|
||||
|
||||
package quickxorhash
|
||||
|
||||
import "crypto/subtle"
|
||||
|
||||
func xorBytes(dst, src []byte) int {
|
||||
return subtle.XORBytes(dst, src, dst)
|
||||
}
|
||||
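Both build-tagged variants keep the same contract: xorBytes XORs min(len(dst), len(src)) bytes of src into dst and returns that count, with the go1.20 file delegating to crypto/subtle.XORBytes. A small property-style test sketch (not part of the change) that should pass under either build:

package quickxorhash

import (
	"bytes"
	"testing"
)

func TestXorBytesContract(t *testing.T) {
	dst := []byte{0x00, 0xFF, 0x0F, 0xF0}
	src := []byte{0xAA, 0xAA, 0xAA} // shorter than dst on purpose

	want := []byte{0xAA, 0x55, 0xA5, 0xF0} // last byte untouched
	n := xorBytes(dst, src)

	if n != 3 || !bytes.Equal(dst, want) {
		t.Fatalf("got n=%d dst=%x, want n=3 dst=%x", n, dst, want)
	}
}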
@@ -42,9 +42,10 @@ func init() {
|
||||
Description: "OpenDrive",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "username",
|
||||
Help: "Username.",
|
||||
Required: true,
|
||||
Name: "username",
|
||||
Help: "Username.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Password.",
|
||||
|
||||
backend/oracleobjectstorage/byok.go (new file, 145 lines)
@@ -0,0 +1,145 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
|
||||
)
|
||||
|
||||
const (
|
||||
sseDefaultAlgorithm = "AES256"
|
||||
)
|
||||
|
||||
func getSha256(p []byte) []byte {
|
||||
h := sha256.New()
|
||||
h.Write(p)
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
func validateSSECustomerKeyOptions(opt *Options) error {
|
||||
if opt.SSEKMSKeyID != "" && (opt.SSECustomerKeyFile != "" || opt.SSECustomerKey != "") {
|
||||
return errors.New("oos: can't use vault sse_kms_key_id and local sse_customer_key at the same time")
|
||||
}
|
||||
if opt.SSECustomerKey != "" && opt.SSECustomerKeyFile != "" {
|
||||
return errors.New("oos: can't use sse_customer_key and sse_customer_key_file at the same time")
|
||||
}
|
||||
if opt.SSEKMSKeyID != "" {
|
||||
return nil
|
||||
}
|
||||
err := populateSSECustomerKeys(opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateSSECustomerKeys(opt *Options) error {
|
||||
if opt.SSECustomerKeyFile != "" {
|
||||
// Reads the base64-encoded AES key data from the specified file and computes its SHA256 checksum
|
||||
data, err := os.ReadFile(expandPath(opt.SSECustomerKeyFile))
|
||||
if err != nil {
|
||||
return fmt.Errorf("oos: error reading sse_customer_key_file: %v", err)
|
||||
}
|
||||
opt.SSECustomerKey = strings.TrimSpace(string(data))
|
||||
}
|
||||
if opt.SSECustomerKey != "" {
|
||||
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("oos: Could not decode sse_customer_key_file: %w", err)
|
||||
}
|
||||
sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
|
||||
if opt.SSECustomerKeySha256 == "" {
|
||||
opt.SSECustomerKeySha256 = sha256Checksum
|
||||
} else {
|
||||
if opt.SSECustomerKeySha256 != sha256Checksum {
|
||||
return fmt.Errorf("the computed SHA256 checksum "+
|
||||
"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
|
||||
sha256Checksum, opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
if opt.SSECustomerAlgorithm == "" {
|
||||
opt.SSECustomerAlgorithm = sseDefaultAlgorithm
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
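populateSSECustomerKeys expects the customer key as base64-encoded 256-bit AES material and derives the sse_customer_key_sha256 value from it when the config doesn't supply one. A hedged, standard-library-only sketch of generating a matching key/checksum pair for these options:

package main

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	// 256-bit key for AES256, the default algorithm above.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	// Value for sse_customer_key (or the contents of sse_customer_key_file).
	encodedKey := base64.StdEncoding.EncodeToString(key)

	// Value populateSSECustomerKeys would compute for sse_customer_key_sha256.
	digest := sha256.Sum256(key)
	checksum := base64.StdEncoding.EncodeToString(digest[:])

	fmt.Println("sse_customer_key =", encodedKey)
	fmt.Println("sse_customer_key_sha256 =", checksum)
}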
|
||||
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/usingyourencryptionkeys.htm
|
||||
func useBYOKPutObject(fs *Fs, request *objectstorage.PutObjectRequest) {
|
||||
if fs.opt.SSEKMSKeyID != "" {
|
||||
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
|
||||
}
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKHeadObject(fs *Fs, request *objectstorage.HeadObjectRequest) {
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKGetObject(fs *Fs, request *objectstorage.GetObjectRequest) {
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
|
||||
if fs.opt.SSEKMSKeyID != "" {
|
||||
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
|
||||
}
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
|
||||
if fs.opt.SSEKMSKeyID != "" {
|
||||
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
|
||||
}
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
backend/oracleobjectstorage/client.go (new file, 178 lines)
@@ -0,0 +1,178 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rsa"
|
||||
"errors"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/common/auth"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
)
|
||||
|
||||
func expandPath(filepath string) (expandedPath string) {
|
||||
if filepath == "" {
|
||||
return filepath
|
||||
}
|
||||
cleanedPath := path.Clean(filepath)
|
||||
expandedPath = cleanedPath
|
||||
if strings.HasPrefix(cleanedPath, "~") {
|
||||
rest := cleanedPath[2:]
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return expandedPath
|
||||
}
|
||||
expandedPath = path.Join(home, rest)
|
||||
}
|
||||
return
|
||||
}
|
||||
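expandPath only rewrites "~/..." style paths by splicing in the user's home directory (note the cleanedPath[2:] indexing, which assumes the "~/" prefix); other paths are passed through after cleaning. A minimal illustration of the intended behaviour, written as a standalone sketch rather than a call into the backend package:

package main

import (
	"fmt"
	"os"
	"path"
	"strings"
)

// expandUser is a sketch of the same idea as expandPath above: only the
// "~/..." form is expanded, everything else is just cleaned.
func expandUser(p string) string {
	if p == "" {
		return p
	}
	cleaned := path.Clean(p)
	if strings.HasPrefix(cleaned, "~/") {
		home, err := os.UserHomeDir()
		if err != nil {
			return cleaned
		}
		return path.Join(home, cleaned[2:])
	}
	return cleaned
}

func main() {
	fmt.Println(expandUser("~/.oci/config"))   // e.g. /home/user/.oci/config
	fmt.Println(expandUser("/etc/oci/config")) // unchanged apart from cleaning
}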
|
||||
func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
|
||||
switch opt.Provider {
|
||||
case instancePrincipal:
|
||||
return auth.InstancePrincipalConfigurationProvider()
|
||||
case userPrincipal:
|
||||
expandConfigFilePath := expandPath(opt.ConfigFile)
|
||||
if expandConfigFilePath != "" && !fileExists(expandConfigFilePath) {
|
||||
fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", expandConfigFilePath)
|
||||
}
|
||||
return common.CustomProfileConfigProvider(expandConfigFilePath, opt.ConfigProfile), nil
|
||||
case resourcePrincipal:
|
||||
return auth.ResourcePrincipalConfigurationProvider()
|
||||
case noAuth:
|
||||
fs.Infof("client", "using no auth provider")
|
||||
return getNoAuthConfiguration()
|
||||
default:
|
||||
}
|
||||
return common.DefaultConfigProvider(), nil
|
||||
}
|
||||
|
||||
func newObjectStorageClient(ctx context.Context, opt *Options) (*objectstorage.ObjectStorageClient, error) {
|
||||
p, err := getConfigurationProvider(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(p)
|
||||
if err != nil {
|
||||
fs.Errorf(opt.Provider, "failed to create object storage client, %v", err)
|
||||
return nil, err
|
||||
}
|
||||
if opt.Region != "" {
|
||||
client.SetRegion(opt.Region)
|
||||
}
|
||||
modifyClient(ctx, opt, &client.BaseClient)
|
||||
return &client, err
|
||||
}
|
||||
|
||||
func fileExists(filePath string) bool {
|
||||
if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func modifyClient(ctx context.Context, opt *Options, client *common.BaseClient) {
|
||||
client.HTTPClient = getHTTPClient(ctx)
|
||||
if opt.Provider == noAuth {
|
||||
client.Signer = getNoAuthSigner()
|
||||
}
|
||||
}
|
||||
|
||||
// getHTTPClient makes an http client according to the global options
|
||||
// this has rclone specific options support like dump headers, body etc.
|
||||
func getHTTPClient(ctx context.Context) *http.Client {
|
||||
return fshttp.NewClient(ctx)
|
||||
}
|
||||
|
||||
var retryErrorCodes = []int{
|
||||
408, // Request Timeout
|
||||
429, // Rate exceeded.
|
||||
500, // Get occasional 500 Internal Server Error
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Time-out
|
||||
}
|
||||
|
||||
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
// If this is an ocierr object, try and extract more useful information to determine if we should retry
|
||||
if ociError, ok := err.(common.ServiceError); ok {
|
||||
// Simple case, check the original embedded error in case it's generically retryable
|
||||
if fserrors.ShouldRetry(err) {
|
||||
return true, err
|
||||
}
|
||||
// If it is a timeout then we want to retry that
|
||||
if ociError.GetCode() == "RequestTimeout" {
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
// Ok, not an oci error, check for generic failure conditions
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
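shouldRetry is meant to be plugged into the usual pacer retry loop (the same f.pacer.Call pattern seen in the onedrive hunks above), so the listed HTTP status codes and OCI RequestTimeout errors trigger another attempt. A standard-library-only sketch of that retry flow with a simplified shouldRetry; the real helper also consults fserrors and OCI service errors:

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
)

var retryErrorCodes = []int{408, 429, 500, 503, 504}

// shouldRetry mirrors the helper above for plain HTTP failures.
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if ctx.Err() != nil {
		return false, ctx.Err()
	}
	if resp != nil {
		for _, code := range retryErrorCodes {
			if resp.StatusCode == code {
				return true, err
			}
		}
	}
	return false, err
}

func main() {
	ctx := context.Background()
	attempts := 0

	// In the backends this loop is provided by f.pacer.Call; a bare loop is
	// used here so the example stays standard-library only.
	var lastErr error
	for try := 0; try < 3; try++ {
		attempts++
		resp := &http.Response{StatusCode: 503} // pretend the service is overloaded
		retry, err := shouldRetry(ctx, resp, errors.New("service unavailable"))
		lastErr = err
		if !retry {
			break
		}
	}
	fmt.Println("attempts:", attempts, "last error:", lastErr)
}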
|
||||
func getNoAuthConfiguration() (common.ConfigurationProvider, error) {
|
||||
return &noAuthConfigurator{}, nil
|
||||
}
|
||||
|
||||
func getNoAuthSigner() common.HTTPRequestSigner {
|
||||
return &noAuthSigner{}
|
||||
}
|
||||
|
||||
type noAuthConfigurator struct {
|
||||
}
|
||||
|
||||
type noAuthSigner struct {
|
||||
}
|
||||
|
||||
func (n *noAuthSigner) Sign(*http.Request) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) PrivateRSAKey() (*rsa.PrivateKey, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) KeyID() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) TenancyOCID() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) UserOCID() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) KeyFingerprint() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) Region() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) AuthType() (common.AuthConfig, error) {
|
||||
return common.AuthConfig{
|
||||
AuthType: common.UnknownAuthenticationType,
|
||||
IsFromConfigFile: false,
|
||||
OboToken: nil,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ common.ConfigurationProvider = &noAuthConfigurator{}
|
||||
_ common.HTTPRequestSigner = &noAuthSigner{}
|
||||
)
|
||||
backend/oracleobjectstorage/command.go (new file, 228 lines)
@@ -0,0 +1,228 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Command Interface Implementation
|
||||
// ------------------------------------------------------------
|
||||
|
||||
const (
|
||||
operationRename = "rename"
|
||||
operationListMultiPart = "list-multipart-uploads"
|
||||
operationCleanup = "cleanup"
|
||||
)
|
||||
|
||||
var commandHelp = []fs.CommandHelp{{
|
||||
Name: operationRename,
|
||||
Short: "change the name of an object",
|
||||
Long: `This command can be used to rename an object.
|
||||
|
||||
Usage Examples:
|
||||
|
||||
rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
|
||||
`,
|
||||
Opts: nil,
|
||||
}, {
|
||||
Name: operationListMultiPart,
|
||||
Short: "List the unfinished multipart uploads",
|
||||
Long: `This command lists the unfinished multipart uploads in JSON format.
|
||||
|
||||
rclone backend list-multipart-uploads oos:bucket/path/to/object
|
||||
|
||||
It returns a dictionary of buckets with values as lists of unfinished
|
||||
multipart uploads.
|
||||
|
||||
You can call it with no bucket in which case it lists all buckets, with
|
||||
a bucket or with a bucket and path.
|
||||
|
||||
{
|
||||
"test-bucket": [
|
||||
{
|
||||
"namespace": "test-namespace",
|
||||
"bucket": "test-bucket",
|
||||
"object": "600m.bin",
|
||||
"uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
|
||||
"timeCreated": "2022-07-29T06:21:16.595Z",
|
||||
"storageTier": "Standard"
|
||||
}
|
||||
]
}
|
||||
`,
|
||||
}, {
|
||||
Name: operationCleanup,
|
||||
Short: "Remove unfinished multipart uploads.",
|
||||
Long: `This command removes unfinished multipart uploads of age greater than
|
||||
max-age which defaults to 24 hours.
|
||||
|
||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||
it would do.
|
||||
|
||||
rclone backend cleanup oos:bucket/path/to/object
|
||||
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
|
||||
|
||||
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
||||
`,
|
||||
Opts: map[string]string{
|
||||
"max-age": "Max age of upload to delete",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/*
|
||||
Command the backend to run a named command
|
||||
|
||||
The command run is name
|
||||
args may be used to read arguments from
|
||||
opts may be used to read optional arguments from
|
||||
|
||||
The result should be capable of being JSON encoded
|
||||
If it is a string or a []string it will be shown to the user
|
||||
otherwise it will be JSON encoded and shown to the user like that
|
||||
*/
|
||||
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
|
||||
opt map[string]string) (result interface{}, err error) {
|
||||
// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
|
||||
switch commandName {
|
||||
case operationRename:
|
||||
if len(args) < 2 {
|
||||
return nil, fmt.Errorf("path to object or its new name to rename is empty")
|
||||
}
|
||||
remote := args[0]
|
||||
newName := args[1]
|
||||
return f.rename(ctx, remote, newName)
|
||||
case operationListMultiPart:
|
||||
return f.listMultipartUploadsAll(ctx)
|
||||
case operationCleanup:
|
||||
maxAge := 24 * time.Hour
|
||||
if opt["max-age"] != "" {
|
||||
maxAge, err = fs.ParseDuration(opt["max-age"])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bad max-age: %w", err)
|
||||
}
|
||||
}
|
||||
return nil, f.cleanUp(ctx, maxAge)
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
|
||||
if remote == "" {
|
||||
return nil, fmt.Errorf("path to object file cannot be empty")
|
||||
}
|
||||
if newName == "" {
|
||||
return nil, fmt.Errorf("the object's new name cannot be empty")
|
||||
}
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
bucketName, objectPath := o.split()
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Errorf(f, "failed to read object:%v %v ", objectPath, err)
|
||||
if strings.HasPrefix(objectPath, bucketName) {
|
||||
fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name",
|
||||
objectPath, bucketName)
|
||||
}
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
details := objectstorage.RenameObjectDetails{
|
||||
SourceName: common.String(objectPath),
|
||||
NewName: common.String(newName),
|
||||
}
|
||||
request := objectstorage.RenameObjectRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
RenameObjectDetails: details,
|
||||
OpcClientRequestId: nil,
|
||||
RequestMetadata: common.RequestMetadata{},
|
||||
}
|
||||
var response objectstorage.RenameObjectResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
response, err = f.srv.RenameObject(ctx, request)
|
||||
return shouldRetry(ctx, response.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName)
|
||||
return "renamed successfully", nil
|
||||
}
|
||||
|
||||
func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload,
|
||||
err error) {
|
||||
uploadsMap = make(map[string][]*objectstorage.MultipartUpload)
|
||||
bucket, directory := f.split("")
|
||||
if bucket != "" {
|
||||
uploads, err := f.listMultipartUploads(ctx, bucket, directory)
|
||||
if err != nil {
|
||||
return uploadsMap, err
|
||||
}
|
||||
uploadsMap[bucket] = uploads
|
||||
return uploadsMap, nil
|
||||
}
|
||||
entries, err := f.listBuckets(ctx)
|
||||
if err != nil {
|
||||
return uploadsMap, err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
bucket := entry.Remote()
|
||||
uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
|
||||
if listErr != nil {
|
||||
err = listErr
|
||||
fs.Errorf(f, "%v", err)
|
||||
}
|
||||
uploadsMap[bucket] = uploads
|
||||
}
|
||||
return uploadsMap, err
|
||||
}
|
||||
|
||||
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
|
||||
//
|
||||
// Note that rather lazily we treat key as a prefix, so it matches
|
||||
// directories and objects. This could surprise the user if they ask
|
||||
// for "dir" and it returns "dirKey"
|
||||
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
|
||||
uploads []*objectstorage.MultipartUpload, err error) {
|
||||
|
||||
uploads = []*objectstorage.MultipartUpload{}
|
||||
req := objectstorage.ListMultipartUploadsRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
}
|
||||
|
||||
var response objectstorage.ListMultipartUploadsResponse
|
||||
for {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
response, err = f.srv.ListMultipartUploads(ctx, req)
|
||||
return shouldRetry(ctx, response.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
// fs.Debugf(f, "failed to list multi part uploads %v", err)
|
||||
return uploads, err
|
||||
}
|
||||
for index, item := range response.Items {
|
||||
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
|
||||
continue
|
||||
}
|
||||
uploads = append(uploads, &response.Items[index])
|
||||
}
|
||||
if response.OpcNextPage == nil {
|
||||
break
|
||||
}
|
||||
req.Page = response.OpcNextPage
|
||||
}
|
||||
return uploads, nil
|
||||
}
|
||||
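As a usage note (a hedged sketch; bucket and object names are placeholders), the commands registered above can be driven either with the rclone backend form shown in the help text, or over the remote control API with backend/command:

    rclone backend rename oos:bucket path/to/file.bin file-renamed.bin
    rclone backend list-multipart-uploads oos:bucket
    rclone backend cleanup -o max-age=2h oos:bucket
    rclone rc backend/command command=cleanup fs=oos:bucket -o max-age=2h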
156 backend/oracleobjectstorage/copy.go Normal file
@@ -0,0 +1,156 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Implement the Copier optional interface for Fs
// ------------------------------------------------------------
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
// This is stored with the remote path given
|
||||
// It returns the destination Object and a possible error
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
// fs.Debugf(f, "copying %v to %v", src.Remote(), remote)
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
// fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
// Temporary Object under construction
|
||||
dstObj := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
err := f.copy(ctx, dstObj, srcObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.NewObject(ctx, remote)
|
||||
}
|
||||
|
||||
// copy does a server-side copy from dstObj <- srcObj
|
||||
//
|
||||
// The source object's metadata is copied to the destination
// (via DestinationObjectMetadata below)
|
||||
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err error) {
|
||||
srcBucket, srcPath := srcObj.split()
|
||||
dstBucket, dstPath := dstObj.split()
|
||||
if dstBucket != srcBucket {
|
||||
exists, err := f.bucketExists(ctx, dstBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
err = f.makeBucket(ctx, dstBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
copyObjectDetails := objectstorage.CopyObjectDetails{
|
||||
SourceObjectName: common.String(srcPath),
|
||||
DestinationRegion: common.String(dstObj.fs.opt.Region),
|
||||
DestinationNamespace: common.String(dstObj.fs.opt.Namespace),
|
||||
DestinationBucket: common.String(dstBucket),
|
||||
DestinationObjectName: common.String(dstPath),
|
||||
DestinationObjectMetadata: metadataWithOpcPrefix(srcObj.meta),
|
||||
}
|
||||
req := objectstorage.CopyObjectRequest{
|
||||
NamespaceName: common.String(srcObj.fs.opt.Namespace),
|
||||
BucketName: common.String(srcBucket),
|
||||
CopyObjectDetails: copyObjectDetails,
|
||||
}
|
||||
useBYOKCopyObject(f, &req)
|
||||
var resp objectstorage.CopyObjectResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CopyObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
workRequestID := resp.OpcWorkRequestId
|
||||
timeout := time.Duration(f.opt.CopyTimeout)
|
||||
dstName := dstObj.String()
|
||||
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/copyingobjects.htm
|
||||
// To enable server side copy object, customers will have to
|
||||
// grant policy to objectstorage service to manage object-family
|
||||
// Allow service objectstorage-<region_identifier> to manage object-family in tenancy
|
||||
// Another option to avoid the policy is to download and reupload the file.
|
||||
// This download/upload approach will only work up to the 5 GB file size limit
|
||||
err = copyObjectWaitForWorkRequest(ctx, workRequestID, dstName, timeout, f.srv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType string, timeout time.Duration,
|
||||
client *objectstorage.ObjectStorageClient) error {
|
||||
|
||||
stateConf := &StateChangeConf{
|
||||
Pending: []string{
|
||||
string(objectstorage.WorkRequestStatusAccepted),
|
||||
string(objectstorage.WorkRequestStatusInProgress),
|
||||
string(objectstorage.WorkRequestStatusCanceling),
|
||||
},
|
||||
Target: []string{
|
||||
string(objectstorage.WorkRequestSummaryStatusCompleted),
|
||||
string(objectstorage.WorkRequestSummaryStatusCanceled),
|
||||
string(objectstorage.WorkRequestStatusFailed),
|
||||
},
|
||||
Refresh: func() (interface{}, string, error) {
|
||||
getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
|
||||
getWorkRequestRequest.WorkRequestId = wID
|
||||
workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
|
||||
wr := &workRequestResponse.WorkRequest
|
||||
return workRequestResponse, string(wr.Status), err
|
||||
},
|
||||
Timeout: timeout,
|
||||
}
|
||||
|
||||
wrr, e := stateConf.WaitForStateContext(ctx, entityType)
|
||||
if e != nil {
|
||||
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, e)
|
||||
}
|
||||
|
||||
wr := wrr.(objectstorage.GetWorkRequestResponse).WorkRequest
|
||||
if wr.Status == objectstorage.WorkRequestStatusFailed {
|
||||
errorMessage, _ := getObjectStorageErrorFromWorkRequest(ctx, wID, client)
|
||||
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, errorMessage)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getObjectStorageErrorFromWorkRequest(ctx context.Context, workRequestID *string, client *objectstorage.ObjectStorageClient) (string, error) {
|
||||
req := objectstorage.ListWorkRequestErrorsRequest{}
|
||||
req.WorkRequestId = workRequestID
|
||||
res, err := client.ListWorkRequestErrors(ctx, req)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
allErrs := make([]string, 0)
|
||||
for _, errs := range res.Items {
|
||||
allErrs = append(allErrs, *errs.Message)
|
||||
}
|
||||
|
||||
errorMessage := strings.Join(allErrs, "\n")
|
||||
return errorMessage, nil
|
||||
}
|
||||
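A usage sketch for the server-side copy path above (the bucket names and region identifier are placeholders): a same-remote copy such as the one below exercises f.Copy, and requires the object storage service policy mentioned in the comments to be present in the tenancy.

    Allow service objectstorage-us-ashburn-1 to manage object-family in tenancy

    rclone copyto oos:source-bucket/600m.bin oos:destination-bucket/600m.bin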
626 backend/oracleobjectstorage/object.go Normal file
@@ -0,0 +1,626 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/swift/v2"
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Object Interface Implementation
|
||||
// ------------------------------------------------------------
|
||||
|
||||
const (
|
||||
metaMtime = "mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
|
||||
metaMD5Hash = "md5chksum" // the meta key to store md5hash in
|
||||
// StandardTier object storage tier
|
||||
ociMetaPrefix = "opc-meta-"
|
||||
)
|
||||
|
||||
var archive = "archive"
|
||||
var infrequentAccess = "infrequentaccess"
|
||||
var standard = "standard"
|
||||
|
||||
var storageTierMap = map[string]*string{
|
||||
archive: &archive,
|
||||
infrequentAccess: &infrequentAccess,
|
||||
standard: &standard,
|
||||
}
|
||||
|
||||
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
|
||||
|
||||
// Object describes an OCI bucket object
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
md5 string // MD5 hash if known
|
||||
bytes int64 // Size of the object
|
||||
lastModified time.Time // The modified time of the object if known
|
||||
meta map[string]string // The object metadata if known - may be nil
|
||||
mimeType string // Content-Type of the object
|
||||
|
||||
// Metadata as pointers to strings as they often won't be present
|
||||
storageTier *string // e.g. Standard
|
||||
}
|
||||
|
||||
// split returns bucket and bucketPath from the object
|
||||
func (o *Object) split() (bucket, bucketPath string) {
|
||||
return o.fs.split(o.remote)
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
fs.Debugf(o, "trying to read metadata %v", o.remote)
|
||||
if o.meta != nil {
|
||||
return nil
|
||||
}
|
||||
info, err := o.headObject(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return o.decodeMetaDataHead(info)
|
||||
}
|
||||
|
||||
// headObject gets the metadata from the object unconditionally
|
||||
func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObjectResponse, err error) {
|
||||
bucketName, objectPath := o.split()
|
||||
req := objectstorage.HeadObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(objectPath),
|
||||
}
|
||||
useBYOKHeadObject(o.fs, &req)
|
||||
var response objectstorage.HeadObjectResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = o.fs.srv.HeadObject(ctx, req)
|
||||
return shouldRetry(ctx, response.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
if svcErr, ok := err.(common.ServiceError); ok {
|
||||
if svcErr.GetHTTPStatusCode() == http.StatusNotFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
fs.Errorf(o, "Failed to head object: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
o.fs.cache.MarkOK(bucketName)
|
||||
return &response, err
|
||||
}
|
||||
|
||||
func (o *Object) decodeMetaDataHead(info *objectstorage.HeadObjectResponse) (err error) {
|
||||
return o.setMetaData(
|
||||
info.ContentLength,
|
||||
info.ContentMd5,
|
||||
info.ContentType,
|
||||
info.LastModified,
|
||||
info.StorageTier,
|
||||
info.OpcMeta)
|
||||
}
|
||||
|
||||
func (o *Object) decodeMetaDataObject(info *objectstorage.GetObjectResponse) (err error) {
|
||||
return o.setMetaData(
|
||||
info.ContentLength,
|
||||
info.ContentMd5,
|
||||
info.ContentType,
|
||||
info.LastModified,
|
||||
info.StorageTier,
|
||||
info.OpcMeta)
|
||||
}
|
||||
|
||||
func (o *Object) setMetaData(
|
||||
contentLength *int64,
|
||||
contentMd5 *string,
|
||||
contentType *string,
|
||||
lastModified *common.SDKTime,
|
||||
storageTier interface{},
|
||||
meta map[string]string) error {
|
||||
|
||||
if contentLength != nil {
|
||||
o.bytes = *contentLength
|
||||
}
|
||||
if contentMd5 != nil {
|
||||
md5, err := o.base64ToMd5(*contentMd5)
|
||||
if err == nil {
|
||||
o.md5 = md5
|
||||
}
|
||||
}
|
||||
o.meta = meta
|
||||
if o.meta == nil {
|
||||
o.meta = map[string]string{}
|
||||
}
|
||||
// Read MD5 from metadata if present
|
||||
if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
|
||||
md5, err := o.base64ToMd5(md5sumBase64)
|
||||
if err == nil {
|
||||
o.md5 = md5
|
||||
}
|
||||
}
|
||||
if lastModified == nil {
|
||||
o.lastModified = time.Now()
|
||||
fs.Logf(o, "Failed to read last modified")
|
||||
} else {
|
||||
o.lastModified = lastModified.Time
|
||||
}
|
||||
if contentType != nil {
|
||||
o.mimeType = *contentType
|
||||
}
|
||||
if storageTier == nil || storageTier == "" {
|
||||
o.storageTier = storageTierMap[standard]
|
||||
} else {
|
||||
tier := strings.ToLower(fmt.Sprintf("%v", storageTier))
|
||||
o.storageTier = storageTierMap[tier]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) base64ToMd5(md5sumBase64 string) (md5 string, err error) {
|
||||
md5sumBytes, err := base64.StdEncoding.DecodeString(md5sumBase64)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", md5sumBase64, err)
|
||||
return "", err
|
||||
} else if len(md5sumBytes) != 16 {
|
||||
fs.Debugf(o, "failed to read md5sum from metadata %q: wrong length", md5sumBase64)
|
||||
return "", fmt.Errorf("failed to read md5sum from metadata %q: wrong length", md5sumBase64)
|
||||
}
|
||||
return hex.EncodeToString(md5sumBytes), nil
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.bytes
|
||||
}
|
||||
|
||||
// GetTier returns storage class as string
|
||||
func (o *Object) GetTier() string {
|
||||
if o.storageTier == nil || *o.storageTier == "" {
|
||||
return standard
|
||||
}
|
||||
return *o.storageTier
|
||||
}
|
||||
|
||||
// SetTier performs changing storage class
|
||||
func (o *Object) SetTier(tier string) (err error) {
|
||||
ctx := context.TODO()
|
||||
tier = strings.ToLower(tier)
|
||||
bucketName, bucketPath := o.split()
|
||||
tierEnum, ok := objectstorage.GetMappingStorageTierEnum(tier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier %v ", tier)
|
||||
}
|
||||
|
||||
req := objectstorage.UpdateObjectStorageTierRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
UpdateObjectStorageTierDetails: objectstorage.UpdateObjectStorageTierDetails{
|
||||
ObjectName: common.String(bucketPath),
|
||||
StorageTier: tierEnum,
|
||||
},
|
||||
}
|
||||
_, err = o.fs.srv.UpdateObjectStorageTier(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.storageTier = storageTierMap[tier]
|
||||
return err
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return ""
|
||||
}
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// Hash returns the MD5 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
// Convert base64 encoded md5 into lower case hex
|
||||
if o.md5 == "" {
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return o.md5, nil
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
|
||||
if o.fs.ci.UseServerModTime {
|
||||
return o.lastModified
|
||||
}
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return time.Now()
|
||||
}
|
||||
// read mtime out of metadata if available
|
||||
d, ok := o.meta[metaMtime]
|
||||
if !ok || d == "" {
|
||||
return o.lastModified
|
||||
}
|
||||
modTime, err := swift.FloatStringToTime(d)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read mtime from object: %v", err)
|
||||
return o.lastModified
|
||||
}
|
||||
return modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
|
||||
_, err = o.fs.Copy(ctx, o, o.remote)
|
||||
return err
|
||||
}
|
||||
|
||||
// Storable returns if this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
bucketName, bucketPath := o.split()
|
||||
req := objectstorage.DeleteObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
}
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.DeleteObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Open object file
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
bucketName, bucketPath := o.split()
|
||||
req := objectstorage.GetObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
}
|
||||
o.applyGetObjectOptions(&req, options...)
|
||||
useBYOKGetObject(o.fs, &req)
|
||||
var resp objectstorage.GetObjectResponse
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
resp, err = o.fs.srv.GetObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// read size from ContentLength or ContentRange
|
||||
bytes := resp.ContentLength
|
||||
if resp.ContentRange != nil {
|
||||
var contentRange = *resp.ContentRange
|
||||
slash := strings.IndexRune(contentRange, '/')
|
||||
if slash >= 0 {
|
||||
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
|
||||
if err == nil {
|
||||
bytes = &i
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to find length in %q", contentRange)
|
||||
}
|
||||
}
|
||||
err = o.decodeMetaDataObject(&resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o.bytes = *bytes
|
||||
return resp.HTTPResponse().Body, nil
|
||||
}
|
||||
|
||||
// Update an object if it has changed
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
bucketName, bucketPath := o.split()
|
||||
err = o.fs.makeBucket(ctx, bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// determine whether to do a single or multipart upload.
|
||||
size := src.Size()
|
||||
multipart := size >= int64(o.fs.opt.UploadCutoff)
|
||||
|
||||
// Set the mtime in the metadata
|
||||
modTime := src.ModTime(ctx)
|
||||
metadata := map[string]string{
|
||||
metaMtime: swift.TimeToFloatString(modTime),
|
||||
}
|
||||
|
||||
// read the md5sum if available
|
||||
// - for non-multipart
|
||||
// - so we can add a ContentMD5
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
|
||||
// - for multipart provided checksums aren't disabled
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash
|
||||
var md5sumBase64 string
|
||||
var md5sumHex string
|
||||
if !multipart || !o.fs.opt.DisableChecksum {
|
||||
md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(md5sumHex) {
|
||||
hashBytes, err := hex.DecodeString(md5sumHex)
|
||||
if err == nil {
|
||||
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if multipart && !o.fs.opt.DisableChecksum {
|
||||
// Set the md5sum as metadata on the object if
|
||||
// - a multipart upload
|
||||
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
|
||||
// provided checksums aren't disabled
|
||||
metadata[metaMD5Hash] = md5sumBase64
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Guess the content type
|
||||
mimeType := fs.MimeType(ctx, src)
|
||||
|
||||
if multipart {
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
uploadRequest := transfer.UploadRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
ContentType: common.String(mimeType),
|
||||
PartSize: common.Int64(chunkSize),
|
||||
AllowMultipartUploads: common.Bool(true),
|
||||
AllowParrallelUploads: common.Bool(true),
|
||||
ObjectStorageClient: o.fs.srv,
|
||||
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
|
||||
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
|
||||
Metadata: metadataWithOpcPrefix(metadata),
|
||||
}
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
uploadRequest.StorageTier = storageTier
|
||||
}
|
||||
o.applyMultiPutOptions(&uploadRequest, options...)
|
||||
useBYOKUpload(o.fs, &uploadRequest)
|
||||
uploadStreamRequest := transfer.UploadStreamRequest{
|
||||
UploadRequest: uploadRequest,
|
||||
StreamReader: in,
|
||||
}
|
||||
uploadMgr := transfer.NewUploadManager()
|
||||
var uploadID = ""
|
||||
|
||||
defer atexit.OnError(&err, func() {
|
||||
if uploadID == "" {
|
||||
return
|
||||
}
|
||||
if o.fs.opt.LeavePartsOnError {
|
||||
return
|
||||
}
|
||||
fs.Debugf(o, "Cancelling multipart upload")
|
||||
errCancel := o.fs.abortMultiPartUpload(
|
||||
context.Background(),
|
||||
bucketName,
|
||||
bucketPath,
|
||||
uploadID)
|
||||
if errCancel != nil {
|
||||
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
|
||||
}
|
||||
})()
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
|
||||
var httpResponse *http.Response
|
||||
if err == nil {
|
||||
if uploadResponse.Type == transfer.MultipartUpload {
|
||||
if uploadResponse.MultipartUploadResponse != nil {
|
||||
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
|
||||
}
|
||||
} else {
|
||||
if uploadResponse.SinglepartUploadResponse != nil {
|
||||
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
uploadID := ""
|
||||
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
|
||||
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
|
||||
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
|
||||
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
|
||||
}
|
||||
}
|
||||
return shouldRetry(ctx, httpResponse, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(o, "multipart streaming upload failed %v", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
req := objectstorage.PutObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
ContentType: common.String(mimeType),
|
||||
PutObjectBody: io.NopCloser(in),
|
||||
OpcMeta: metadata,
|
||||
}
|
||||
if size >= 0 {
|
||||
req.ContentLength = common.Int64(size)
|
||||
}
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
req.StorageTier = storageTier
|
||||
}
|
||||
o.applyPutOptions(&req, options...)
|
||||
useBYOKPutObject(o.fs, &req)
|
||||
var resp objectstorage.PutObjectResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.PutObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(o, "put object failed %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Read the metadata from the newly created object
|
||||
o.meta = nil // wipe old metadata
|
||||
return o.readMetaData(ctx)
|
||||
}
|
||||
|
||||
func (o *Object) applyPutOptions(req *objectstorage.PutObjectRequest, options ...fs.OpenOption) {
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "cache-control":
|
||||
req.CacheControl = common.String(value)
|
||||
case "content-disposition":
|
||||
req.ContentDisposition = common.String(value)
|
||||
case "content-encoding":
|
||||
req.ContentEncoding = common.String(value)
|
||||
case "content-language":
|
||||
req.ContentLanguage = common.String(value)
|
||||
case "content-type":
|
||||
req.ContentType = common.String(value)
|
||||
default:
|
||||
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
|
||||
req.OpcMeta[lowerKey] = value
|
||||
} else {
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, options ...fs.OpenOption) {
|
||||
fs.FixRangeOption(options, o.bytes)
|
||||
for _, option := range options {
|
||||
switch option.(type) {
|
||||
case *fs.RangeOption, *fs.SeekOption:
|
||||
_, value := option.Header()
|
||||
req.Range = &value
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "cache-control":
|
||||
req.HttpResponseCacheControl = common.String(value)
|
||||
case "content-disposition":
|
||||
req.HttpResponseContentDisposition = common.String(value)
|
||||
case "content-encoding":
|
||||
req.HttpResponseContentEncoding = common.String(value)
|
||||
case "content-language":
|
||||
req.HttpResponseContentLanguage = common.String(value)
|
||||
case "content-type":
|
||||
req.HttpResponseContentType = common.String(value)
|
||||
case "range":
|
||||
// do nothing
|
||||
default:
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "content-encoding":
|
||||
req.ContentEncoding = common.String(value)
|
||||
case "content-language":
|
||||
req.ContentLanguage = common.String(value)
|
||||
case "content-type":
|
||||
req.ContentType = common.String(value)
|
||||
default:
|
||||
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
|
||||
req.Metadata[lowerKey] = value
|
||||
} else {
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func metadataWithOpcPrefix(src map[string]string) map[string]string {
|
||||
dst := make(map[string]string)
|
||||
for lowerKey, value := range src {
|
||||
if !strings.HasPrefix(lowerKey, ociMetaPrefix) {
|
||||
dst[ociMetaPrefix+lowerKey] = value
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
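A small, hedged sketch of how the modification time survives the round trip: Update stores it as a float-seconds string under an opc-meta- prefixed key, and ModTime decodes it again. It is assumed to sit inside this file, so it can use metaMtime, ociMetaPrefix, metadataWithOpcPrefix and the existing swift import.

    func exampleMtimeRoundTrip() {
        modTime := time.Date(2022, 7, 29, 6, 21, 16, 0, time.UTC)

        // What Update stores: "mtime" -> "1659075676", which metadataWithOpcPrefix
        // turns into the request metadata key "opc-meta-mtime".
        meta := metadataWithOpcPrefix(map[string]string{metaMtime: swift.TimeToFloatString(modTime)})
        fmt.Println(meta) // map[opc-meta-mtime:1659075676]

        // What ModTime recovers when reading the object metadata back.
        recovered, err := swift.FloatStringToTime(meta[ociMetaPrefix+metaMtime])
        if err == nil {
            fmt.Println(recovered.UTC()) // 2022-07-29 06:21:16 +0000 UTC
        }
    }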
317 backend/oracleobjectstorage/options.go Normal file
@@ -0,0 +1,317 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
)
|
||||
|
||||
const (
|
||||
maxSizeForCopy = 4768 * 1024 * 1024
|
||||
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
|
||||
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
|
||||
defaultUploadConcurrency = 10
|
||||
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
|
||||
minSleep = 10 * time.Millisecond
|
||||
defaultCopyTimeoutDuration = fs.Duration(time.Minute)
|
||||
)
|
||||
|
||||
const (
|
||||
userPrincipal = "user_principal_auth"
|
||||
instancePrincipal = "instance_principal_auth"
|
||||
resourcePrincipal = "resource_principal_auth"
|
||||
environmentAuth = "env_auth"
|
||||
noAuth = "no_auth"
|
||||
|
||||
userPrincipalHelpText = `use an OCI user and an API key for authentication.
|
||||
you’ll need to put your tenancy OCID, user OCID, region, and the path to and fingerprint of an API key in a config file.
|
||||
https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm`
|
||||
|
||||
instancePrincipalHelpText = `use instance principals to authorize an instance to make API calls.
|
||||
each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
|
||||
https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm`
|
||||
|
||||
resourcePrincipalHelpText = `use resource principals to make API calls`
|
||||
|
||||
environmentAuthHelpText = `automatically pick up the credentials from the runtime (env), first one to provide auth wins`
|
||||
|
||||
noAuthHelpText = `no credentials needed, this is typically for reading public buckets`
|
||||
)
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Provider string `config:"provider"`
|
||||
Compartment string `config:"compartment"`
|
||||
Namespace string `config:"namespace"`
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ConfigFile string `config:"config_file"`
|
||||
ConfigProfile string `config:"config_profile"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
CopyTimeout fs.Duration `config:"copy_timeout"`
|
||||
StorageTier string `config:"storage_tier"`
|
||||
LeavePartsOnError bool `config:"leave_parts_on_error"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
SSEKMSKeyID string `config:"sse_kms_key_id"`
|
||||
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
|
||||
SSECustomerKey string `config:"sse_customer_key"`
|
||||
SSECustomerKeyFile string `config:"sse_customer_key_file"`
|
||||
SSECustomerKeySha256 string `config:"sse_customer_key_sha256"`
|
||||
}
|
||||
|
||||
func newOptions() []fs.Option {
|
||||
return []fs.Option{{
|
||||
Name: fs.ConfigProvider,
|
||||
Help: "Choose your Auth Provider",
|
||||
Required: true,
|
||||
Default: environmentAuth,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: environmentAuth,
|
||||
Help: environmentAuthHelpText,
|
||||
}, {
|
||||
Value: userPrincipal,
|
||||
Help: userPrincipalHelpText,
|
||||
}, {
|
||||
Value: instancePrincipal,
|
||||
Help: instancePrincipalHelpText,
|
||||
}, {
|
||||
Value: resourcePrincipal,
|
||||
Help: resourcePrincipalHelpText,
|
||||
}, {
|
||||
Value: noAuth,
|
||||
Help: noAuthHelpText,
|
||||
}},
|
||||
}, {
|
||||
Name: "namespace",
|
||||
Help: "Object storage namespace",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "compartment",
|
||||
Help: "Object storage compartment OCID",
|
||||
Provider: "!no_auth",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Object storage Region",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Object storage API.\n\nLeave blank to use the default endpoint for the region.",
|
||||
Required: false,
|
||||
}, {
|
||||
Name: "config_file",
|
||||
Help: "Path to OCI config file",
|
||||
Provider: userPrincipal,
|
||||
Default: "~/.oci/config",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "~/.oci/config",
|
||||
Help: "oci configuration file location",
|
||||
}},
|
||||
}, {
|
||||
Name: "config_profile",
|
||||
Help: "Profile name inside the oci config file",
|
||||
Provider: userPrincipal,
|
||||
Default: "Default",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "Default",
|
||||
Help: "Use the default profile",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://github.com/oracle/oci-go-sdk/blob/master/objectstorage/storage_tier.go
|
||||
Name: "storage_tier",
|
||||
Help: "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm",
|
||||
Default: "Standard",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "Standard",
|
||||
Help: "Standard storage tier, this is the default tier",
|
||||
}, {
|
||||
Value: "InfrequentAccess",
|
||||
Help: "InfrequentAccess storage tier",
|
||||
}, {
|
||||
Value: "Archive",
|
||||
Help: "Archive storage tier",
|
||||
}},
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload.
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5 GiB.`,
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Chunk size to use for uploading.
|
||||
|
||||
When uploading files larger than upload_cutoff or files with unknown
|
||||
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
|
||||
photos or google docs) they will be uploaded as multipart uploads
|
||||
using this chunk size.
|
||||
|
||||
Note that "upload_concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
|
||||
If you are transferring large files over high-speed links and you have
|
||||
enough memory, then increasing this will speed up the transfers.
|
||||
|
||||
Rclone will automatically increase the chunk size when uploading a
|
||||
large file of known size to stay below the 10,000 chunks limit.
|
||||
|
||||
Files of unknown size are uploaded with the configured
|
||||
chunk_size. Since the default chunk size is 5 MiB and there can be at
|
||||
most 10,000 chunks, this means that by default the maximum size of
|
||||
a file you can stream upload is 48 GiB. If you wish to stream upload
|
||||
larger files then you will need to increase chunk_size.
|
||||
|
||||
Increasing the chunk size decreases the accuracy of the progress
|
||||
statistics displayed with "-P" flag.
|
||||
`,
|
||||
Default: minChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
If you are uploading small numbers of large files over high-speed links
|
||||
and these uploads do not fully utilize your bandwidth, then increasing
|
||||
this may help to speed up the transfers.`,
|
||||
Default: defaultUploadConcurrency,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_cutoff",
|
||||
Help: `Cutoff for switching to multipart copy.
|
||||
|
||||
Any files larger than this that need to be server-side copied will be
|
||||
copied in chunks of this size.
|
||||
|
||||
The minimum is 0 and the maximum is 5 GiB.`,
|
||||
Default: fs.SizeSuffix(maxSizeForCopy),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_timeout",
|
||||
Help: `Timeout for copy.
|
||||
|
||||
Copy is an asynchronous operation, specify timeout to wait for copy to succeed
|
||||
`,
|
||||
Default: defaultCopyTimeoutDuration,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
Help: `Don't store MD5 checksum with object metadata.
|
||||
|
||||
Normally rclone will calculate the MD5 checksum of the input before
|
||||
uploading it so it can add it to metadata on the object. This is great
|
||||
for data integrity checking but can cause long delays for large files
|
||||
to start uploading.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
// Any UTF-8 character is valid in a key, however it can't handle
|
||||
// invalid UTF-8 and / have a special meaning.
|
||||
//
|
||||
// The SDK can't seem to handle uploading files called '.'
|
||||
// - initial / encoding
|
||||
// - doubled / encoding
|
||||
// - trailing / encoding
|
||||
// so that OSS keys are always valid file names
|
||||
Default: encoder.EncodeInvalidUtf8 |
|
||||
encoder.EncodeSlash |
|
||||
encoder.EncodeDot,
|
||||
}, {
|
||||
Name: "leave_parts_on_error",
|
||||
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts in object storage for manual recovery.
|
||||
|
||||
It should be set to true for resuming uploads across different sessions.
|
||||
|
||||
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
|
||||
additional costs if not cleaned up.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_bucket",
|
||||
Help: `If set, don't attempt to check the bucket exists or create it.
|
||||
|
||||
This can be useful when trying to minimise the number of transactions
|
||||
rclone does if you know the bucket exists already.
|
||||
|
||||
It can also be needed if the user you are using does not have bucket
|
||||
creation permissions.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "sse_customer_key_file",
|
||||
Help: `To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
|
||||
with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_customer_key",
|
||||
Help: `To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
|
||||
encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is
|
||||
needed. For more information, see Using Your Own Keys for Server-Side Encryption
|
||||
(https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm)`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_customer_key_sha256",
|
||||
Help: `If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption
|
||||
key. This value is used to check the integrity of the encryption key. see Using Your Own Keys for
|
||||
Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_kms_key_id",
|
||||
Help: `If using your own master key in vault, this header specifies the
|
||||
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
|
||||
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
|
||||
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_customer_algorithm",
|
||||
Help: `If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm.
|
||||
Object Storage supports "AES256" as the encryption algorithm. For more information, see
|
||||
Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}, {
|
||||
Value: sseDefaultAlgorithm,
|
||||
Help: sseDefaultAlgorithm,
|
||||
}},
|
||||
}}
|
||||
}
|
||||
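To make these options concrete, a hypothetical rclone.conf remote using user_principal_auth might look like the following (every value is a placeholder):

    [oos-demo]
    type = oracleobjectstorage
    provider = user_principal_auth
    namespace = example-namespace
    compartment = ocid1.compartment.oc1..exampleuniqueid
    region = us-ashburn-1
    config_file = ~/.oci/config
    config_profile = Default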
698 backend/oracleobjectstorage/oracleobjectstorage.go Normal file
@@ -0,0 +1,698 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
// Package oracleobjectstorage provides an interface to the OCI object storage system.
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "oracleobjectstorage",
|
||||
Description: "Oracle Cloud Infrastructure Object Storage",
|
||||
Prefix: "oos",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Options: newOptions(),
|
||||
})
|
||||
}
|
||||
|
||||
// Fs represents a remote object storage server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
ci *fs.ConfigInfo // global config
|
||||
features *fs.Features // optional features
|
||||
srv *objectstorage.ObjectStorageClient // the connection to the object storage
|
||||
rootBucket string // bucket part of root (if any)
|
||||
rootDirectory string // directory part of root (if any)
|
||||
cache *bucket.Cache // cache for bucket creation status
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
}
|
||||
|
||||
// NewFs initialises the backend
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = validateSSECustomerKeyOptions(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
objectStorageClient, err := newObjectStorageClient(ctx, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep)))
|
||||
// Set pacer retries to 2 (1 try and 1 retry) because we are
|
||||
// relying on the SDK retry mechanism, but we allow 2 attempts to
// retry directory listings after transient errors
|
||||
pc.SetRetries(2)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
ci: ci,
|
||||
srv: objectStorageClient,
|
||||
cache: bucket.NewCache(),
|
||||
pacer: pc,
|
||||
}
|
||||
f.setRoot(root)
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
BucketBasedRootOK: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
SlowModTime: true,
|
||||
}).Fill(ctx, f)
|
||||
if f.rootBucket != "" && f.rootDirectory != "" && !strings.HasSuffix(root, "/") {
|
||||
// Check to see if the (bucket,directory) is actually an existing file
|
||||
oldRoot := f.root
|
||||
newRoot, leaf := path.Split(oldRoot)
|
||||
f.setRoot(newRoot)
|
||||
_, err := f.NewObject(ctx, leaf)
|
||||
if err != nil {
|
||||
// File doesn't exist or is a directory so return old f
|
||||
f.setRoot(oldRoot)
|
||||
return f, nil
|
||||
}
|
||||
// return an error with fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, err
|
||||
}
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
if cs < minChunkSize {
|
||||
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
if cs > maxUploadCutoff {
|
||||
return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(cs)
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Implement backend that represents a remote object storage server
|
||||
// Fs is the interface a cloud storage system must provide
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
if f.rootBucket == "" {
|
||||
return "oos:root"
|
||||
}
|
||||
if f.rootDirectory == "" {
|
||||
return fmt.Sprintf("oos:bucket %s", f.rootBucket)
|
||||
}
|
||||
return fmt.Sprintf("oos:bucket %s, path %s", f.rootBucket, f.rootDirectory)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Precision of the remote
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Millisecond
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5)
|
||||
}
|
||||
|
||||
// setRoot changes the root of the Fs
|
||||
func (f *Fs) setRoot(root string) {
|
||||
f.root = parsePath(root)
|
||||
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
|
||||
}
|
||||
|
||||
// parsePath parses a remote 'url'
|
||||
func parsePath(path string) (root string) {
|
||||
root = strings.Trim(path, "/")
|
||||
return
|
||||
}
|
||||
|
||||
// split returns bucket and bucketPath from the rootRelativePath
|
||||
// relative to f.root
|
||||
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
|
||||
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
|
||||
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
bucketName, directory := f.split(dir)
|
||||
fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
|
||||
if bucketName == "" {
|
||||
if directory != "" {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
return f.listBuckets(ctx)
|
||||
}
|
||||
return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
|
||||
}
|
||||
|
||||
// listFn is called from list to handle an object.
|
||||
type listFn func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error
|
||||
|
||||
// list the objects into the function supplied from
|
||||
// the bucket and root supplied
|
||||
// (bucket, directory) is the starting directory
|
||||
// If prefix is set then it is removed from all file names
|
||||
// If addBucket is set then it adds the bucket to the start of the remotes generated
|
||||
// If recurse is set the function will recursively list
|
||||
// If limit is > 0 then it limits to that many files (must be less than 1000)
|
||||
|
||||
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int,
|
||||
fn listFn) (err error) {
|
||||
if prefix != "" {
|
||||
prefix += "/"
|
||||
}
|
||||
if directory != "" {
|
||||
directory += "/"
|
||||
}
|
||||
|
||||
delimiter := ""
|
||||
if !recurse {
|
||||
delimiter = "/"
|
||||
}
|
||||
chunkSize := 1000
|
||||
if limit > 0 {
|
||||
chunkSize = limit
|
||||
}
|
||||
var request = objectstorage.ListObjectsRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucket),
|
||||
Prefix: common.String(directory),
|
||||
Limit: common.Int(chunkSize),
|
||||
Fields: common.String("name,size,etag,timeCreated,md5,timeModified,storageTier,archivalState"),
|
||||
}
|
||||
if delimiter != "" {
|
||||
request.Delimiter = common.String(delimiter)
|
||||
}
|
||||
|
||||
for {
|
||||
var resp objectstorage.ListObjectsResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
resp, err = f.srv.ListObjects(ctx, request)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
if ociError, ok := err.(common.ServiceError); ok {
|
||||
// A 404 here means the bucket or path doesn't exist
|
||||
if ociError.GetHTTPStatusCode() == http.StatusNotFound {
|
||||
err = fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
if f.rootBucket == "" {
|
||||
// if listing from the root ignore wrong region requests returning
|
||||
// empty directory
|
||||
if reqErr, ok := err.(common.ServiceError); ok {
|
||||
// 301 if wrong region for bucket
|
||||
if reqErr.GetHTTPStatusCode() == http.StatusMovedPermanently {
|
||||
fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
if !recurse {
|
||||
for _, commonPrefix := range resp.ListObjects.Prefixes {
|
||||
if commonPrefix == "" {
|
||||
fs.Logf(f, "Nil common prefix received")
|
||||
continue
|
||||
}
|
||||
remote := commonPrefix
|
||||
remote = f.opt.Enc.ToStandardPath(remote)
|
||||
if !strings.HasPrefix(remote, prefix) {
|
||||
fs.Logf(f, "Odd name received %q", remote)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
if addBucket {
|
||||
remote = path.Join(bucket, remote)
|
||||
}
|
||||
remote = strings.TrimSuffix(remote, "/")
|
||||
err = fn(remote, &objectstorage.ObjectSummary{Name: &remote}, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := range resp.Objects {
|
||||
object := &resp.Objects[i]
|
||||
// Finish if file name no longer has prefix
|
||||
//if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
|
||||
// return nil
|
||||
//}
|
||||
remote := *object.Name
|
||||
remote = f.opt.Enc.ToStandardPath(remote)
|
||||
if !strings.HasPrefix(remote, prefix) {
|
||||
// fs.Debugf(f, "Odd name received %v", object.Name)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
// Check for directory
|
||||
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
|
||||
if addBucket {
|
||||
remote = path.Join(bucket, remote)
|
||||
}
|
||||
// is this a directory marker?
|
||||
if isDirectory && object.Size != nil && *object.Size == 0 {
|
||||
continue // skip directory marker
|
||||
}
|
||||
if isDirectory && len(remote) > 1 {
|
||||
remote = remote[:len(remote)-1]
|
||||
}
|
||||
err = fn(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// end if no NextStartWith
|
||||
if resp.NextStartWith == nil {
|
||||
break
|
||||
}
|
||||
request.Start = resp.NextStartWith
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert a list item into a DirEntry
|
||||
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectstorage.ObjectSummary, isDirectory bool) (fs.DirEntry, error) {
|
||||
if isDirectory {
|
||||
size := int64(0)
|
||||
if object.Size != nil {
|
||||
size = *object.Size
|
||||
}
|
||||
d := fs.NewDir(remote, time.Time{}).SetSize(size)
|
||||
return d, nil
|
||||
}
|
||||
o, err := f.newObjectWithInfo(ctx, remote, object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
|
||||
fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if entry != nil {
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// bucket must be present if listing succeeded
|
||||
f.cache.MarkOK(bucket)
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// listBuckets returns all the buckets to out
|
||||
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||
if f.opt.Provider == noAuth {
|
||||
return nil, fmt.Errorf("can't list buckets with %v provider, use a valid auth provider in config file", noAuth)
|
||||
}
|
||||
var request = objectstorage.ListBucketsRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
CompartmentId: common.String(f.opt.Compartment),
|
||||
}
|
||||
var resp objectstorage.ListBucketsResponse
|
||||
for {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.ListBuckets(ctx, request)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, item := range resp.Items {
|
||||
bucketName := f.opt.Enc.ToStandardName(*item.Name)
|
||||
f.cache.MarkOK(bucketName)
|
||||
d := fs.NewDir(bucketName, item.TimeCreated.Time)
|
||||
entries = append(entries, d)
|
||||
}
|
||||
if resp.OpcNextPage == nil {
|
||||
break
|
||||
}
|
||||
request.Page = resp.OpcNextPage
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Return an Object from a path
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *objectstorage.ObjectSummary) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if info != nil {
|
||||
// Set info but not meta
|
||||
if info.TimeModified == nil {
|
||||
fs.Logf(o, "Failed to read last modified")
|
||||
o.lastModified = time.Now()
|
||||
} else {
|
||||
o.lastModified = info.TimeModified.Time
|
||||
}
|
||||
if info.Md5 != nil {
|
||||
md5, err := o.base64ToMd5(*info.Md5)
|
||||
if err == nil {
|
||||
o.md5 = md5
|
||||
}
|
||||
}
|
||||
o.bytes = *info.Size
|
||||
o.storageTier = storageTierMap[strings.ToLower(string(info.StorageTier))]
|
||||
} else {
|
||||
err := o.readMetaData(ctx) // reads info and headers, returning an error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(ctx, remote, nil)
|
||||
}
|
||||
|
||||
// Put the object into the bucket
|
||||
// Copy the reader in to the new object which is returned
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.Put(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// Mkdir creates the bucket if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
bucketName, _ := f.split(dir)
|
||||
return f.makeBucket(ctx, bucketName)
|
||||
}
|
||||
|
||||
// makeBucket creates the bucket if it doesn't exist
|
||||
func (f *Fs) makeBucket(ctx context.Context, bucketName string) error {
|
||||
if f.opt.NoCheckBucket {
|
||||
return nil
|
||||
}
|
||||
return f.cache.Create(bucketName, func() error {
|
||||
details := objectstorage.CreateBucketDetails{
|
||||
Name: common.String(bucketName),
|
||||
CompartmentId: common.String(f.opt.Compartment),
|
||||
PublicAccessType: objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess,
|
||||
}
|
||||
req := objectstorage.CreateBucketRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
CreateBucketDetails: details,
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CreateBucket(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err == nil {
|
||||
fs.Infof(f, "Bucket %q created with accessType %q", bucketName,
|
||||
objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess)
|
||||
}
|
||||
if svcErr, ok := err.(common.ServiceError); ok {
|
||||
if code := svcErr.GetCode(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}, func() (bool, error) {
|
||||
return f.bucketExists(ctx, bucketName)
|
||||
})
|
||||
}
|
||||
|
||||
// Check if the bucket exists
|
||||
//
|
||||
// NB this can return incorrect results if called immediately after bucket deletion
|
||||
func (f *Fs) bucketExists(ctx context.Context, bucketName string) (bool, error) {
|
||||
req := objectstorage.HeadBucketRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.HeadBucket(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if err, ok := err.(common.ServiceError); ok {
|
||||
if err.GetHTTPStatusCode() == http.StatusNotFound {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Rmdir deletes an empty bucket. If the bucket is not empty this will fail with an appropriate error
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
bucketName, directory := f.split(dir)
|
||||
if bucketName == "" || directory != "" {
|
||||
return nil
|
||||
}
|
||||
return f.cache.Remove(bucketName, func() error {
|
||||
req := objectstorage.DeleteBucketRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.DeleteBucket(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err == nil {
|
||||
fs.Infof(f, "Bucket %q deleted", bucketName)
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
|
||||
if uploadID == "" {
|
||||
return nil
|
||||
}
|
||||
request := objectstorage.AbortMultipartUploadRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
UploadId: common.String(uploadID),
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.AbortMultipartUpload(ctx, request)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// cleanUpBucket removes all pending multipart uploads in the given bucket that are older than maxAge
|
||||
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration,
|
||||
uploads []*objectstorage.MultipartUpload) (err error) {
|
||||
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
|
||||
for _, upload := range uploads {
|
||||
if upload.TimeCreated != nil && upload.Object != nil && upload.UploadId != nil {
|
||||
age := time.Since(upload.TimeCreated.Time)
|
||||
what := fmt.Sprintf("pending multipart upload for bucket %q key %q dated %v (%v ago)", bucket, *upload.Object,
|
||||
upload.TimeCreated, age)
|
||||
if age > maxAge {
|
||||
fs.Infof(f, "removing %s", what)
|
||||
if operations.SkipDestructive(ctx, what, "remove pending upload") {
|
||||
continue
|
||||
}
|
||||
_ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
|
||||
}
|
||||
} else {
|
||||
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// cleanUp removes all pending multipart uploads older than maxAge
|
||||
func (f *Fs) cleanUp(ctx context.Context, maxAge time.Duration) (err error) {
|
||||
uploadsMap, err := f.listMultipartUploadsAll(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for bucketName, uploads := range uploadsMap {
|
||||
cleanErr := f.cleanUpBucket(ctx, bucketName, maxAge, uploads)
|
||||
if cleanErr != nil {
|
||||
fs.Errorf(f, "Failed to cleanup bucket %q: %v", bucketName, cleanErr)
|
||||
err = cleanErr
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// CleanUp removes all pending multipart uploads older than 24 hours
|
||||
func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||
return f.cleanUp(ctx, 24*time.Hour)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Implement ListRer which is an optional interface for Fs
|
||||
//------------------------------------------------------------
|
||||
|
||||
/*
|
||||
ListR lists the objects and directories of the Fs starting
|
||||
from dir recursively into out.
|
||||
|
||||
dir should be "" to start from the root, and should not
|
||||
have trailing slashes.
|
||||
|
||||
This should return ErrDirNotFound if the directory isn't
|
||||
found.
|
||||
|
||||
It should call callback for each tranche of entries read.
|
||||
These need not be returned in any particular order. If
|
||||
callback returns an error then the listing will stop
|
||||
immediately.
|
||||
|
||||
Don't implement this unless you have a more efficient way
|
||||
of listing recursively than doing a directory traversal.
|
||||
*/
|
||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
bucketName, directory := f.split(dir)
|
||||
list := walk.NewListRHelper(callback)
|
||||
listR := func(bucket, directory, prefix string, addBucket bool) error {
|
||||
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return list.Add(entry)
|
||||
})
|
||||
}
|
||||
if bucketName == "" {
|
||||
entries, err := f.listBuckets(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
err = list.Add(entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bucketName := entry.Remote()
|
||||
err = listR(bucketName, "", f.rootDirectory, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// bucket must be present if listing succeeded
|
||||
f.cache.MarkOK(bucketName)
|
||||
}
|
||||
} else {
|
||||
err = listR(bucketName, directory, f.rootDirectory, f.rootBucket == "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// bucket must be present if listing succeeded
|
||||
f.cache.MarkOK(bucketName)
|
||||
}
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Commander = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.GetTierer = &Object{}
|
||||
_ fs.SetTierer = &Object{}
|
||||
)
|
||||
33
backend/oracleobjectstorage/oracleobjectstorage_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestOracleObjectStorage:",
|
||||
TiersToTest: []string{"standard", "archive"},
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
@@ -0,0 +1,7 @@
|
||||
// Build for oracleobjectstorage for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
//go:build plan9 || solaris || js
|
||||
// +build plan9 solaris js
|
||||
|
||||
package oracleobjectstorage
|
||||
362
backend/oracleobjectstorage/waiter.go
Normal file
@@ -0,0 +1,362 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
var refreshGracePeriod = 30 * time.Second
|
||||
|
||||
// StateRefreshFunc is a function type used for StateChangeConf that is
|
||||
// responsible for refreshing the item being watched for a state change.
|
||||
//
|
||||
// It returns three results. `result` is any object that will be returned
|
||||
// as the final object after waiting for state change. This allows you to
|
||||
// return the final updated object, for example an EC2 instance after refreshing
|
||||
// it. A nil result represents not found.
|
||||
//
|
||||
// `state` is the latest state of that object. And `err` is any error that
|
||||
// may have happened while refreshing the state.
|
||||
type StateRefreshFunc func() (result interface{}, state string, err error)
|
||||
|
||||
// StateChangeConf is the configuration struct used for `WaitForState`.
|
||||
type StateChangeConf struct {
|
||||
Delay time.Duration // Wait this time before starting checks
|
||||
Pending []string // States that are "allowed" and will continue trying
|
||||
Refresh StateRefreshFunc // Refreshes the current state
|
||||
Target []string // Target state
|
||||
Timeout time.Duration // The amount of time to wait before timeout
|
||||
MinTimeout time.Duration // Smallest time to wait before refreshes
|
||||
PollInterval time.Duration // Override MinTimeout/backoff and only poll this often
|
||||
NotFoundChecks int // Number of times to allow not found (nil result from Refresh)
|
||||
|
||||
// This is to work around inconsistent APIs
|
||||
ContinuousTargetOccurrence int // Number of times the Target state has to occur continuously
|
||||
}
|
||||
|
||||
// WaitForStateContext watches an object and waits for it to achieve the state
|
||||
// specified in the configuration using the specified Refresh() func,
|
||||
// waiting the number of seconds specified in the timeout configuration.
|
||||
//
|
||||
// If the Refresh function returns an error, exit immediately with that error.
|
||||
//
|
||||
// If the Refresh function returns a state other than the Target state or one
|
||||
// listed in Pending, return immediately with an error.
|
||||
//
|
||||
// If the Timeout is exceeded before reaching the Target state, return an
|
||||
// error.
|
||||
//
|
||||
// Otherwise, the result is the result of the first call to the Refresh function to
|
||||
// reach the target state.
|
||||
//
|
||||
// Cancellation from the passed in context will cancel the refresh loop
|
||||
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
|
||||
// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
|
||||
|
||||
notfoundTick := 0
|
||||
targetOccurrence := 0
|
||||
|
||||
// Set a default for times to check for not found
|
||||
if conf.NotFoundChecks == 0 {
|
||||
conf.NotFoundChecks = 20
|
||||
}
|
||||
|
||||
if conf.ContinuousTargetOccurrence == 0 {
|
||||
conf.ContinuousTargetOccurrence = 1
|
||||
}
|
||||
|
||||
type Result struct {
|
||||
Result interface{}
|
||||
State string
|
||||
Error error
|
||||
Done bool
|
||||
}
|
||||
|
||||
// Read every result from the refresh loop, waiting for a positive result.Done.
|
||||
resCh := make(chan Result, 1)
|
||||
// cancellation channel for the refresh loop
|
||||
cancelCh := make(chan struct{})
|
||||
|
||||
result := Result{}
|
||||
|
||||
go func() {
|
||||
defer close(resCh)
|
||||
|
||||
select {
|
||||
case <-time.After(conf.Delay):
|
||||
case <-cancelCh:
|
||||
return
|
||||
}
|
||||
|
||||
// start with 0 delay for the first loop
|
||||
var wait time.Duration
|
||||
|
||||
for {
|
||||
// store the last result
|
||||
resCh <- result
|
||||
|
||||
// wait and watch for cancellation
|
||||
select {
|
||||
case <-cancelCh:
|
||||
return
|
||||
case <-time.After(wait):
|
||||
// first round had no wait
|
||||
if wait == 0 {
|
||||
wait = 100 * time.Millisecond
|
||||
}
|
||||
}
|
||||
|
||||
res, currentState, err := conf.Refresh()
|
||||
result = Result{
|
||||
Result: res,
|
||||
State: currentState,
|
||||
Error: err,
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
resCh <- result
|
||||
return
|
||||
}
|
||||
|
||||
// If we're waiting for the absence of a thing, then return
|
||||
if res == nil && len(conf.Target) == 0 {
|
||||
targetOccurrence++
|
||||
if conf.ContinuousTargetOccurrence == targetOccurrence {
|
||||
result.Done = true
|
||||
resCh <- result
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if res == nil {
|
||||
// If we didn't find the resource, check if we have been
|
||||
// not finding it for a while, and if so, report an error.
|
||||
notfoundTick++
|
||||
if notfoundTick > conf.NotFoundChecks {
|
||||
result.Error = &NotFoundError{
|
||||
LastError: err,
|
||||
Retries: notfoundTick,
|
||||
}
|
||||
resCh <- result
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Reset the counter for when a resource isn't found
|
||||
notfoundTick = 0
|
||||
found := false
|
||||
|
||||
for _, allowed := range conf.Target {
|
||||
if currentState == allowed {
|
||||
found = true
|
||||
targetOccurrence++
|
||||
if conf.ContinuousTargetOccurrence == targetOccurrence {
|
||||
result.Done = true
|
||||
resCh <- result
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for _, allowed := range conf.Pending {
|
||||
if currentState == allowed {
|
||||
found = true
|
||||
targetOccurrence = 0
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found && len(conf.Pending) > 0 {
|
||||
result.Error = &UnexpectedStateError{
|
||||
LastError: err,
|
||||
State: result.State,
|
||||
ExpectedState: conf.Target,
|
||||
}
|
||||
resCh <- result
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Wait between refreshes using exponential backoff, except when
|
||||
// waiting for the target state to reoccur.
|
||||
if targetOccurrence == 0 {
|
||||
wait *= 2
|
||||
}
|
||||
|
||||
// If a poll interval has been specified, choose that interval.
|
||||
// Otherwise, bound the default value.
|
||||
if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
|
||||
wait = conf.PollInterval
|
||||
} else {
|
||||
if wait < conf.MinTimeout {
|
||||
wait = conf.MinTimeout
|
||||
} else if wait > 10*time.Second {
|
||||
wait = 10 * time.Second
|
||||
}
|
||||
}
|
||||
|
||||
// fs.Debugf(entityType, "[TRACE] Waiting %s before next try", wait)
|
||||
}
|
||||
}()
|
||||
|
||||
// store the last value result from the refresh loop
|
||||
lastResult := Result{}
|
||||
|
||||
timeout := time.After(conf.Timeout)
|
||||
for {
|
||||
select {
|
||||
case r, ok := <-resCh:
|
||||
// channel closed, so return the last result
|
||||
if !ok {
|
||||
return lastResult.Result, lastResult.Error
|
||||
}
|
||||
|
||||
// we reached the intended state
|
||||
if r.Done {
|
||||
return r.Result, r.Error
|
||||
}
|
||||
|
||||
// still waiting, store the last result
|
||||
lastResult = r
|
||||
case <-ctx.Done():
|
||||
close(cancelCh)
|
||||
return nil, ctx.Err()
|
||||
case <-timeout:
|
||||
// fs.Debugf(entityType, "[WARN] WaitForState timeout after %s", conf.Timeout)
|
||||
// fs.Debugf(entityType, "[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)
|
||||
|
||||
// cancel the goroutine and start our grace period timer
|
||||
close(cancelCh)
|
||||
timeout := time.After(refreshGracePeriod)
|
||||
|
||||
// we need a for loop and a label to break on, because we may have
|
||||
// an extra response value to read, but still want to wait for the
|
||||
// channel to close.
|
||||
forSelect:
|
||||
for {
|
||||
select {
|
||||
case r, ok := <-resCh:
|
||||
if r.Done {
|
||||
// the last refresh loop reached the desired state
|
||||
return r.Result, r.Error
|
||||
}
|
||||
|
||||
if !ok {
|
||||
// the goroutine returned
|
||||
break forSelect
|
||||
}
|
||||
|
||||
// target state not reached, save the result for the
|
||||
// TimeoutError and wait for the channel to close
|
||||
lastResult = r
|
||||
case <-ctx.Done():
|
||||
fs.Errorf(entityType, "Context cancellation detected, abandoning grace period")
|
||||
break forSelect
|
||||
case <-timeout:
|
||||
fs.Errorf(entityType, "WaitForState exceeded refresh grace period")
|
||||
break forSelect
|
||||
}
|
||||
}
|
||||
|
||||
return nil, &TimeoutError{
|
||||
LastError: lastResult.Error,
|
||||
LastState: lastResult.State,
|
||||
Timeout: conf.Timeout,
|
||||
ExpectedState: conf.Target,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
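// Illustrative usage sketch (not part of the original source): waiting for a
// hypothetical long-running operation to reach a "COMPLETED" state with
// StateChangeConf. The state names, timings and the getState callback are
// assumptions for demonstration only.
func exampleWaitForCompletion(ctx context.Context, getState StateRefreshFunc) (interface{}, error) {
	conf := &StateChangeConf{
		Delay:        5 * time.Second,
		Pending:      []string{"ACCEPTED", "IN_PROGRESS"},
		Target:       []string{"COMPLETED"},
		Refresh:      getState,
		Timeout:      10 * time.Minute,
		MinTimeout:   2 * time.Second,
		PollInterval: 5 * time.Second,
	}
	return conf.WaitForStateContext(ctx, "example operation")
}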
|
||||
|
||||
// NotFoundError resource not found error
|
||||
type NotFoundError struct {
|
||||
LastError error
|
||||
LastRequest interface{}
|
||||
LastResponse interface{}
|
||||
Message string
|
||||
Retries int
|
||||
}
|
||||
|
||||
func (e *NotFoundError) Error() string {
|
||||
if e.Message != "" {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
if e.Retries > 0 {
|
||||
return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
|
||||
}
|
||||
|
||||
return "couldn't find resource"
|
||||
}
|
||||
|
||||
func (e *NotFoundError) Unwrap() error {
|
||||
return e.LastError
|
||||
}
|
||||
|
||||
// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
|
||||
type UnexpectedStateError struct {
|
||||
LastError error
|
||||
State string
|
||||
ExpectedState []string
|
||||
}
|
||||
|
||||
func (e *UnexpectedStateError) Error() string {
|
||||
return fmt.Sprintf(
|
||||
"unexpected state '%s', wanted target '%s'. last error: %s",
|
||||
e.State,
|
||||
strings.Join(e.ExpectedState, ", "),
|
||||
e.LastError,
|
||||
)
|
||||
}
|
||||
|
||||
func (e *UnexpectedStateError) Unwrap() error {
|
||||
return e.LastError
|
||||
}
|
||||
|
||||
// TimeoutError is returned when WaitForState times out
|
||||
type TimeoutError struct {
|
||||
LastError error
|
||||
LastState string
|
||||
Timeout time.Duration
|
||||
ExpectedState []string
|
||||
}
|
||||
|
||||
func (e *TimeoutError) Error() string {
|
||||
expectedState := "resource to be gone"
|
||||
if len(e.ExpectedState) > 0 {
|
||||
expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
|
||||
}
|
||||
|
||||
extraInfo := make([]string, 0)
|
||||
if e.LastState != "" {
|
||||
extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
|
||||
}
|
||||
if e.Timeout > 0 {
|
||||
extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
|
||||
}
|
||||
|
||||
suffix := ""
|
||||
if len(extraInfo) > 0 {
|
||||
suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
|
||||
}
|
||||
|
||||
if e.LastError != nil {
|
||||
return fmt.Sprintf("timeout while waiting for %s%s: %s",
|
||||
expectedState, suffix, e.LastError)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("timeout while waiting for %s%s",
|
||||
expectedState, suffix)
|
||||
}
|
||||
|
||||
func (e *TimeoutError) Unwrap() error {
|
||||
return e.LastError
|
||||
}
|
||||
@@ -110,10 +110,11 @@ func init() {
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}, {
|
||||
Name: "root_folder_id",
|
||||
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
||||
Default: "d0",
|
||||
Advanced: true,
|
||||
Name: "root_folder_id",
|
||||
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
||||
Default: "d0",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "hostname",
|
||||
Help: `Hostname to connect to.
|
||||
@@ -138,7 +139,8 @@ with rclone authorize.
|
||||
This is only required when you want to use the cleanup command. Due to a bug
|
||||
in the pcloud API the required API does not support OAuth authentication so
|
||||
we have to rely on user password authentication for it.`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your pcloud password.",
|
||||
|
||||
536
backend/pikpak/api/types.go
Normal file
@@ -0,0 +1,536 @@
|
||||
// Package api has type definitions for pikpak
|
||||
//
|
||||
// Manually obtained from the API responses using Browse Dev. Tool and https://mholt.github.io/json-to-go/
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// "2022-09-17T14:31:06.056+08:00"
|
||||
timeFormat = `"` + time.RFC3339 + `"`
|
||||
)
|
||||
|
||||
// Time represents date and time information for the pikpak API, by using RFC3339
|
||||
type Time time.Time
|
||||
|
||||
// MarshalJSON turns a Time into JSON
|
||||
func (t *Time) MarshalJSON() (out []byte, err error) {
|
||||
timeString := (*time.Time)(t).Format(timeFormat)
|
||||
return []byte(timeString), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into a Time
|
||||
func (t *Time) UnmarshalJSON(data []byte) error {
|
||||
if string(data) == "null" || string(data) == `""` {
|
||||
return nil
|
||||
}
|
||||
newT, err := time.Parse(timeFormat, string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Time(newT)
|
||||
return nil
|
||||
}
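// Illustrative round trip (not part of the original source): the quoted
// RFC3339 format above parses and re-emits a timestamp like this:
//
//	var t Time
//	_ = t.UnmarshalJSON([]byte(`"2022-09-17T14:31:06+08:00"`))
//	out, _ := t.MarshalJSON() // `"2022-09-17T14:31:06+08:00"`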
|
||||
|
||||
// Types of things in Item
|
||||
const (
|
||||
KindOfFolder = "drive#folder"
|
||||
KindOfFile = "drive#file"
|
||||
KindOfFileList = "drive#fileList"
|
||||
KindOfResumable = "drive#resumable"
|
||||
KindOfForm = "drive#form"
|
||||
ThumbnailSizeS = "SIZE_SMALL"
|
||||
ThumbnailSizeM = "SIZE_MEDIUM"
|
||||
ThumbnailSizeL = "SIZE_LARGE"
|
||||
PhaseTypeComplete = "PHASE_TYPE_COMPLETE"
|
||||
PhaseTypeRunning = "PHASE_TYPE_RUNNING"
|
||||
PhaseTypeError = "PHASE_TYPE_ERROR"
|
||||
PhaseTypePending = "PHASE_TYPE_PENDING"
|
||||
UploadTypeForm = "UPLOAD_TYPE_FORM"
|
||||
UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
|
||||
ListLimit = 100
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Error details api error from pikpak
|
||||
type Error struct {
|
||||
Reason string `json:"error"` // short description of the reason, e.g. "file_name_empty" "invalid_request"
|
||||
Code int `json:"error_code"`
|
||||
URL string `json:"error_url,omitempty"`
|
||||
Message string `json:"error_description,omitempty"`
|
||||
// can have either of `error_details` or `details``
|
||||
ErrorDetails []*ErrorDetails `json:"error_details,omitempty"`
|
||||
Details []*ErrorDetails `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
// ErrorDetails contains further details of api error
|
||||
type ErrorDetails struct {
|
||||
Type string `json:"@type,omitempty"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
Domain string `json:"domain,omitempty"`
|
||||
Metadata struct {
|
||||
} `json:"metadata,omitempty"` // TODO: undiscovered yet
|
||||
Locale string `json:"locale,omitempty"` // e.g. "en"
|
||||
Message string `json:"message,omitempty"`
|
||||
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
|
||||
Detail string `json:"detail,omitempty"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := fmt.Sprintf("Error %q (%d)", e.Reason, e.Code)
|
||||
if e.Message != "" {
|
||||
out += ": " + e.Message
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Filters contains parameters for filters when listing.
|
||||
//
|
||||
// possible operators
|
||||
// * in: a list of comma-separated string
|
||||
// * eq: "true" or "false"
|
||||
// * gt or lt: time format string, e.g. "2023-01-28T10:56:49.757+08:00"
|
||||
type Filters struct {
|
||||
Phase map[string]string `json:"phase,omitempty"` // "in" or "eq"
|
||||
Trashed map[string]bool `json:"trashed,omitempty"` // "eq"
|
||||
Kind map[string]string `json:"kind,omitempty"` // "eq"
|
||||
Starred map[string]bool `json:"starred,omitempty"` // "eq"
|
||||
ModifiedTime map[string]string `json:"modified_time,omitempty"` // "gt" or "lt"
|
||||
}
|
||||
|
||||
// Set sets filter values using field name, operator and corresponding value
|
||||
func (f *Filters) Set(field, operator, value string) {
|
||||
if value == "" {
|
||||
// UNSET for empty values
|
||||
return
|
||||
}
|
||||
r := reflect.ValueOf(f)
|
||||
fd := reflect.Indirect(r).FieldByName(field)
|
||||
if v, err := strconv.ParseBool(value); err == nil {
|
||||
fd.Set(reflect.ValueOf(map[string]bool{operator: v}))
|
||||
} else {
|
||||
fd.Set(reflect.ValueOf(map[string]string{operator: value}))
|
||||
}
|
||||
}
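// Illustrative example (not part of the original source): building a Filters
// value that keeps only completed, non-trashed files, using the operators
// described in the doc comment on Filters above.
func exampleFilters() *Filters {
	f := &Filters{}
	f.Set("Phase", "eq", PhaseTypeComplete) // phase eq "PHASE_TYPE_COMPLETE"
	f.Set("Trashed", "eq", "false")         // trashed eq false
	f.Set("Kind", "eq", KindOfFile)         // kind eq "drive#file"
	return f
}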
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Common Elements
|
||||
|
||||
// Link contains a download URL for opening files
|
||||
type Link struct {
|
||||
URL string `json:"url"`
|
||||
Token string `json:"token"`
|
||||
Expire Time `json:"expire"`
|
||||
Type string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// Valid reports whether l is non-nil, has a URL, and is not expired.
|
||||
func (l *Link) Valid() bool {
|
||||
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
|
||||
}
|
||||
|
||||
// URL is a basic form of URL
|
||||
type URL struct {
|
||||
Kind string `json:"kind,omitempty"` // e.g. "upload#url"
|
||||
URL string `json:"url,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Base Elements
|
||||
|
||||
// FileList contains a list of File elements
|
||||
type FileList struct {
|
||||
Kind string `json:"kind,omitempty"` // drive#fileList
|
||||
Files []*File `json:"files,omitempty"`
|
||||
NextPageToken string `json:"next_page_token"`
|
||||
Version string `json:"version,omitempty"`
|
||||
VersionOutdated bool `json:"version_outdated,omitempty"`
|
||||
}
|
||||
|
||||
// File is a basic element representing a single file object
|
||||
//
|
||||
// There are two types of download links,
|
||||
// 1) one from File.WebContentLink or File.Links.ApplicationOctetStream.URL and
|
||||
// 2) the other from File.Medias[].Link.URL.
|
||||
// Empirically, 2) is less restrictive for multiple concurrent range-requests
|
||||
// for a single file, i.e. it supports a higher `--multi-thread-streams=N`.
|
||||
// However, it is not generally applicable as it is only available for media.
|
||||
type File struct {
|
||||
Apps []*FileApp `json:"apps,omitempty"`
|
||||
Audit *FileAudit `json:"audit,omitempty"`
|
||||
Collection string `json:"collection,omitempty"` // TODO
|
||||
CreatedTime Time `json:"created_time,omitempty"`
|
||||
DeleteTime Time `json:"delete_time,omitempty"`
|
||||
FileCategory string `json:"file_category,omitempty"`
|
||||
FileExtension string `json:"file_extension,omitempty"`
|
||||
FolderType string `json:"folder_type,omitempty"`
|
||||
Hash string `json:"hash,omitempty"` // sha1 but NOT a valid file hash. looks like a torrent hash
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Kind string `json:"kind,omitempty"` // "drive#file"
|
||||
Links *FileLinks `json:"links,omitempty"`
|
||||
Md5Checksum string `json:"md5_checksum,omitempty"`
|
||||
Medias []*Media `json:"medias,omitempty"`
|
||||
MimeType string `json:"mime_type,omitempty"`
|
||||
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
|
||||
Name string `json:"name,omitempty"`
|
||||
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
|
||||
OriginalURL string `json:"original_url,omitempty"`
|
||||
Params *FileParams `json:"params,omitempty"`
|
||||
ParentID string `json:"parent_id,omitempty"`
|
||||
Phase string `json:"phase,omitempty"`
|
||||
Revision int `json:"revision,omitempty,string"`
|
||||
Size int64 `json:"size,omitempty,string"`
|
||||
SortName string `json:"sort_name,omitempty"`
|
||||
Space string `json:"space,omitempty"`
|
||||
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
|
||||
Starred bool `json:"starred,omitempty"`
|
||||
ThumbnailLink string `json:"thumbnail_link,omitempty"`
|
||||
Trashed bool `json:"trashed,omitempty"`
|
||||
UserID string `json:"user_id,omitempty"`
|
||||
UserModifiedTime Time `json:"user_modified_time,omitempty"`
|
||||
WebContentLink string `json:"web_content_link,omitempty"`
|
||||
Writable bool `json:"writable,omitempty"`
|
||||
}
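// Illustrative sketch (not part of the original source, helper name assumed):
// choosing a download URL per the File doc comment above, preferring the media
// link when present and valid, then falling back to the octet-stream link or
// web_content_link.
func preferredDownloadURL(file *File) string {
	for _, m := range file.Medias {
		if m.IsOrigin && m.Link.Valid() {
			return m.Link.URL
		}
	}
	if file.Links != nil && file.Links.ApplicationOctetStream.Valid() {
		return file.Links.ApplicationOctetStream.URL
	}
	return file.WebContentLink
}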
|
||||
|
||||
// FileLinks includes links to file at backend
|
||||
type FileLinks struct {
|
||||
ApplicationOctetStream *Link `json:"application/octet-stream,omitempty"`
|
||||
}
|
||||
|
||||
// FileAudit contains audit information for the file
|
||||
type FileAudit struct {
|
||||
Status string `json:"status,omitempty"` // "STATUS_OK"
|
||||
Message string `json:"message,omitempty"`
|
||||
Title string `json:"title,omitempty"`
|
||||
}
|
||||
|
||||
// Media contains info about supported version of media, e.g. original, transcoded, etc
|
||||
type Media struct {
|
||||
MediaID string `json:"media_id,omitempty"`
|
||||
MediaName string `json:"media_name,omitempty"`
|
||||
Video struct {
|
||||
Height int `json:"height,omitempty"`
|
||||
Width int `json:"width,omitempty"`
|
||||
Duration int64 `json:"duration,omitempty"`
|
||||
BitRate int `json:"bit_rate,omitempty"`
|
||||
FrameRate int `json:"frame_rate,omitempty"`
|
||||
VideoCodec string `json:"video_codec,omitempty"` // "h264", "hevc"
|
||||
AudioCodec string `json:"audio_codec,omitempty"` // "pcm_bluray", "aac"
|
||||
VideoType string `json:"video_type,omitempty"` // "mpegts"
|
||||
HdrType string `json:"hdr_type,omitempty"`
|
||||
} `json:"video,omitempty"`
|
||||
Link *Link `json:"link,omitempty"`
|
||||
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
|
||||
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
|
||||
RedirectLink string `json:"redirect_link,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
IsDefault bool `json:"is_default,omitempty"`
|
||||
Priority int `json:"priority,omitempty"`
|
||||
IsOrigin bool `json:"is_origin,omitempty"`
|
||||
ResolutionName string `json:"resolution_name,omitempty"`
|
||||
IsVisible bool `json:"is_visible,omitempty"`
|
||||
Category string `json:"category,omitempty"`
|
||||
}
|
||||
|
||||
// FileParams includes parameters for instant open
|
||||
type FileParams struct {
|
||||
Duration int64 `json:"duration,omitempty,string"` // in seconds
|
||||
Height int `json:"height,omitempty,string"`
|
||||
Platform string `json:"platform,omitempty"` // "Upload"
|
||||
PlatformIcon string `json:"platform_icon,omitempty"`
|
||||
URL string `json:"url,omitempty"`
|
||||
Width int `json:"width,omitempty,string"`
|
||||
}
|
||||
|
||||
// FileApp includes parameters for instant open
|
||||
type FileApp struct {
|
||||
ID string `json:"id,omitempty"` // "decompress" for rar files
|
||||
Name string `json:"name,omitempty"` // "decompress" for rar files
|
||||
Access []interface{} `json:"access,omitempty"`
|
||||
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
|
||||
RedirectLink string `json:"redirect_link,omitempty"`
|
||||
VipTypes []interface{} `json:"vip_types,omitempty"`
|
||||
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
IsDefault bool `json:"is_default,omitempty"`
|
||||
Params struct {
|
||||
} `json:"params,omitempty"` // TODO
|
||||
CategoryIds []interface{} `json:"category_ids,omitempty"`
|
||||
AdSceneType int `json:"ad_scene_type,omitempty"`
|
||||
Space string `json:"space,omitempty"`
|
||||
Links struct {
|
||||
} `json:"links,omitempty"` // TODO
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// TaskList contains a list of Task elements
|
||||
type TaskList struct {
|
||||
Tasks []*Task `json:"tasks,omitempty"` // "drive#task"
|
||||
NextPageToken string `json:"next_page_token"`
|
||||
ExpiresIn int `json:"expires_in,omitempty"`
|
||||
}
|
||||
|
||||
// Task is a basic element representing a single task such as offline download and upload
|
||||
type Task struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#task"
|
||||
ID string `json:"id,omitempty"` // task id?
|
||||
Name string `json:"name,omitempty"` // torrent name?
|
||||
Type string `json:"type,omitempty"` // "offline"
|
||||
UserID string `json:"user_id,omitempty"`
|
||||
Statuses []interface{} `json:"statuses,omitempty"` // TODO
|
||||
StatusSize int `json:"status_size,omitempty"` // TODO
|
||||
Params *TaskParams `json:"params,omitempty"` // TODO
|
||||
FileID string `json:"file_id,omitempty"`
|
||||
FileName string `json:"file_name,omitempty"`
|
||||
FileSize string `json:"file_size,omitempty"`
|
||||
Message string `json:"message,omitempty"` // e.g. "Saving"
|
||||
CreatedTime Time `json:"created_time,omitempty"`
|
||||
UpdatedTime Time `json:"updated_time,omitempty"`
|
||||
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
|
||||
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
|
||||
Progress int `json:"progress,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
Callback string `json:"callback,omitempty"`
|
||||
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
|
||||
Space string `json:"space,omitempty"`
|
||||
}
|
||||
|
||||
// TaskParams includes parameters informing status of Task
|
||||
type TaskParams struct {
|
||||
Age string `json:"age,omitempty"`
|
||||
PredictSpeed string `json:"predict_speed,omitempty"`
|
||||
PredictType string `json:"predict_type,omitempty"`
|
||||
URL string `json:"url,omitempty"`
|
||||
}
|
||||
|
||||
// Form contains parameters for upload by multipart/form-data
|
||||
type Form struct {
|
||||
Headers struct{} `json:"headers"`
|
||||
Kind string `json:"kind"` // "drive#form"
|
||||
Method string `json:"method"` // "POST"
|
||||
MultiParts struct {
|
||||
OSSAccessKeyID string `json:"OSSAccessKeyId"`
|
||||
Signature string `json:"Signature"`
|
||||
Callback string `json:"callback"`
|
||||
Key string `json:"key"`
|
||||
Policy string `json:"policy"`
|
||||
XUserData string `json:"x:user_data"`
|
||||
} `json:"multi_parts"`
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
// Resumable contains parameters for upload by resumable
|
||||
type Resumable struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#resumable"
|
||||
Provider string `json:"provider,omitempty"` // e.g. "PROVIDER_ALIYUN"
|
||||
Params *ResumableParams `json:"params,omitempty"`
|
||||
}
|
||||
|
||||
// ResumableParams specifies resumable parameters
|
||||
type ResumableParams struct {
|
||||
AccessKeyID string `json:"access_key_id,omitempty"`
|
||||
AccessKeySecret string `json:"access_key_secret,omitempty"`
|
||||
Bucket string `json:"bucket,omitempty"`
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
Expiration Time `json:"expiration,omitempty"`
|
||||
Key string `json:"key,omitempty"`
|
||||
SecurityToken string `json:"security_token,omitempty"`
|
||||
}
|
||||
|
||||
// FileInArchive is a basic element in archive
|
||||
type FileInArchive struct {
|
||||
Index int `json:"index,omitempty"`
|
||||
Filename string `json:"filename,omitempty"`
|
||||
Filesize string `json:"filesize,omitempty"`
|
||||
MimeType string `json:"mime_type,omitempty"`
|
||||
Gcid string `json:"gcid,omitempty"`
|
||||
Kind string `json:"kind,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
Path string `json:"path,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// NewFile is a response to RequestNewFile
|
||||
type NewFile struct {
|
||||
File *File `json:"file,omitempty"`
|
||||
Form *Form `json:"form,omitempty"`
|
||||
Resumable *Resumable `json:"resumable,omitempty"`
|
||||
Task *Task `json:"task,omitempty"` // null in this case
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
|
||||
}
|
||||
|
||||
// NewTask is a response to RequestNewTask
|
||||
type NewTask struct {
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
|
||||
File *File `json:"file,omitempty"` // null in this case
|
||||
Task *Task `json:"task,omitempty"`
|
||||
URL *URL `json:"url,omitempty"` // {"kind": "upload#url"}
|
||||
}
|
||||
|
||||
// About informs drive status
|
||||
type About struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#about"
|
||||
Quota *Quota `json:"quota,omitempty"`
|
||||
ExpiresAt string `json:"expires_at,omitempty"`
|
||||
Quotas struct {
|
||||
} `json:"quotas,omitempty"` // maybe []*Quota?
|
||||
}
|
||||
|
||||
// Quota informs drive quota
|
||||
type Quota struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#quota"
|
||||
Limit int64 `json:"limit,omitempty,string"` // limit in bytes
|
||||
Usage int64 `json:"usage,omitempty,string"` // bytes in use
|
||||
UsageInTrash int64 `json:"usage_in_trash,omitempty,string"` // bytes in trash but this seems not working
|
||||
PlayTimesLimit string `json:"play_times_limit,omitempty"` // maybe in seconds
|
||||
PlayTimesUsage string `json:"play_times_usage,omitempty"` // maybe in seconds
|
||||
}
|
||||
|
||||
// Share is a response to RequestShare
|
||||
//
|
||||
// used in PublicLink()
|
||||
type Share struct {
|
||||
ShareID string `json:"share_id,omitempty"`
|
||||
ShareURL string `json:"share_url,omitempty"`
|
||||
PassCode string `json:"pass_code,omitempty"`
|
||||
ShareText string `json:"share_text,omitempty"`
|
||||
}
|
||||
|
||||
// User contains user account information
|
||||
//
|
||||
// GET https://user.mypikpak.com/v1/user/me
|
||||
type User struct {
|
||||
Sub string `json:"sub,omitempty"` // userid for internal use
|
||||
Name string `json:"name,omitempty"` // Username
|
||||
Picture string `json:"picture,omitempty"` // URL to Avatar image
|
||||
Email string `json:"email,omitempty"` // redacted email address
|
||||
Providers *[]UserProvider `json:"providers,omitempty"` // OAuth provider
|
||||
PhoneNumber string `json:"phone_number,omitempty"`
|
||||
Password string `json:"password,omitempty"` // "SET" if configured
|
||||
Status string `json:"status,omitempty"` // "ACTIVE"
|
||||
CreatedAt Time `json:"created_at,omitempty"`
|
||||
PasswordUpdatedAt Time `json:"password_updated_at,omitempty"`
|
||||
}
|
||||
|
||||
// UserProvider details third-party authentication
|
||||
type UserProvider struct {
|
||||
ID string `json:"id,omitempty"` // e.g. "google.com"
|
||||
ProviderUserID string `json:"provider_user_id,omitempty"`
|
||||
Name string `json:"name,omitempty"` // username
|
||||
}
|
||||
|
||||
// VIP includes subscription details about premium account
|
||||
//
|
||||
// GET https://api-drive.mypikpak.com/drive/v1/privilege/vip
|
||||
type VIP struct {
|
||||
Result string `json:"result,omitempty"` // "ACCEPTED"
|
||||
Message string `json:"message,omitempty"`
|
||||
RedirectURI string `json:"redirect_uri,omitempty"`
|
||||
Data struct {
|
||||
Expire Time `json:"expire,omitempty"`
|
||||
Status string `json:"status,omitempty"` // "invalid" or "ok"
|
||||
Type string `json:"type,omitempty"` // "novip" or "platinum"
|
||||
UserID string `json:"user_id,omitempty"` // same as User.Sub
|
||||
} `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
// DecompressResult is a response to RequestDecompress
|
||||
type DecompressResult struct {
|
||||
Status string `json:"status,omitempty"` // "OK"
|
||||
StatusText string `json:"status_text,omitempty"`
|
||||
TaskID string `json:"task_id,omitempty"` // same as File.Id
|
||||
FilesNum int `json:"files_num,omitempty"` // number of files in archive
|
||||
RedirectLink string `json:"redirect_link,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// RequestShare is to request for file share
|
||||
type RequestShare struct {
|
||||
FileIds []string `json:"file_ids,omitempty"`
|
||||
ShareTo string `json:"share_to,omitempty"` // "publiclink",
|
||||
ExpirationDays int `json:"expiration_days,omitempty"` // -1 = 'forever'
|
||||
PassCodeOption string `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
|
||||
}
|
||||
|
||||
// RequestBatch is to request for batch actions
|
||||
type RequestBatch struct {
|
||||
Ids []string `json:"ids,omitempty"`
|
||||
To map[string]string `json:"to,omitempty"`
|
||||
}
|
||||
|
||||
// RequestNewFile is to request for creating a new `drive#folder` or `drive#file`
|
||||
type RequestNewFile struct {
|
||||
// always required
|
||||
Kind string `json:"kind"` // "drive#folder" or "drive#file"
|
||||
Name string `json:"name"`
|
||||
ParentID string `json:"parent_id"`
|
||||
FolderType string `json:"folder_type"`
|
||||
// only when uploading a new file
|
||||
Hash string `json:"hash,omitempty"` // sha1sum
|
||||
Resumable map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
|
||||
Size int64 `json:"size,omitempty"`
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
|
||||
}
|
||||
|
||||
// RequestNewTask is to request for creating a new task like offline downloads
|
||||
//
|
||||
// Name and ParentID can be left empty.
|
||||
type RequestNewTask struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#file"
|
||||
Name string `json:"name,omitempty"`
|
||||
ParentID string `json:"parent_id,omitempty"`
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
|
||||
URL *URL `json:"url,omitempty"` // {"url": downloadUrl}
|
||||
FolderType string `json:"folder_type,omitempty"` // "" if parent_id else "DOWNLOAD"
|
||||
}
|
||||
|
||||
// RequestDecompress is to request for decompress of archive files
|
||||
type RequestDecompress struct {
|
||||
Gcid string `json:"gcid,omitempty"` // same as File.Hash
|
||||
Password string `json:"password,omitempty"` // ""
|
||||
FileID string `json:"file_id,omitempty"`
|
||||
Files []*FileInArchive `json:"files,omitempty"` // can request selected files to be decompressed
|
||||
DefaultParent bool `json:"default_parent,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// NOT implemented YET
|
||||
|
||||
// RequestArchiveFileList is to request for a list of files in archive
|
||||
//
|
||||
// POST https://api-drive.mypikpak.com/decompress/v1/list
|
||||
type RequestArchiveFileList struct {
|
||||
Gcid string `json:"gcid,omitempty"` // same as api.File.Hash
|
||||
Path string `json:"path,omitempty"` // "" by default
|
||||
Password string `json:"password,omitempty"` // "" by default
|
||||
FileID string `json:"file_id,omitempty"`
|
||||
}
|
||||
|
||||
// ArchiveFileList is a response to RequestArchiveFileList
|
||||
type ArchiveFileList struct {
|
||||
Status string `json:"status,omitempty"` // "OK"
|
||||
StatusText string `json:"status_text,omitempty"` // ""
|
||||
TaskID string `json:"task_id,omitempty"` // ""
|
||||
CurrentPath string `json:"current_path,omitempty"` // ""
|
||||
Title string `json:"title,omitempty"`
|
||||
FileSize int64 `json:"file_size,omitempty"`
|
||||
Gcid string `json:"gcid,omitempty"` // same as File.Hash
|
||||
Files []*FileInArchive `json:"files,omitempty"`
|
||||
}
|
||||
253
backend/pikpak/helper.go
Normal file
@@ -0,0 +1,253 @@
|
||||
package pikpak
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/rclone/rclone/backend/pikpak/api"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// Globals
|
||||
const (
|
||||
cachePrefix = "rclone-pikpak-sha1sum-"
|
||||
)
|
||||
|
||||
// requestDecompress requests decompress of compressed files
|
||||
func (f *Fs) requestDecompress(ctx context.Context, file *api.File, password string) (info *api.DecompressResult, err error) {
|
||||
req := &api.RequestDecompress{
|
||||
Gcid: file.Hash,
|
||||
Password: password,
|
||||
FileID: file.ID,
|
||||
Files: []*api.FileInArchive{},
|
||||
DefaultParent: true,
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/decompress/v1/decompress",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// getUserInfo gets UserInfo from API
|
||||
func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: "https://user.mypikpak.com/v1/user/me",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get userinfo: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getVIPInfo gets VIPInfo from API
|
||||
func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: "https://api-drive.mypikpak.com/drive/v1/privilege/vip",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get vip info: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// requestBatchAction requests batch actions to API
|
||||
//
|
||||
// action can be one of batch{Copy,Delete,Trash,Untrash}
|
||||
func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files:" + action,
|
||||
NoResponse: true, // Only returns `{"task_id":""}`
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("batch action %q failed: %w", action, err)
|
||||
}
|
||||
return nil
|
||||
}
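// Illustrative example (not part of the original source, helper name assumed):
// trashing a set of files with the batchTrash action listed in the doc comment
// above.
func (f *Fs) exampleTrashFiles(ctx context.Context, ids []string) error {
	return f.requestBatchAction(ctx, "batchTrash", &api.RequestBatch{Ids: ids})
}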
|
||||
|
||||
// requestNewTask requests a new api.NewTask and returns api.Task
|
||||
func (f *Fs) requestNewTask(ctx context.Context, req *api.RequestNewTask) (info *api.Task, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files",
|
||||
}
|
||||
var newTask api.NewTask
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &newTask)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newTask.Task, nil
|
||||
}
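// Illustrative sketch (not part of the original source, helper name assumed):
// requesting an offline download task for a remote URL, following the field
// notes on api.RequestNewTask (Name and ParentID left empty, so FolderType is
// "DOWNLOAD").
func (f *Fs) exampleOfflineDownload(ctx context.Context, downloadURL string) (*api.Task, error) {
	req := &api.RequestNewTask{
		Kind:       api.KindOfFile,
		UploadType: "UPLOAD_TYPE_URL",
		URL:        &api.URL{URL: downloadURL},
		FolderType: "DOWNLOAD",
	}
	return f.requestNewTask(ctx, req)
}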
|
||||
|
||||
// requestNewFile requests a new api.NewFile and returns api.File
|
||||
func (f *Fs) requestNewFile(ctx context.Context, req *api.RequestNewFile) (info *api.NewFile, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// getFile gets api.File from API for the ID passed
|
||||
// and returns rich information containing additional fields below
|
||||
// * web_content_link
|
||||
// * thumbnail_link
|
||||
// * links
|
||||
// * medias
|
||||
func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/drive/v1/files/" + ID,
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
if err == nil && info.Phase != api.PhaseTypeComplete {
|
||||
// could be pending right after file is created/uploaded.
|
||||
return true, errors.New("not PHASE_TYPE_COMPLETE")
|
||||
}
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
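Because getFile retries inside the pacer until the file reports PHASE_TYPE_COMPLETE, a caller can treat it as a simple poll for a freshly created file. A minimal sketch, assuming the web_content_link listed above is exposed as a WebContentLink field on api.File (that field name and newFileID are assumptions, not taken from this diff):

	// hedged sketch: wait for the file to complete, then log its download link
	file, err := f.getFile(ctx, newFileID)
	if err != nil {
		return err
	}
	fs.Debugf(f, "web content link: %q", file.WebContentLink)
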
// patchFile updates attributes of the file by ID
//
// currently known patchable fields are
// * name
func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api.File, err error) {
	opts := rest.Opts{
		Method: "PATCH",
		Path:   "/drive/v1/files/" + ID,
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
		return f.shouldRetry(ctx, resp, err)
	})
	return
}

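Since name is the only field currently known to be patchable, renaming is the obvious use of patchFile. A minimal sketch, assuming api.File carries the name in a Name field and that fileID and newLeaf are supplied by the caller:

	// hedged sketch: rename a file by patching its name attribute
	info, err := f.patchFile(ctx, fileID, &api.File{Name: newLeaf})
	if err != nil {
		return err
	}
	// info holds the refreshed metadata returned by the API
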
// getAbout gets drive#quota information from server
func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/drive/v1/about",
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
		return f.shouldRetry(ctx, resp, err)
	})
	return
}

// requestShare returns information about sharable links
func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/drive/v1/share",
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
		return f.shouldRetry(ctx, resp, err)
	})
	return
}

// Read the sha1 of in returning a reader which will read the same contents
//
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
	// we need an SHA1
	hash := sha1.New()
	// use the teeReader to write to the local file AND calculate the SHA1 while doing so
	teeReader := io.TeeReader(in, hash)

	// nothing to clean up by default
	cleanup = func() {}

	// don't cache small files on disk to reduce wear of the disk
	if size > threshold {
		var tempFile *os.File

		// create the cache file
		tempFile, err = os.CreateTemp("", cachePrefix)
		if err != nil {
			return
		}

		_ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows

		// clean up the file after we are done downloading
		cleanup = func() {
			// the file should normally already be closed, but just to make sure
			_ = tempFile.Close()
			_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
		}

		// copy the ENTIRE file to disk and calculate the SHA1 in the process
		if _, err = io.Copy(tempFile, teeReader); err != nil {
			return
		}
		// jump to the start of the local file so we can pass it along
		if _, err = tempFile.Seek(0, 0); err != nil {
			return
		}

		// replace the already read source with a reader of our cached file
		out = tempFile
	} else {
		// that's a small file, just read it into memory
		var inData []byte
		inData, err = io.ReadAll(teeReader)
		if err != nil {
			return
		}

		// set the reader to our read memory block
		out = bytes.NewReader(inData)
	}
	return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
}
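The doc comment pins down the contract of readSHA1: the cleanup function must run once the returned reader is no longer needed, whether or not anything failed. A minimal sketch of a caller honouring that contract (in, size, threshold and upload are placeholders for whatever the caller already has):

	// hedged sketch: hash the input, then re-read the same bytes for the upload
	sha1sum, rd, cleanup, err := readSHA1(in, size, threshold)
	defer cleanup() // runs regardless of whether readSHA1 or the upload failed
	if err != nil {
		return err
	}
	return upload(ctx, rd, sha1sum)
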
backend/pikpak/pikpak.go (1718 lines): file diff suppressed because it is too large
backend/pikpak/pikpak_test.go (17 lines)
@@ -0,0 +1,17 @@
// Test PikPak filesystem interface
package pikpak_test

import (
	"testing"

	"github.com/rclone/rclone/backend/pikpak"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestPikPak:",
		NilObject:  (*pikpak.Object)(nil),
	})
}
@@ -82,14 +82,15 @@ func init() {
 				OAuth2Config: oauthConfig,
 			})
 		},
-		Options: []fs.Option{{
+		Options: append(oauthutil.SharedOptions, []fs.Option{{
 			Name: "api_key",
 			Help: `API Key.
 
 This is not normally used - use oauth instead.
 `,
-			Hide:    fs.OptionHideBoth,
-			Default: "",
+			Hide:      fs.OptionHideBoth,
+			Default:   "",
+			Sensitive: true,
 		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,
@@ -99,7 +100,7 @@ This is not normally used - use oauth instead.
 				encoder.EncodeBackSlash |
 				encoder.EncodeDoubleQuote |
 				encoder.EncodeInvalidUtf8),
-		}},
+		}}...),
 	})
 }

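Read through the interleaved old and new lines, the net effect of the hunk above is an option list wrapped in append(oauthutil.SharedOptions, ...) so the shared OAuth options are registered alongside the backend-specific ones, with api_key additionally marked Sensitive. A condensed sketch of the resulting literal, keeping only the identifiers visible in the hunk:

	Options: append(oauthutil.SharedOptions, []fs.Option{{
		Name:      "api_key",
		Hide:      fs.OptionHideBoth,
		Default:   "",
		Sensitive: true,
	}, {
		Name: config.ConfigEncoding,
		Help: config.ConfigEncodingHelp,
		// encoding default as in the second hunk above
	}}...),
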
backend/protondrive/protondrive.go (1063 lines): file diff suppressed because it is too large
backend/protondrive/protondrive_test.go (16 lines)
@@ -0,0 +1,16 @@
package protondrive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/protondrive"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestProtonDrive:",
		NilObject:  (*protondrive.Object)(nil),
	})
}
@@ -23,6 +23,7 @@ import (
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/readers"
 )
 
@@ -252,9 +253,12 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
 func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
+	return f.putUnchecked(ctx, in, src, src.Remote(), options...)
+}
+
+func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (o fs.Object, err error) {
 	// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
 	size := src.Size()
-	remote := src.Remote()
 	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
 	if err != nil {
 		return nil, err
@@ -540,24 +544,59 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
 	if err != nil {
 		return nil, err
 	}
+	modTime := src.ModTime(ctx)
+	var resp struct {
+		File putio.File `json:"file"`
+	}
+	// For some unknown reason the API sometimes returns the name
+	// already exists unless we upload to a temporary name and
+	// rename
+	//
+	// {"error_id":null,"error_message":"Name already exist","error_type":"NAME_ALREADY_EXIST","error_uri":"http://api.put.io/v2/docs","extra":{},"status":"ERROR","status_code":400}
+	suffix := "." + random.String(8)
 	err = f.pacer.Call(func() (bool, error) {
 		params := url.Values{}
 		params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
 		params.Set("parent_id", directoryID)
-		params.Set("name", f.opt.Enc.FromStandardName(leaf))
+		params.Set("name", f.opt.Enc.FromStandardName(leaf+suffix))
+
 		req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
 		if err != nil {
 			return false, err
 		}
 		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
 		// fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
-		_, err = f.client.Do(req, nil)
+		_, err = f.client.Do(req, &resp)
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, err
 	}
-	return f.NewObject(ctx, remote)
+	err = f.pacer.Call(func() (bool, error) {
+		params := url.Values{}
+		params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))
+		params.Set("name", f.opt.Enc.FromStandardName(leaf))
+
+		req, err := f.client.NewRequest(ctx, "POST", "/v2/files/rename", strings.NewReader(params.Encode()))
+		if err != nil {
+			return false, err
+		}
+		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+		_, err = f.client.Do(req, &resp)
+		return shouldRetry(ctx, err)
+	})
+	if err != nil {
+		return nil, err
+	}
+	o, err = f.newObjectWithInfo(ctx, remote, resp.File)
+	if err != nil {
+		return nil, err
+	}
+	err = o.SetModTime(ctx, modTime)
+	if err != nil {
+		return nil, err
+	}
+	return o, nil
 }
 
 // Move src to this remote using server-side move operations.
@@ -579,6 +618,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
 	if err != nil {
 		return nil, err
 	}
+	modTime := src.ModTime(ctx)
 	err = f.pacer.Call(func() (bool, error) {
 		params := url.Values{}
 		params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
@@ -596,7 +636,15 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
 	if err != nil {
 		return nil, err
 	}
-	return f.NewObject(ctx, remote)
+	o, err = f.NewObject(ctx, remote)
+	if err != nil {
+		return nil, err
+	}
+	err = o.SetModTime(ctx, modTime)
+	if err != nil {
+		return nil, err
+	}
+	return o, nil
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
Some files were not shown because too many files have changed in this diff.