Mirror of https://github.com/rclone/rclone.git, synced 2025-12-06 00:03:32 +00:00

Compare commits: fix-tests...v1.63-stab (1246 commits)
(Commit list: 1246 commits, from bd1fbcae12 down to 38dc3e93ee. Only the SHA-1 column of the Author/SHA1/Date table survived this capture; the author, date, and message cells are empty, so the individual rows are not reproduced here.)
.github/FUNDING.yml (vendored, 4 deletions)

@@ -1,4 +0,0 @@
-github: [ncw]
-patreon: njcw
-liberapay: ncw
-custom: ["https://rclone.org/donate/"]
.github/dependabot.yml (vendored, new file, 6 additions)

@@ -0,0 +1,6 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"
.github/workflows/build.yml (vendored, 138 changes)

@@ -8,29 +8,31 @@ name: build
 on:
   push:
     branches:
-      - '*'
+      - '**'
     tags:
-      - '*'
+      - '**'
   pull_request:
   workflow_dispatch:
     inputs:
       manual:
+        description: Manual run (bypass default conditions)
+        type: boolean
         required: true
         default: true
 
 jobs:
   build:
-    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
+    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.15', 'go1.16']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.17.x'
+            go: '1.20'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -39,9 +41,16 @@ jobs:
             librclonetest: true
             deploy: true
 
+          - job_name: linux_386
+            os: ubuntu-latest
+            go: '1.20'
+            goarch: 386
+            gotags: cmount
+            quicktest: true
+
           - job_name: mac_amd64
-            os: macOS-latest
-            go: '1.17.x'
+            os: macos-11
+            go: '1.20'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -49,49 +58,38 @@ jobs:
             deploy: true
 
           - job_name: mac_arm64
-            os: macOS-latest
-            go: '1.17.x'
+            os: macos-11
+            go: '1.20'
             gotags: 'cmount'
-            build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
+            build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
 
-          - job_name: windows_amd64
+          - job_name: windows
             os: windows-latest
-            go: '1.17.x'
+            go: '1.20'
             gotags: cmount
-            build_flags: '-include "^windows/amd64" -cgo'
-            build_args: '-buildmode exe'
-            quicktest: true
-            racequicktest: true
-            deploy: true
-
-          - job_name: windows_386
-            os: windows-latest
-            go: '1.17.x'
-            gotags: cmount
-            goarch: '386'
-            cgo: '1'
-            build_flags: '-include "^windows/386" -cgo'
+            cgo: '0'
+            build_flags: '-include "^windows/"'
             build_args: '-buildmode exe'
             quicktest: true
             deploy: true
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.17.x'
+            go: '1.20'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.15
+          - job_name: go1.18
             os: ubuntu-latest
-            go: '1.15.x'
+            go: '1.18'
             quicktest: true
             racequicktest: true
 
-          - job_name: go1.16
+          - job_name: go1.19
             os: ubuntu-latest
-            go: '1.16.x'
+            go: '1.19'
             quicktest: true
             racequicktest: true
 
@@ -101,15 +99,15 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
 
      - name: Install Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v4
        with:
-          stable: 'false'
          go-version: ${{ matrix.go }}
+          check-latest: true
 
      - name: Set environment variables
        shell: bash
@@ -126,7 +124,7 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get install fuse libfuse-dev rpm pkg-config
+          sudo apt-get install fuse3 libfuse-dev rpm pkg-config
         if: matrix.os == 'ubuntu-latest'
 
       - name: Install Libraries on macOS
@@ -134,7 +132,7 @@ jobs:
         run: |
           brew update
           brew install --cask macfuse
-        if: matrix.os == 'macOS-latest'
+        if: matrix.os == 'macos-11'
 
       - name: Install Libraries on Windows
         shell: powershell
@@ -165,7 +163,7 @@ jobs:
           env
 
       - name: Go module cache
-        uses: actions/cache@v2
+        uses: actions/cache@v3
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -177,6 +175,11 @@ jobs:
         run: |
           make
 
+      - name: Rclone version
+        shell: bash
+        run: |
+          rclone version
+
       - name: Run tests
         shell: bash
         run: |
@@ -214,48 +217,57 @@
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
         # working-directory: '$(modulePath)'
         # Deploy binaries if enabled in config && not a PR && not a fork
-        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
+        if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
 
   lint:
-    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
+    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
 
       - name: Code quality test
-        uses: golangci/golangci-lint-action@v2
+        uses: golangci/golangci-lint-action@v3
         with:
           # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
           version: latest
 
+      # Run govulncheck on the latest go version, the one we build binaries with
+      - name: Install Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: '1.20'
+          check-latest: true
+
+      - name: Install govulncheck
+        run: go install golang.org/x/vuln/cmd/govulncheck@latest
+
+      - name: Scan for vulnerabilities
+        run: govulncheck ./...
+
   android:
-    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
+    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
 
-      # Upgrade together with NDK version
-      - name: Set up Go 1.16
-        uses: actions/setup-go@v1
+      - name: Set up Go
+        uses: actions/setup-go@v4
         with:
-          go-version: 1.16
-
-      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
-      - name: Force NDK version
-        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
+          go-version: '1.20'
 
       - name: Go module cache
-        uses: actions/cache@v2
+        uses: actions/cache@v3
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -273,30 +285,32 @@ jobs:
 
       - name: install gomobile
         run: |
-          go get golang.org/x/mobile/cmd/gobind
-          go get golang.org/x/mobile/cmd/gomobile
+          go install golang.org/x/mobile/cmd/gobind@latest
+          go install golang.org/x/mobile/cmd/gomobile@latest
           env PATH=$PATH:~/go/bin gomobile init
+          echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
 
       - name: arm-v7a gomobile build
-        run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
+        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
 
       - name: arm-v7a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm' >> $GITHUB_ENV
           echo 'GOARM=7' >> $GITHUB_ENV
           echo 'CGO_ENABLED=1' >> $GITHUB_ENV
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: arm-v7a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
 
       - name: arm64-v8a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -304,12 +318,12 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: arm64-v8a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
 
       - name: x86 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=386' >> $GITHUB_ENV
@@ -317,12 +331,12 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: x86 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
 
       - name: x64 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=amd64' >> $GITHUB_ENV
@@ -330,7 +344,7 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: x64 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
 
       - name: Upload artifacts
         run: |
@@ -338,4 +352,4 @@
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
         # Upload artifacts if not a PR && not a fork
-        if: github.head_ref == '' && github.repository == 'rclone/rclone'
+        if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
.github/workflows/build_publish_beta_docker_image.yml (vendored, new file, 61 additions)

@@ -0,0 +1,61 @@
+name: Docker beta build
+
+on:
+  push:
+    branches:
+      - master
+jobs:
+  build:
+    if: github.repository == 'rclone/rclone'
+    runs-on: ubuntu-latest
+    name: Build image job
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: ghcr.io/${{ github.repository }}
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          # This is the user that triggered the Workflow. In this case, it will
+          # either be the user whom created the Release or manually triggered
+          # the workflow_dispatch.
+          username: ${{ github.actor }}
+          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
+          # GitHub Actions at the start of a workflow run to identify the job.
+          # This is used to authenticate against GitHub Container Registry.
+          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
+          # for more detailed information.
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build and publish image
+        uses: docker/build-push-action@v4
+        with:
+          file: Dockerfile
+          context: .
+          push: true # push the image to ghcr
+          tags: |
+            ghcr.io/rclone/rclone:beta
+            rclone/rclone:beta
+          labels: ${{ steps.meta.outputs.labels }}
+          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          provenance: false
+          # Eventually cache will need to be cleared if builds more frequent than once a week
+          # https://github.com/docker/build-push-action/issues/252
.github/workflows/build_publish_docker_image.yml (vendored, 26 deletions; file removed)

@@ -1,26 +0,0 @@
-name: Docker beta build
-
-on:
-  push:
-    branches:
-      - master
-
-jobs:
-  build:
-    if: github.repository == 'rclone/rclone'
-    runs-on: ubuntu-latest
-    name: Build image job
-    steps:
-      - name: Checkout master
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-      - name: Build and publish image
-        uses: ilteoood/docker_buildx@1.1.0
-        with:
-          tag: beta
-          imageName: rclone/rclone
-          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
-          publish: true
-          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
-          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
.github/workflows/build_publish_release_docker_image.yml (file header lost in capture; filename inferred from hunk content)

@@ -11,7 +11,7 @@ jobs:
     name: Build image job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
       - name: Get actual patch version
@@ -28,7 +28,7 @@ jobs:
         with:
           tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
           imageName: rclone/rclone
-          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
+          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
           publish: true
           dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
           dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
@@ -40,7 +40,7 @@ jobs:
     name: Build docker plugin job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
       - name: Build and publish docker plugin
@@ -50,7 +50,7 @@ jobs:
           PLUGIN_USER=rclone
           docker login --username ${{ secrets.DOCKER_HUB_USER }} \
                        --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
-          for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
+          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
             export PLUGIN_USER PLUGIN_ARCH
             make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
             make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
.github/workflows/winget.yml (vendored, new file, 14 additions)

@@ -0,0 +1,14 @@
+name: Publish to Winget
+on:
+  release:
+    types: [released]
+
+jobs:
+  publish:
+    runs-on: windows-latest # Action can only run on Windows
+    steps:
+      - uses: vedantmgoyal2009/winget-releaser@v2
+        with:
+          identifier: Rclone.Rclone
+          installers-regex: '-windows-\w+\.zip$'
+          token: ${{ secrets.WINGET_TOKEN }}
.golangci.yml (file header lost in capture; filename inferred from hunk content)

@@ -2,15 +2,17 @@
 
 linters:
   enable:
-    - deadcode
     - errcheck
     - goimports
     - revive
     - ineffassign
-    - structcheck
-    - varcheck
     - govet
     - unconvert
     - staticcheck
+    - gosimple
+    - stylecheck
+    - unused
     - misspell
+    #- prealloc
+    #- maligned
   disable-all: true
@@ -20,11 +22,35 @@ issues:
   exclude-use-default: false
 
   # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
-  max-per-linter: 0
+  max-issues-per-linter: 0
 
   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0
 
+  exclude-rules:
+
+    - linters:
+        - staticcheck
+      text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
+
 run:
   # timeout for analysis, e.g. 30s, 5m, default is 1m
   timeout: 10m
 
+linters-settings:
+  revive:
+    rules:
+      - name: unreachable-code
+        disabled: true
+      - name: unused-parameter
+        disabled: true
+      - name: empty-block
+        disabled: true
+      - name: redefines-builtin-id
+        disabled: true
+      - name: superfluous-else
+        disabled: true
+  stylecheck:
+    # Only enable the checks performed by the staticcheck stand-alone tool,
+    # as documented here: https://staticcheck.io/docs/configuration/options/#checks
+    checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
CONTRIBUTING.md (file header lost in capture; filename inferred from hunk content)

@@ -77,7 +77,7 @@ Make sure you
 * Add [documentation](#writing-documentation) for a new feature.
 * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
 
-When you are done with that push your changes to Github:
+When you are done with that push your changes to GitHub:
 
     git push -u origin my-new-feature
 
@@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
 
 You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
 
-## Using Git and Github ##
+## Using Git and GitHub ##
 
 ### Committing your changes ###
 
Dockerfile (file header lost in capture; filename inferred from hunk content)

@@ -11,7 +11,7 @@ RUN ./rclone version
 # Begin final image
 FROM alpine:latest
 
-RUN apk --no-cache add ca-certificates fuse tzdata && \
+RUN apk --no-cache add ca-certificates fuse3 tzdata && \
     echo "user_allow_other" >> /etc/fuse.conf
 
 COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
 
MAINTAINERS.md (file header lost in capture; filename inferred from hunk content)

@@ -15,7 +15,9 @@ Current active maintainers of rclone are:
 | Ivan Andreev | @ivandeex | chunker & mailru backends |
 | Max Sum | @Max-Sum | union backend |
 | Fred | @creativeprojects | seafile backend |
-| Caleb Case | @calebcase | tardigrade backend |
+| Caleb Case | @calebcase | storj backend |
+| wiserain | @wiserain | pikpak backend |
+| albertony | @albertony | |
 
 **This is a work in progress Draft**
 
MANUAL.html (generated, 17139 changes): file diff suppressed because it is too large.

MANUAL.txt (generated, 21099 changes): file diff suppressed because it is too large.
Makefile (15 changes)

@@ -81,6 +81,9 @@ quicktest:
 racequicktest:
 	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
 
+compiletest:
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
+
 # Do source code quality checks
 check: rclone
 	@echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -93,11 +96,11 @@ build_dep:
 
 # Get the release dependencies we only install on linux
 release_dep_linux:
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
+	go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
 
 # Get the release dependencies we only install on Windows
 release_dep_windows:
-	GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
+	GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
 
 # Update dependencies
 showupdates:
@@ -245,18 +248,18 @@ retag:
 startdev:
 	@echo "Version is $(VERSION)"
 	@echo "Next version is $(NEXT_VERSION)"
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
+	echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_VERSION)\"\n" | gofmt > fs/versiontag.go
 	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
 	echo "$(NEXT_VERSION)" > VERSION
-	git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
+	git commit -m "Start $(NEXT_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
 
 startstable:
 	@echo "Version is $(VERSION)"
 	@echo "Next stable version is $(NEXT_PATCH_VERSION)"
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
+	echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_PATCH_VERSION)\"\n" | gofmt > fs/versiontag.go
 	echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
 	echo "$(NEXT_PATCH_VERSION)" > VERSION
-	git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
+	git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
 
 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
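For reference, the updated startdev/startstable recipes above generate fs/versiontag.go through the echo ... | gofmt pipeline. Expanding the escape sequences, the generated file is plain Go source like the following; the version string is whatever $(NEXT_VERSION) or $(NEXT_PATCH_VERSION) expands to, and v1.64.0 below is only an illustration:

    package fs

    // VersionTag of rclone
    var VersionTag = "v1.64.0" // illustrative; $(NEXT_VERSION) is substituted here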
40
README.md
40
README.md
@@ -1,4 +1,5 @@
|
||||
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
|
||||
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
|
||||
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
|
||||
|
||||
[Website](https://rclone.org) |
|
||||
[Documentation](https://rclone.org/docs/) |
|
||||
@@ -20,27 +21,37 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
## Storage providers
|
||||
|
||||
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
||||
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
||||
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
|
||||
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
||||
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
||||
* Box [:page_facing_up:](https://rclone.org/box/)
|
||||
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
|
||||
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
|
||||
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
|
||||
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
|
||||
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
||||
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
|
||||
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
||||
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
|
||||
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
||||
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
|
||||
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
||||
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
||||
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
|
||||
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
|
||||
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
||||
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
||||
@@ -50,23 +61,29 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
||||
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
||||
* OVH [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
|
||||
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
|
||||
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
||||
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
||||
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
||||
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
|
||||
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
||||
* put.io [:page_facing_up:](https://rclone.org/putio/)
|
||||
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
||||
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
||||
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
|
||||
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||
* Storj [:page_facing_up:](https://rclone.org/storj/)
|
||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
|
||||
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||
@@ -76,6 +93,19 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
|
||||
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
|
||||
|
||||
### Virtual storage providers
|
||||
|
||||
These backends adapt or modify other storage providers
|
||||
|
||||
* Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
|
||||
* Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
|
||||
* Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
|
||||
* Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
|
||||
* Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
|
||||
* Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
|
||||
* Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
|
||||
* Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
|
||||
|
||||
## Features

* MD5/SHA-1 hashes checked at all times for file integrity
@@ -90,7 +120,7 @@ Please see [the full list of all storage providers and their features](https://r
  * Optional encryption ([Crypt](https://rclone.org/crypt/))
  * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
  * Multi-threaded downloads to local disk
- * Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
+ * Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA

## Installation & documentation

@@ -111,5 +141,5 @@ Please see the [rclone website](https://rclone.org/) for:

License
-------

- This is free software under the terms of MIT the license (check the
+ This is free software under the terms of the MIT license (check the
  [COPYING file](/COPYING) included in this package).
RELEASE.md (14 changes)
@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases

## Making a release

  * git checkout master # see below for stable branch
- * git pull
+ * git pull # IMPORTANT
  * git status - make sure everything is checked in
  * Check GitHub actions build for master is Green
  * make test # see integration test server or run locally
@@ -21,6 +21,7 @@ This file describes how to make the various kinds of releases
  * git status - to check for new man pages - git add them
  * git commit -a -v -m "Version v1.XX.0"
  * make retag
+ * git push origin # without --follow-tags so it doesn't push the tag if it fails
  * git push --follow-tags origin
  * # Wait for the GitHub builds to complete then...
  * make fetch_binaries
@@ -53,6 +54,14 @@ doing that so it may be necessary to roll back dependencies to the
  version specified by `make updatedirect` in order to get rclone to
  build.

+ ## Tidy beta
+
+ At some point after the release run
+
+     bin/tidy-beta v1.55
+
+ where the version number is that of a couple of releases back, to remove old beta binaries.
+
## Making a point release

If rclone needs a point release due to some horrendous bug:
@@ -66,8 +75,7 @@ Set vars

First make the release branch. If this is a second point release then
this will be done already.

- * git branch ${BASE_TAG} ${BASE_TAG}-stable
- * git co ${BASE_TAG}-stable
+ * git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
  * make startstable

Now
backend/alias/alias.go
@@ -1,3 +1,4 @@
+ // Package alias implements a virtual provider to rename existing remotes.
  package alias

  import (

backend/all/all.go
@@ -1,3 +1,4 @@
+ // Package all imports all the backends
  package all

  import (
@@ -9,6 +10,7 @@ import (
    _ "github.com/rclone/rclone/backend/box"
    _ "github.com/rclone/rclone/backend/cache"
    _ "github.com/rclone/rclone/backend/chunker"
    _ "github.com/rclone/rclone/backend/combine"
    _ "github.com/rclone/rclone/backend/compress"
    _ "github.com/rclone/rclone/backend/crypt"
    _ "github.com/rclone/rclone/backend/drive"
@@ -20,17 +22,21 @@ import (
    _ "github.com/rclone/rclone/backend/googlephotos"
    _ "github.com/rclone/rclone/backend/hasher"
    _ "github.com/rclone/rclone/backend/hdfs"
    _ "github.com/rclone/rclone/backend/hidrive"
    _ "github.com/rclone/rclone/backend/http"
    _ "github.com/rclone/rclone/backend/hubic"
    _ "github.com/rclone/rclone/backend/internetarchive"
    _ "github.com/rclone/rclone/backend/jottacloud"
    _ "github.com/rclone/rclone/backend/koofr"
    _ "github.com/rclone/rclone/backend/local"
    _ "github.com/rclone/rclone/backend/mailru"
    _ "github.com/rclone/rclone/backend/mega"
    _ "github.com/rclone/rclone/backend/memory"
    _ "github.com/rclone/rclone/backend/netstorage"
    _ "github.com/rclone/rclone/backend/onedrive"
    _ "github.com/rclone/rclone/backend/opendrive"
    _ "github.com/rclone/rclone/backend/oracleobjectstorage"
    _ "github.com/rclone/rclone/backend/pcloud"
    _ "github.com/rclone/rclone/backend/pikpak"
    _ "github.com/rclone/rclone/backend/premiumizeme"
    _ "github.com/rclone/rclone/backend/putio"
    _ "github.com/rclone/rclone/backend/qingstor"
@@ -39,9 +45,10 @@ import (
    _ "github.com/rclone/rclone/backend/sftp"
    _ "github.com/rclone/rclone/backend/sharefile"
    _ "github.com/rclone/rclone/backend/sia"
    _ "github.com/rclone/rclone/backend/smb"
    _ "github.com/rclone/rclone/backend/storj"
    _ "github.com/rclone/rclone/backend/sugarsync"
    _ "github.com/rclone/rclone/backend/swift"
    _ "github.com/rclone/rclone/backend/tardigrade"
    _ "github.com/rclone/rclone/backend/union"
    _ "github.com/rclone/rclone/backend/uptobox"
    _ "github.com/rclone/rclone/backend/webdav"

@@ -435,7 +435,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
      query += " AND kind:" + folderKind
  } else if filesOnly {
      query += " AND kind:" + fileKind
- } else {
+ //} else {
      // FIXME none of these work
      //query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
      //query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
@@ -556,9 +556,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
  //
  // This is a workaround for Amazon sometimes returning
  //
- // * 408 REQUEST_TIMEOUT
- // * 504 GATEWAY_TIMEOUT
- // * 500 Internal server error
+ // - 408 REQUEST_TIMEOUT
+ // - 504 GATEWAY_TIMEOUT
+ // - 500 Internal server error
  //
  // At the end of large uploads. The speculation is that the timeout
  // is waiting for the sha1 hashing to complete and the file may well
@@ -626,7 +626,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,

  // Put the object into the container
  //
- // Copy the reader in to the new object which is returned
+ // Copy the reader in to the new object which is returned.
  //
  // The new object may have been created if an error is returned
  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -685,9 +685,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

  // Move src to this remote using server-side move operations.
  //
- // This is stored with the remote path given
+ // This is stored with the remote path given.
  //
- // It returns the destination Object and a possible error
+ // It returns the destination Object and a possible error.
  //
  // Will only be called if src.Fs().Name() == f.Name()
  //
@@ -1002,7 +1002,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

  // ModTime returns the modification time of the object
  //
- //
  // It attempts to read the objects mtime and if that isn't present the
  // LastModified returned in the http headers
  func (o *Object) ModTime(ctx context.Context) time.Time {

(File diff suppressed because it is too large)
@@ -1,5 +1,5 @@
- //go:build !plan9 && !solaris && !js
- // +build !plan9,!solaris,!js
+ //go:build !plan9 && !solaris && !js && go1.18
+ // +build !plan9,!solaris,!js,go1.18

  package azureblob

@@ -1,15 +1,15 @@
  // Test AzureBlob filesystem interface

- //go:build !plan9 && !solaris && !js
- // +build !plan9,!solaris,!js
+ //go:build !plan9 && !solaris && !js && go1.18
+ // +build !plan9,!solaris,!js,go1.18

  package azureblob

  import (
      "context"
      "testing"

      "github.com/rclone/rclone/fs"
      "github.com/rclone/rclone/fstest"
      "github.com/rclone/rclone/fstest/fstests"
      "github.com/stretchr/testify/assert"
  )
@@ -21,7 +21,26 @@ func TestIntegration(t *testing.T) {
      NilObject:   (*Object)(nil),
      TiersToTest: []string{"Hot", "Cool"},
      ChunkedUpload: fstests.ChunkedUploadConfig{
          MaxChunkSize: maxChunkSize,
          MinChunkSize: defaultChunkSize,
      },
  })
  }
+
+ // TestIntegration2 runs integration tests against the remote
+ func TestIntegration2(t *testing.T) {
+     if *fstest.RemoteName != "" {
+         t.Skip("Skipping as -remote set")
+     }
+     name := "TestAzureBlob:"
+     fstests.Run(t, &fstests.Opt{
+         RemoteName:  name,
+         NilObject:   (*Object)(nil),
+         TiersToTest: []string{"Hot", "Cool"},
+         ChunkedUpload: fstests.ChunkedUploadConfig{
+             MinChunkSize: defaultChunkSize,
+         },
+         ExtraConfig: []fstests.ExtraConfigItem{
+             {Name: name, Key: "directory_markers", Value: "true"},
+         },
+     })
+ }
@@ -34,32 +53,24 @@ var (
      _ fstests.SetUploadChunkSizer = (*Fs)(nil)
  )

- // TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
- func TestServicePrincipalFileSuccess(t *testing.T) {
-     ctx := context.TODO()
-     credentials := `
- {
-     "appId": "my application (client) ID",
-     "password": "my secret",
-     "tenant": "my active directory tenant ID"
- }
- `
-     tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
-     if assert.NoError(t, err) {
-         assert.NotNil(t, tokenRefresher)
+ func TestValidateAccessTier(t *testing.T) {
+     tests := map[string]struct {
+         accessTier string
+         want       bool
+     }{
+         "hot":     {"hot", true},
+         "HOT":     {"HOT", true},
+         "Hot":     {"Hot", true},
+         "cool":    {"cool", true},
+         "archive": {"archive", true},
+         "empty":   {"", false},
+         "unknown": {"unknown", false},
+     }
+
+     for name, test := range tests {
+         t.Run(name, func(t *testing.T) {
+             got := validateAccessTier(test.accessTier)
+             assert.Equal(t, test.want, got)
+         })
+     }
  }

  // TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
  func TestServicePrincipalFileFailure(t *testing.T) {
      ctx := context.TODO()
      credentials := `
  {
      "appId": "my application (client) ID",
      "tenant": "my active directory tenant ID"
  }
  `
      _, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
      assert.Error(t, err)
      assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
  }

@@ -1,7 +1,7 @@
  // Build for azureblob for unsupported platforms to stop go complaining
  // about "no buildable Go source files "

- //go:build plan9 || solaris || js
- // +build plan9 solaris js
+ //go:build plan9 || solaris || js || !go1.18
+ // +build plan9 solaris js !go1.18

  package azureblob

@@ -1,137 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"

    "github.com/Azure/go-autorest/autorest/adal"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fshttp"
)

const (
    azureResource      = "https://storage.azure.com"
    imdsAPIVersion     = "2018-02-01"
    msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)

// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string

type msiIdentifierType int

const (
    msiClientID msiIdentifierType = iota
    msiObjectID
    msiResourceID
)

type userMSI struct {
    Type  msiIdentifierType
    Value string
}

type httpError struct {
    Response *http.Response
}

func (e httpError) Error() string {
    return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}

// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
    // Attempt to get an MSI token; silently continue if unsuccessful.
    // This code has been lovingly stolen from azcopy's OAuthTokenManager.
    result := adal.Token{}
    req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
    if err != nil {
        fs.Debugf(nil, "Failed to create request: %v", err)
        return result, err
    }
    params := req.URL.Query()
    params.Set("resource", azureResource)
    params.Set("api-version", imdsAPIVersion)

    // Specify user-assigned identity if requested.
    if identity != nil {
        switch identity.Type {
        case msiClientID:
            params.Set("client_id", identity.Value)
        case msiObjectID:
            params.Set("object_id", identity.Value)
        case msiResourceID:
            params.Set("mi_res_id", identity.Value)
        default:
            // If this happens, the calling function and this one don't agree on
            // what valid ID types exist.
            return result, fmt.Errorf("unknown MSI identity type specified")
        }
    }
    req.URL.RawQuery = params.Encode()

    // The Metadata header is required by all calls to IMDS.
    req.Header.Set("Metadata", "true")

    // If this function is run in a test, query the test server instead of IMDS.
    testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
    if isTest {
        req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
        req.Host = req.URL.Host
    }

    // Send request
    httpClient := fshttp.NewClient(ctx)
    resp, err := httpClient.Do(req)
    if err != nil {
        return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
    }
    defer func() { // resp and Body should not be nil
        _, err = io.Copy(ioutil.Discard, resp.Body)
        if err != nil {
            fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
        }
        err = resp.Body.Close()
        if err != nil {
            fs.Debugf(nil, "Unable to close IMDS response: %v", err)
        }
    }()
    // Check if the status code indicates success
    // The request returns 200 currently, add 201 and 202 as well for possible extension.
    switch resp.StatusCode {
    case 200, 201, 202:
        break
    default:
        body, _ := ioutil.ReadAll(resp.Body)
        fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
        return result, httpError{Response: resp}
    }

    b, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
    }
    // Remove BOM, if any. azcopy does this so I'm following along.
    b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))

    // This would be a good place to persist the token if a large number of rclone
    // invocations are being made in a short amount of time. If the token is
    // persisted, the azureblob code will need to check for expiry before every
    // storage API call.
    err = json.Unmarshal(b, &result)
    if err != nil {
        return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
    }

    return result, nil
}
@@ -1,118 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

import (
    "context"
    "encoding/json"
    "net/http"
    "net/http/httptest"
    "strconv"
    "strings"
    "testing"

    "github.com/Azure/go-autorest/autorest/adal"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        err := r.ParseForm()
        require.NoError(t, err)
        parameters := r.URL.Query()
        (*actual)["path"] = r.URL.Path
        (*actual)["Metadata"] = r.Header.Get("Metadata")
        (*actual)["method"] = r.Method
        for paramName := range parameters {
            (*actual)[paramName] = parameters.Get(paramName)
        }
        // Make response.
        response := adal.Token{}
        responseBytes, err := json.Marshal(response)
        require.NoError(t, err)
        _, err = w.Write(responseBytes)
        require.NoError(t, err)
    }
}

func TestManagedIdentity(t *testing.T) {
    // test user-assigned identity specifiers to use
    testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
    testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
    testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
    tests := []struct {
        identity              *userMSI
        identityParameterName string
        expectedAbsent        []string
    }{
        {&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
        {&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
        {&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
        {nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
    }
    alwaysExpected := map[string]string{
        "path":        "/metadata/identity/oauth2/token",
        "resource":    "https://storage.azure.com",
        "Metadata":    "true",
        "api-version": "2018-02-01",
        "method":      "GET",
    }
    for _, test := range tests {
        actual := make(map[string]string, 10)
        testServer := httptest.NewServer(handler(t, &actual))
        defer testServer.Close()
        testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
        require.NoError(t, err)
        ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
        _, err = GetMSIToken(ctx, test.identity)
        require.NoError(t, err)

        // Validate expected query parameters present
        expected := make(map[string]string)
        for k, v := range alwaysExpected {
            expected[k] = v
        }
        if test.identity != nil {
            expected[test.identityParameterName] = test.identity.Value
        }

        for key := range expected {
            value, exists := actual[key]
            if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
                test.identityParameterName, key) {
                assert.Equalf(t, expected[key], value,
                    "test of %s: parameter %s has incorrect value", test.identityParameterName, key)
            }
        }

        // Validate unexpected query parameters absent
        for _, key := range test.expectedAbsent {
            _, exists := actual[key]
            assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
        }
    }
}

func errorHandler(resultCode int) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        http.Error(w, "Test error generated", resultCode)
    }
}

func TestIMDSErrors(t *testing.T) {
    errorCodes := []int{404, 429, 500}
    for _, code := range errorCodes {
        testServer := httptest.NewServer(errorHandler(code))
        defer testServer.Close()
        testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
        require.NoError(t, err)
        ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
        _, err = GetMSIToken(ctx, nil)
        require.Error(t, err)
        httpErr, ok := err.(httpError)
        require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
        assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
    }
}
@@ -1,3 +1,4 @@
+ // Package api provides types used by the Backblaze B2 API.
  package api

  import (

@@ -238,7 +239,7 @@ type GetFileInfoRequest struct {
  // If the original source of the file being uploaded has a last
  // modified time concept, Backblaze recommends using
  // src_last_modified_millis as the name, and a string holding the base
- // 10 number number of milliseconds since midnight, January 1, 1970
+ // 10 number of milliseconds since midnight, January 1, 1970
  // UTC. This fits in a 64 bit integer such as the type "long" in the
  // programming language Java. It is intended to be compatible with
  // Java's time long. For example, it can be passed directly into the

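The convention described above maps directly onto Go's time API; a minimal sketch (the file name is illustrative, not part of the B2 API):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	fi, err := os.Stat("example.txt") // illustrative file
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// UnixMilli returns an int64, matching the Java "long" the comment
	// mentions, and FormatInt renders it as the base-10 string B2 expects.
	millis := fi.ModTime().UnixMilli()
	fmt.Println("src_last_modified_millis =", strconv.FormatInt(millis, 10))
}
```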
backend/b2/b2.go (117 changes)
@@ -1,4 +1,4 @@
- // Package b2 provides an interface to the Backblaze B2 object storage system
+ // Package b2 provides an interface to the Backblaze B2 object storage system.
  package b2

  // FIXME should we remove sha1 checks from here as rclone now supports
@@ -64,7 +64,8 @@ const (

  // Globals
  var (
-     errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
+     errNotWithVersions  = errors.New("can't modify or delete files in --b2-versions mode")
+     errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
  )

  // Register with Fs
@@ -106,6 +107,11 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
      Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
      Default: false,
      Advanced: true,
+ }, {
+     Name: "version_at",
+     Help: "Show file versions as they were at the specified time.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
+     Default: fs.Time{},
+     Advanced: true,
  }, {
      Name: "hard_delete",
      Help: "Permanently delete files on remote removal, otherwise hide files.",
@@ -160,7 +166,15 @@ free egress for data downloaded through the Cloudflare network.
  Rclone works with private buckets by sending an "Authorization" header.
  If the custom endpoint rewrites the requests for authentication,
  e.g., in Cloudflare Workers, this header needs to be handled properly.
- Leave blank if you want to use the endpoint provided by Backblaze.`,
+ Leave blank if you want to use the endpoint provided by Backblaze.
+
+ The URL provided here SHOULD have the protocol and SHOULD NOT have
+ a trailing slash or specify the /file/bucket subpath as rclone will
+ request files with "{download_url}/file/{bucket_name}/{path}".
+
+ Example:
+ > https://mysubdomain.mydomain.tld
+ (No trailing "/", "file" or "bucket")`,
      Advanced: true,
  }, {
      Name: "download_auth_duration",
@@ -203,6 +217,7 @@ type Options struct {
      Endpoint     string        `config:"endpoint"`
      TestMode     string        `config:"test_mode"`
      Versions     bool          `config:"versions"`
+     VersionAt    fs.Time       `config:"version_at"`
      HardDelete   bool          `config:"hard_delete"`
      UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
      CopyCutoff   fs.SizeSuffix `config:"copy_cutoff"`
@@ -265,7 +280,7 @@ func (f *Fs) Root() string {
  // String converts this Fs to a string
  func (f *Fs) String() string {
      if f.rootBucket == "" {
-         return fmt.Sprintf("B2 root")
+         return "B2 root"
      }
      if f.rootDirectory == "" {
          return fmt.Sprintf("B2 bucket %s", f.rootBucket)
@@ -641,15 +656,15 @@ var errEndList = errors.New("end list")
  //
  // (bucket, directory) is the starting directory
  //
- // If prefix is set then it is removed from all file names
+ // If prefix is set then it is removed from all file names.
  //
  // If addBucket is set then it adds the bucket to the start of the
- // remotes generated
+ // remotes generated.
  //
- // If recurse is set the function will recursively list
+ // If recurse is set the function will recursively list.
  //
  // If limit is > 0 then it limits to that many files (must be less
- // than 1000)
+ // than 1000).
  //
  // If hidden is set then it will list the hidden (deleted) files too.
  //
@@ -688,9 +703,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
      Method: "POST",
      Path:   "/b2_list_file_names",
  }
- if hidden {
+ if hidden || f.opt.VersionAt.IsSet() {
      opts.Path = "/b2_list_file_versions"
  }

+ lastFileName := ""
+
  for {
      var response api.ListFileNamesResponse
      err := f.pacer.Call(func() (bool, error) {
@@ -720,7 +738,21 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
      if addBucket {
          remote = path.Join(bucket, remote)
      }
+
+     if f.opt.VersionAt.IsSet() {
+         if time.Time(file.UploadTimestamp).After(time.Time(f.opt.VersionAt)) {
+             // Ignore versions that were created after the specified time
+             continue
+         }
+
+         if file.Name == lastFileName {
+             // Ignore versions before the already returned version
+             continue
+         }
+     }
+
      // Send object
+     lastFileName = file.Name
      err = fn(remote, file, isDirectory)
      if err != nil {
          if err == errEndList {
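To make the --b2-version-at selection rule above concrete, here is a small, self-contained sketch of the same filter over a newest-first version listing (the types and names here are illustrative, not rclone's internals):

```go
package main

import (
	"fmt"
	"time"
)

// version is an illustrative stand-in for one entry of b2_list_file_versions,
// which returns the versions of each name newest first.
type version struct {
	name     string
	uploaded time.Time
}

// versionsAt keeps, for each name, the newest version not after the cutoff:
// newer versions are skipped, and once a name has been emitted its older
// versions are skipped too - the same two "continue" cases as above.
func versionsAt(versions []version, cutoff time.Time) []version {
	var out []version
	lastName := ""
	for _, v := range versions {
		if v.uploaded.After(cutoff) {
			continue // created after the specified time
		}
		if v.name == lastName {
			continue // older than the version already returned
		}
		lastName = v.name
		out = append(out, v)
	}
	return out
}

func main() {
	t0 := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)
	vs := []version{
		{"a.txt", t0.Add(3 * time.Hour)},
		{"a.txt", t0.Add(1 * time.Hour)},
		{"a.txt", t0},
	}
	// A cutoff at t0+2h keeps only the t0+1h version of a.txt.
	fmt.Println(versionsAt(vs, t0.Add(2*time.Hour)))
}
```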
@@ -993,7 +1025,7 @@ func (f *Fs) clearBucketID(bucket string) {

  // Put the object into the bucket
  //
- // Copy the reader in to the new object which is returned
+ // Copy the reader in to the new object which is returned.
  //
  // The new object may have been created if an error is returned
  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1173,10 +1205,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
      }
  }
  var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
-     if time.Since(time.Time(timestamp)).Hours() > 24 {
-         return true
-     }
-     return false
+     return time.Since(time.Time(timestamp)).Hours() > 24
  }

  // Delete Config.Transfers in parallel
@@ -1192,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
      fs.Errorf(object.Name, "Can't create object %v", err)
      continue
  }
- tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
+ tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
  err = f.deleteByID(ctx, object.ID, object.Name)
  checkErr(err)
  tr.Done(ctx, err)
@@ -1206,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
  if err != nil {
      fs.Errorf(object, "Can't create object %+v", err)
  }
- tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
+ tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
  if oldOnly && last != remote {
      // Check current version of the file
      if object.Action == "hide" {
@@ -1305,9 +1334,9 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *

  // Copy src to this remote using server-side copy operations.
  //
- // This is stored with the remote path given
+ // This is stored with the remote path given.
  //
- // It returns the destination Object and a possible error
+ // It returns the destination Object and a possible error.
  //
  // Will only be called if src.Fs().Name() == f.Name()
  //
@@ -1449,26 +1478,23 @@ func (o *Object) Size() int64 {

  // Clean the SHA1
  //
- // Make sure it is lower case
+ // Make sure it is lower case.
  //
  // Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
  // Some tools (e.g. Cyberduck) use this
- func cleanSHA1(sha1 string) (out string) {
-     out = strings.ToLower(sha1)
+ func cleanSHA1(sha1 string) string {
      const unverified = "unverified:"
-     if strings.HasPrefix(out, unverified) {
-         out = out[len(unverified):]
-     }
-     return out
+     return strings.TrimPrefix(strings.ToLower(sha1), unverified)
  }

  // decodeMetaDataRaw sets the metadata from the data passed in
  //
  // Sets
- // o.id
- // o.modTime
- // o.size
- // o.sha1
+ //
+ // o.id
+ // o.modTime
+ // o.size
+ // o.sha1
  func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
      o.id = ID
      o.sha1 = SHA1
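The cleanSHA1 rewrite above folds the lower-casing and prefix handling into one expression; a standalone sketch of the resulting behaviour:

```go
package main

import (
	"fmt"
	"strings"
)

// cleanSHA1 as rewritten above: lower-case first, then drop any
// "unverified:" prefix that tools like Cyberduck add.
func cleanSHA1(sha1 string) string {
	const unverified = "unverified:"
	return strings.TrimPrefix(strings.ToLower(sha1), unverified)
}

func main() {
	fmt.Println(cleanSHA1("UNVERIFIED:3DA541559918A808C2402BBA5012F6C60B27661C"))
	// Output: 3da541559918a808c2402bba5012f6c60b27661c
}
```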
@@ -1487,10 +1513,11 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
  // decodeMetaData sets the metadata in the object from an api.File
  //
  // Sets
- // o.id
- // o.modTime
- // o.size
- // o.sha1
+ //
+ // o.id
+ // o.modTime
+ // o.size
+ // o.sha1
  func (o *Object) decodeMetaData(info *api.File) (err error) {
      return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
  }
@@ -1498,10 +1525,11 @@ func (o *Object) decodeMetaData(info *api.File) (err error) {
  // decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
  //
  // Sets
- // o.id
- // o.modTime
- // o.size
- // o.sha1
+ //
+ // o.id
+ // o.modTime
+ // o.size
+ // o.sha1
  func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
      return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
  }
@@ -1559,10 +1587,11 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
  // readMetaData gets the metadata if it hasn't already been fetched
  //
  // Sets
- // o.id
- // o.modTime
- // o.size
- // o.sha1
+ //
+ // o.id
+ // o.modTime
+ // o.size
+ // o.sha1
  func (o *Object) readMetaData(ctx context.Context) (err error) {
      if o.id != "" {
          return nil
@@ -1820,6 +1849,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
      if o.fs.opt.Versions {
          return errNotWithVersions
      }
+     if o.fs.opt.VersionAt.IsSet() {
+         return errNotWithVersionAt
+     }
      size := src.Size()

      bucket, bucketPath := o.split()
@@ -1975,6 +2007,9 @@ func (o *Object) Remove(ctx context.Context) error {
      if o.fs.opt.Versions {
          return errNotWithVersions
      }
+     if o.fs.opt.VersionAt.IsSet() {
+         return errNotWithVersionAt
+     }
      if o.fs.opt.HardDelete {
          return o.fs.deleteByID(ctx, o.id, bucketPath)
      }

@@ -14,12 +14,15 @@ import (
      "io"
      "strings"
      "sync"
      "time"

      "github.com/rclone/rclone/backend/b2/api"
      "github.com/rclone/rclone/fs"
      "github.com/rclone/rclone/fs/accounting"
      "github.com/rclone/rclone/fs/chunksize"
      "github.com/rclone/rclone/fs/hash"
      "github.com/rclone/rclone/lib/atexit"
      "github.com/rclone/rclone/lib/pool"
      "github.com/rclone/rclone/lib/rest"
      "golang.org/x/sync/errgroup"
  )
@@ -88,21 +91,19 @@ type largeUpload struct {
  // newLargeUpload starts an upload of object o from in with metadata in src
  //
  // If newInfo is set then metadata from that will be used instead of reading it from src
- func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
-     remote := o.remote
+ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
      size := src.Size()
      parts := int64(0)
      sha1SliceSize := int64(maxParts)
+     chunkSize := defaultChunkSize
      if size == -1 {
          fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
      } else {
+         chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
          parts = size / int64(chunkSize)
          if size%int64(chunkSize) != 0 {
              parts++
          }
          if parts > maxParts {
              return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
          }
          sha1SliceSize = parts
      }

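The parts arithmetic in newLargeUpload is a ceiling division guarded by a part-count limit; a minimal sketch of just that calculation (the constants in main are illustrative):

```go
package main

import "fmt"

// partsFor mirrors the calculation above: how many chunks of chunkSize are
// needed to cover size bytes, rounding a final partial chunk up, and failing
// if the total exceeds maxParts.
func partsFor(size, chunkSize, maxParts int64) (int64, error) {
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	if parts > maxParts {
		return 0, fmt.Errorf("too many parts %d > %d - increase chunk size", parts, maxParts)
	}
	return parts, nil
}

func main() {
	fmt.Println(partsFor(250, 100, 10000)) // 3 <nil>
}
```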
@@ -429,18 +430,47 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
      defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
      fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
      var (
-         g, gCtx   = errgroup.WithContext(ctx)
-         remaining = up.size
+         g, gCtx    = errgroup.WithContext(ctx)
+         remaining  = up.size
+         uploadPool *pool.Pool
+         ci         = fs.GetConfig(ctx)
      )
+     // If using large chunk size then make a temporary pool
+     if up.chunkSize <= int64(up.f.opt.ChunkSize) {
+         uploadPool = up.f.pool
+     } else {
+         uploadPool = pool.New(
+             time.Duration(up.f.opt.MemoryPoolFlushTime),
+             int(up.chunkSize),
+             ci.Transfers,
+             up.f.opt.MemoryPoolUseMmap,
+         )
+         defer uploadPool.Flush()
+     }
+     // Get an upload token and a buffer
+     getBuf := func() (buf []byte) {
+         up.f.getBuf(true)
+         if !up.doCopy {
+             buf = uploadPool.Get()
+         }
+         return buf
+     }
+     // Put an upload token and a buffer
+     putBuf := func(buf []byte) {
+         if !up.doCopy {
+             uploadPool.Put(buf)
+         }
+         up.f.putBuf(nil, true)
+     }
      g.Go(func() error {
          for part := int64(1); part <= up.parts; part++ {
              // Get a block of memory from the pool and token which limits concurrency.
-             buf := up.f.getBuf(up.doCopy)
+             buf := getBuf()

              // Fail fast, in case an errgroup managed function returns an error
              // gCtx is cancelled. There is no point in uploading all the other parts.
              if gCtx.Err() != nil {
-                 up.f.putBuf(buf, up.doCopy)
+                 putBuf(buf)
                  return nil
              }

@@ -454,14 +484,14 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
                  buf = buf[:reqSize]
                  _, err = io.ReadFull(up.in, buf)
                  if err != nil {
-                     up.f.putBuf(buf, up.doCopy)
+                     putBuf(buf)
                      return err
                  }
              }

              part := part // for the closure
              g.Go(func() (err error) {
-                 defer up.f.putBuf(buf, up.doCopy)
+                 defer putBuf(buf)
                  if !up.doCopy {
                      err = up.transferChunk(gCtx, part, buf)
                  } else {

@@ -14,7 +14,7 @@ const (
      timeFormat = `"` + time.RFC3339 + `"`
  )

- // Time represents represents date and time information for the
+ // Time represents date and time information for the
  // box API, by using RFC3339
  type Time time.Time

@@ -17,9 +17,9 @@ import (
      "errors"
      "fmt"
      "io"
-     "io/ioutil"
      "net/http"
      "net/url"
+     "os"
      "path"
      "strconv"
      "strings"
@@ -27,6 +27,7 @@ import (
      "sync/atomic"
      "time"

+     "github.com/golang-jwt/jwt/v4"
      "github.com/rclone/rclone/backend/box/api"
      "github.com/rclone/rclone/fs"
      "github.com/rclone/rclone/fs/config"
@@ -45,7 +46,6 @@ import (
      "github.com/rclone/rclone/lib/rest"
      "github.com/youmark/pkcs8"
      "golang.org/x/oauth2"
-     "golang.org/x/oauth2/jws"
  )

  const (
@@ -76,6 +76,11 @@ var (
      }
  )

+ type boxCustomClaims struct {
+     jwt.StandardClaims
+     BoxSubType string `json:"box_sub_type,omitempty"`
+ }
+
  // Register with Fs
  func init() {
      fs.Register(&fs.RegInfo{
@@ -178,12 +183,12 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
      signingHeaders := getSigningHeaders(boxConfig)
      queryParams := getQueryParams(boxConfig)
      client := fshttp.NewClient(ctx)
-     err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
+     err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
      return err
  }

  func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
-     file, err := ioutil.ReadFile(configFile)
+     file, err := os.ReadFile(configFile)
      if err != nil {
          return nil, fmt.Errorf("box: failed to read Box config: %w", err)
      }
@@ -194,34 +199,31 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
      return boxConfig, nil
  }

- func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
+ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
      val, err := jwtutil.RandomHex(20)
      if err != nil {
          return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
      }

-     claims = &jws.ClaimSet{
-         Iss: boxConfig.BoxAppSettings.ClientID,
-         Sub: boxConfig.EnterpriseID,
-         Aud: tokenURL,
-         Exp: time.Now().Add(time.Second * 45).Unix(),
-         PrivateClaims: map[string]interface{}{
-             "box_sub_type": boxSubType,
-             "aud":          tokenURL,
-             "jti":          val,
+     claims = &boxCustomClaims{
+         //lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
+         //nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
+         StandardClaims: jwt.StandardClaims{
+             Id:        val,
+             Issuer:    boxConfig.BoxAppSettings.ClientID,
+             Subject:   boxConfig.EnterpriseID,
+             Audience:  tokenURL,
+             ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
          },
+         BoxSubType: boxSubType,
      }

      return claims, nil
  }

- func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
-     signingHeaders := &jws.Header{
-         Algorithm: "RS256",
-         Typ:       "JWT",
-         KeyID:     boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
+ func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
+     signingHeaders := map[string]interface{}{
+         "kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
      }

      return signingHeaders
  }

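For orientation, claims of exactly this shape can be minted with golang-jwt v4; a hedged sketch (the key, identifiers and token URL below are placeholders, and rclone's real signing goes through lib/jwtutil rather than this path):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

type boxCustomClaims struct {
	jwt.StandardClaims
	BoxSubType string `json:"box_sub_type,omitempty"`
}

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048) // placeholder key
	if err != nil {
		panic(err)
	}
	claims := &boxCustomClaims{
		StandardClaims: jwt.StandardClaims{
			Id:        "random-jti",    // stands in for jwtutil.RandomHex(20)
			Issuer:    "client-id",     // placeholder client ID
			Subject:   "enterprise-id", // placeholder enterprise ID
			Audience:  "https://example.com/oauth2/token", // placeholder token URL
			ExpiresAt: time.Now().Add(45 * time.Second).Unix(),
		},
		BoxSubType: "enterprise",
	}
	tok := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
	tok.Header["kid"] = "public-key-id" // the "kid" signing header from above
	signed, err := tok.SignedString(key)
	fmt.Println(err == nil, len(signed) > 0)
}
```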
@@ -266,7 +268,7 @@ type Fs struct {
      root         string             // the path we are working on
      opt          Options            // parsed options
      features     *fs.Features       // optional features
-     srv          *rest.Client       // the connection to the one drive server
+     srv          *rest.Client       // the connection to the server
      dirCache     *dircache.DirCache // Map of directory path to directory id
      pacer        *fs.Pacer          // pacer for API calls
      tokenRenewer *oauthutil.Renew   // renew the token on expiry
@@ -692,7 +694,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
  // Creates from the parameters passed in a half finished Object which
  // must have setMetaData called on it
  //
- // Returns the object, leaf, directoryID and error
+ // Returns the object, leaf, directoryID and error.
  //
  // Used to create new objects
  func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -752,7 +754,7 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size

  // Put the object
  //
- // Copy the reader in to the new object which is returned
+ // Copy the reader in to the new object which is returned.
  //
  // The new object may have been created if an error is returned
  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -792,9 +794,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

  // PutUnchecked the object into the container
  //
- // This will produce an error if the object already exists
+ // This will produce an error if the object already exists.
  //
- // Copy the reader in to the new object which is returned
+ // Copy the reader in to the new object which is returned.
  //
  // The new object may have been created if an error is returned
  func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -877,9 +879,9 @@ func (f *Fs) Precision() time.Duration {

  // Copy src to this remote using server-side copy operations.
  //
- // This is stored with the remote path given
+ // This is stored with the remote path given.
  //
- // It returns the destination Object and a possible error
+ // It returns the destination Object and a possible error.
  //
  // Will only be called if src.Fs().Name() == f.Name()
  //
@@ -897,7 +899,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

      srcPath := srcObj.fs.rootSlash() + srcObj.remote
      dstPath := f.rootSlash() + remote
-     if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
+     if strings.EqualFold(srcPath, dstPath) {
          return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
      }

@@ -995,9 +997,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

  // Move src to this remote using server-side move operations.
  //
- // This is stored with the remote path given
+ // This is stored with the remote path given.
  //
- // It returns the destination Object and a possible error
+ // It returns the destination Object and a possible error.
  //
  // Will only be called if src.Fs().Name() == f.Name()
  //
@@ -1235,7 +1237,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

  // ModTime returns the modification time of the object
  //
- //
  // It attempts to read the objects mtime and if that isn't present the
  // LastModified returned in the http headers
  func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1346,9 +1347,9 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str

  // Update the object with the contents of the io.Reader, modTime and size
  //
- // If existing is set then it updates the object rather than creating a new one
+ // If existing is set then it updates the object rather than creating a new one.
  //
- // The new object may have been created if an error is returned
+ // The new object may have been created if an error is returned.
  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
      if o.fs.tokenRenewer != nil {
          o.fs.tokenRenewer.Start()

backend/cache/cache.go (15 changes, vendored)
@@ -1,6 +1,7 @@
  //go:build !plan9 && !js
  // +build !plan9,!js

+ // Package cache implements a virtual provider to cache existing remotes.
  package cache

  import (
@@ -394,7 +395,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
      notifiedRemotes: make(map[string]bool),
  }
  cache.PinUntilFinalized(f.Fs, f)
- f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
+ rps := rate.Inf
+ if opt.Rps > 0 {
+     rps = rate.Limit(float64(opt.Rps))
+ }
+ f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)

  f.plexConnector = &plexConnector{}
  if opt.PlexURL != "" {
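The guard above matters because golang.org/x/time/rate treats a zero rate as "never refill": a limiter built with rate.Limit(0) stops permanently once its initial burst is spent, while rate.Inf means unlimited. A small sketch of the same pattern (the values are illustrative):

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

// limiterFor mirrors the fix above: a non-positive configured rate means
// "no limit" rather than "no operations".
func limiterFor(rps, burst int) *rate.Limiter {
	r := rate.Inf
	if rps > 0 {
		r = rate.Limit(float64(rps))
	}
	return rate.NewLimiter(r, burst)
}

func main() {
	lim := limiterFor(0, 4) // Rps left unset
	fmt.Println(lim.Allow()) // always true: the limiter is unlimited
}
```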
@@ -1033,7 +1038,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
      }
      fs.Debugf(dir, "list: remove entry: %v", entryRemote)
  }
- entries = nil
+ entries = nil //nolint:ineffassign

  // and then iterate over the ones from source (temp Objects will override source ones)
  var batchDirectories []*Directory
@@ -1124,7 +1129,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
  case fs.Directory:
      _ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
  default:
-     return fmt.Errorf("Unknown object type %T", entry)
+     return fmt.Errorf("unknown object type %T", entry)
  }
  }

@@ -1743,7 +1748,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
  func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
      do := f.Fs.Features().About
      if do == nil {
-         return nil, errors.New("About not supported")
+         return nil, errors.New("not supported by underlying remote")
      }
      return do(ctx)
  }
@@ -1782,7 +1787,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
      }
  }

- // StopBackgroundRunners will signall all the runners to stop their work
+ // StopBackgroundRunners will signal all the runners to stop their work
  // can be triggered from a terminate signal or from testing between runs
  func (f *Fs) StopBackgroundRunners() {
      f.cleanupChan <- false

backend/cache/cache_internal_test.go (89 changes, vendored)
@@ -11,7 +11,6 @@ import (
      goflag "flag"
      "fmt"
      "io"
-     "io/ioutil"
      "log"
      "math/rand"
      "os"
@@ -102,14 +101,12 @@ func TestMain(m *testing.M) {

  func TestInternalListRootAndInnerRemotes(t *testing.T) {
      id := fmt.Sprintf("tilrair%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)

      // Instantiate inner fs
      innerFolder := "inner"
      runInstance.mkdir(t, rootFs, innerFolder)
-     rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs2, boltDb2)
+     rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)

      runInstance.writeObjectString(t, rootFs2, "one", "content")
      listRoot, err := runInstance.list(t, rootFs, "")
@@ -167,7 +164,7 @@ func TestInternalVfsCache(t *testing.T) {
      li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
      for _, r := range li2 {
          var err error
-         ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
+         ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
          if err != nil || len(ci) == 0 {
              log.Printf("========== '%v' not in cache", r)
          } else {
@@ -226,8 +223,7 @@ func TestInternalVfsCache(t *testing.T) {

  func TestInternalObjWrapFsFound(t *testing.T) {
      id := fmt.Sprintf("tiowff%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)

      cfs, err := runInstance.getCacheFs(rootFs)
      require.NoError(t, err)
@@ -259,8 +255,7 @@ func TestInternalObjWrapFsFound(t *testing.T) {

  func TestInternalObjNotFound(t *testing.T) {
      id := fmt.Sprintf("tionf%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)

      obj, err := rootFs.NewObject(context.Background(), "404")
      require.Error(t, err)
@@ -270,8 +265,7 @@ func TestInternalObjNotFound(t *testing.T) {
  func TestInternalCachedWrittenContentMatches(t *testing.T) {
      testy.SkipUnreliable(t)
      id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)

      cfs, err := runInstance.getCacheFs(rootFs)
      require.NoError(t, err)
@@ -298,8 +292,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
          t.Skip("Skip test on windows/386")
      }
      id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)

      // write the object
      runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -317,8 +310,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
  func TestInternalCachedUpdatedContentMatches(t *testing.T) {
      testy.SkipUnreliable(t)
      id := fmt.Sprintf("ticucm%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
      var err error

      // create some rand test data
@@ -347,8 +339,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
  func TestInternalWrappedWrittenContentMatches(t *testing.T) {
      id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
      vfsflags.Opt.DirCacheTime = time.Second
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
      if runInstance.rootIsCrypt {
          t.Skip("test skipped with crypt remote")
      }
@@ -378,8 +369,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
  func TestInternalLargeWrittenContentMatches(t *testing.T) {
      id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
      vfsflags.Opt.DirCacheTime = time.Second
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
      if runInstance.rootIsCrypt {
          t.Skip("test skipped with crypt remote")
      }
@@ -405,8 +395,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {

  func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
      id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)

      cfs, err := runInstance.getCacheFs(rootFs)
      require.NoError(t, err)
@@ -460,8 +449,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {

  func TestInternalMoveWithNotify(t *testing.T) {
      id := fmt.Sprintf("timwn%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
      if !runInstance.wrappedIsExternal {
          t.Skipf("Not external")
      }
@@ -547,8 +535,7 @@ func TestInternalMoveWithNotify(t *testing.T) {

  func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
      id := fmt.Sprintf("tincep%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
      if !runInstance.wrappedIsExternal {
          t.Skipf("Not external")
      }
@@ -634,8 +621,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {

  func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
      id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)

      cfs, err := runInstance.getCacheFs(rootFs)
      require.NoError(t, err)
@@ -667,8 +653,7 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {

  func TestInternalCacheWrites(t *testing.T) {
      id := "ticw"
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})

      cfs, err := runInstance.getCacheFs(rootFs)
      require.NoError(t, err)
@@ -689,8 +674,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
          t.Skip("Skip test on windows/386")
      }
      id := fmt.Sprintf("timcsr%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})

      cfs, err := runInstance.getCacheFs(rootFs)
      require.NoError(t, err)
@@ -725,8 +709,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
  func TestInternalExpiredEntriesRemoved(t *testing.T) {
      id := fmt.Sprintf("tieer%v", time.Now().Unix())
      vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
      cfs, err := runInstance.getCacheFs(rootFs)
      require.NoError(t, err)

@@ -763,9 +746,7 @@ func TestInternalBug2117(t *testing.T) {
      vfsflags.Opt.DirCacheTime = time.Second * 10

      id := fmt.Sprintf("tib2117%v", time.Now().Unix())
-     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
-         map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
-     defer runInstance.cleanupFs(t, rootFs, boltDb)
+     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})

      if runInstance.rootIsCrypt {
          t.Skipf("skipping crypt")
@@ -841,7 +822,7 @@ func newRun() *run {
      }

      if uploadDir == "" {
-         r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
+         r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
          if err != nil {
              panic(fmt.Sprintf("Failed to create temp dir: %v", err))
          }
@@ -866,7 +847,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
      return enc
  }

- func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
+ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
      fstest.Initialise()
      remoteExists := false
      for _, s := range config.FileSections() {
@@ -959,10 +940,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
      }
      err = f.Mkdir(context.Background(), "")
      require.NoError(t, err)
+
+     t.Cleanup(func() {
+         runInstance.cleanupFs(t, f)
+     })
+
      return f, boltDb
  }

- func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
+ func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
      err := f.Features().Purge(context.Background(), "")
      require.NoError(t, err)
      cfs, err := r.getCacheFs(f)
@@ -984,7 +970,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
|
||||
chunk := int64(1024)
|
||||
cnt := size / chunk
|
||||
left := size % chunk
|
||||
f, err := ioutil.TempFile("", "rclonecache-tempfile")
|
||||
f, err := os.CreateTemp("", "rclonecache-tempfile")
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < int(cnt); i++ {
|
||||
@@ -1112,27 +1098,6 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
|
||||
return l, err
|
||||
}
|
||||
|
||||
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
|
||||
in, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = in.Close()
|
||||
}()
|
||||
|
||||
out, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = out.Close()
|
||||
}()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
||||
var err error
|
||||
|
||||
|
||||
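The pattern above moves per-test teardown out of the callers: instead of every test keeping the bolt handle around just to `defer runInstance.cleanupFs(...)`, the helper now registers its own teardown with `t.Cleanup`, so tests that never touch boltDb can simply discard it. A minimal sketch of the idiom (the names newTestResource and TestUsesResource are illustrative, not from this diff):

package example

import "testing"

// newTestResource builds a resource and registers its teardown with the
// test framework, so callers no longer need a matching defer.
func newTestResource(t *testing.T) string {
	t.Helper()
	dir := t.TempDir() // removed automatically when the test finishes
	t.Cleanup(func() {
		// runs after the test (and its subtests) complete,
		// even if the test fails or calls t.Fatal
	})
	return dir
}

func TestUsesResource(t *testing.T) {
	dir := newTestResource(t) // no defer needed at the call site
	_ = dir
}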
2 backend/cache/cache_test.go vendored
@@ -19,7 +19,7 @@ func TestIntegration(t *testing.T) {
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
 		UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
+		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
 		SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
 	})
 }
24 backend/cache/cache_upload_test.go vendored
@@ -21,10 +21,8 @@ import (

 func TestInternalUploadTempDirCreated(t *testing.T) {
 	id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
-		nil,
+	runInstance.newCacheFs(t, remoteName, id, false, true,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
 	require.NoError(t, err)
@@ -63,9 +61,7 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD

 func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }
@@ -73,19 +69,15 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }

 func TestInternalUploadMoveExistingFile(t *testing.T) {
 	id := fmt.Sprintf("tiumef%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
@@ -119,10 +111,8 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {

 func TestInternalUploadTempPathCleaned(t *testing.T) {
 	id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
 		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
@@ -162,10 +152,8 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {

 func TestInternalUploadQueueMoreFiles(t *testing.T) {
 	id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "test")
 	require.NoError(t, err)
@@ -213,9 +201,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 func TestInternalUploadTempFileOperations(t *testing.T) {
 	id := "tiutfo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	boltDb.PurgeTempUploads()

@@ -343,9 +329,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	id := "tiuufo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	boltDb.PurgeTempUploads()
6 backend/cache/plex.go vendored
@@ -8,7 +8,7 @@ import (
 	"crypto/tls"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
 			continue
 		}
 		var data []byte
-		data, err = ioutil.ReadAll(resp.Body)
+		data, err = io.ReadAll(resp.Body)
 		if err != nil {
 			continue
 		}
@@ -213,7 +213,7 @@ func (p *plexConnector) authenticate() error {
 	var data map[string]interface{}
 	err = json.NewDecoder(resp.Body).Decode(&data)
 	if err != nil {
-		return fmt.Errorf("failed to obtain token: %v", err)
+		return fmt.Errorf("failed to obtain token: %w", err)
 	}
 	tokenGen, ok := get(data, "user", "authToken")
 	if !ok {
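The io/ioutil package was deprecated in Go 1.16; its helpers moved to io and os with identical behaviour, which is what the substitutions here and in the test files above are doing. For reference, the usual replacements in a small self-contained sketch:

package example

import (
	"io"
	"os"
	"strings"
)

func replacements() error {
	// ioutil.ReadAll -> io.ReadAll (ioutil.NopCloser -> io.NopCloser)
	data, err := io.ReadAll(strings.NewReader("body"))
	if err != nil {
		return err
	}
	// ioutil.WriteFile -> os.WriteFile, ioutil.ReadFile -> os.ReadFile
	if err := os.WriteFile("out.txt", data, 0o644); err != nil {
		return err
	}
	// ioutil.TempDir -> os.MkdirTemp, ioutil.TempFile -> os.CreateTemp
	dir, err := os.MkdirTemp("", "example-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)
	return nil
}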
5 backend/cache/storage_memory.go vendored
@@ -76,10 +76,7 @@ func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {

 // CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
 func (m *Memory) CleanChunksByNeed(offset int64) {
-	var items map[string]cache.Item
-
-	items = m.db.Items()
-	for key := range items {
+	for key := range m.db.Items() {
 		sepIdx := strings.LastIndex(key, "-")
 		keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
 		if err != nil {
24 backend/cache/storage_persistent.go vendored
@@ -9,7 +9,6 @@ import (
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path"
 	"strconv"
@@ -250,7 +249,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
 	if val != nil {
 		err := json.Unmarshal(val, cachedDir)
 		if err != nil {
-			return fmt.Errorf("error during unmarshalling obj: %v", err)
+			return fmt.Errorf("error during unmarshalling obj: %w", err)
 		}
 	} else {
 		return fmt.Errorf("missing cached dir: %v", cachedDir)
@@ -456,10 +455,7 @@ func (b *Persistent) HasEntry(remote string) bool {

 		return fmt.Errorf("couldn't find object (%v)", remote)
 	})
-	if err == nil {
-		return true
-	}
-	return false
+	return err == nil
 }

 // HasChunk confirms the existence of a single chunk of an object
@@ -476,7 +472,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
 	var data []byte

 	fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
-	data, err := ioutil.ReadFile(fp)
+	data, err := os.ReadFile(fp)
 	if err != nil {
 		return nil, err
 	}
@@ -489,7 +485,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
 	_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)

 	filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
-	err := ioutil.WriteFile(filePath, data, os.ModePerm)
+	err := os.WriteFile(filePath, data, os.ModePerm)
 	if err != nil {
 		return err
 	}
@@ -554,7 +550,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 	err := b.db.Update(func(tx *bolt.Tx) error {
 		dataTsBucket := tx.Bucket([]byte(DataTsBucket))
 		if dataTsBucket == nil {
-			return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
+			return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket)
 		}
 		// iterate through ts
 		c := dataTsBucket.Cursor()
@@ -904,16 +900,16 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
 	v := bucket.Get([]byte(remote))
 	err = json.Unmarshal(v, tempObj)
 	if err != nil {
-		return fmt.Errorf("pending upload (%v) not found %v", remote, err)
+		return fmt.Errorf("pending upload (%v) not found: %w", remote, err)
 	}
 	tempObj.Started = false
 	v2, err := json.Marshal(tempObj)
 	if err != nil {
-		return fmt.Errorf("pending upload not updated %v", err)
+		return fmt.Errorf("pending upload not updated: %w", err)
 	}
 	err = bucket.Put([]byte(tempObj.DestPath), v2)
 	if err != nil {
-		return fmt.Errorf("pending upload not updated %v", err)
+		return fmt.Errorf("pending upload not updated: %w", err)
 	}
 	return nil
 })
@@ -969,11 +965,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
 	}
 	v2, err := json.Marshal(tempObj)
 	if err != nil {
-		return fmt.Errorf("pending upload not updated %v", err)
+		return fmt.Errorf("pending upload not updated: %w", err)
 	}
 	err = bucket.Put([]byte(tempObj.DestPath), v2)
 	if err != nil {
-		return fmt.Errorf("pending upload not updated %v", err)
+		return fmt.Errorf("pending upload not updated: %w", err)
 	}

 	return nil
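Switching fmt.Errorf from %v to %w, as done throughout this file, is not cosmetic: %w records the wrapped error so callers can inspect it with errors.Is and errors.As, whereas %v flattens it into text. A small sketch of the difference (openChunk is an illustrative name, not from this diff):

package example

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func openChunk(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		// %w keeps err in the error chain; with %v that link is lost
		return fmt.Errorf("couldn't read chunk: %w", err)
	}
	return nil
}

func example() {
	err := openChunk("/no/such/chunk")
	// prints true because of %w; would print false with %v
	fmt.Println(errors.Is(err, fs.ErrNotExist))
}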
@@ -12,7 +12,6 @@ import (
 	"fmt"
 	gohash "hash"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"path"
 	"regexp"
@@ -32,7 +31,6 @@ import (
 	"github.com/rclone/rclone/fs/operations"
 )

-//
 // Chunker's composite files have one or more chunks
 // and optional metadata object. If it's present,
 // meta object is named after the original file.
@@ -65,7 +63,7 @@ import (
 // length of 13 decimals it makes a 7-digit base-36 number.
 //
 // When transactions is set to the norename style, data chunks will
-// keep their temporary chunk names (with the transacion identifier
+// keep their temporary chunk names (with the transaction identifier
 // suffix). To distinguish them from temporary chunks, the txn field
 // of the metadata file is set to match the transaction identifier of
 // the data chunks.
@@ -79,7 +77,6 @@ import (
 // Metadata format v1 does not define any control chunk types,
 // they are currently ignored aka reserved.
 // In future they can be used to implement resumable uploads etc.
-//
 const (
 	ctrlTypeRegStr   = `[a-z][a-z0-9]{2,6}`
 	tempSuffixFormat = `_%04s`
@@ -515,7 +512,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {

 	strRegex := regexp.QuoteMeta(pattern)
 	strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
-	strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
+	strRegex = strings.ReplaceAll(strRegex, "\\*", mainNameRegStr)
 	strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
 	f.nameRegexp = regexp.MustCompile(strRegex)

@@ -524,7 +521,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
 	if numDigits > 1 {
 		fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
 	}
-	strFmt := strings.Replace(pattern, "%", "%%", -1)
+	strFmt := strings.ReplaceAll(pattern, "%", "%%")
 	strFmt = strings.Replace(strFmt, "*", "%s", 1)
 	f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
 	f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")
@@ -542,7 +539,6 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
 //
 // xactID is a transaction identifier. Empty xactID denotes active chunk,
 // otherwise temporary chunk name is produced.
-//
 func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
 	dir, parentName := path.Split(filePath)
 	var name, tempSuffix string
@@ -708,7 +704,6 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
 // directory together with dead chunks.
 // In future a flag named like `--chunker-list-hidden` may be added to
 // rclone that will tell List to reveal hidden chunks.
-//
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	entries, err = f.base.List(ctx, dir)
 	if err != nil {
@@ -868,7 +863,6 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 // Note that chunker prefers analyzing file names rather than reading
 // the content of meta object assuming that directory scans are fast
 // but opening even a small file can be slow on some backends.
-//
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	return f.scanObject(ctx, remote, false)
 }
@@ -1043,7 +1037,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	metadata, err := ioutil.ReadAll(reader)
+	metadata, err := io.ReadAll(reader)
 	_ = reader.Close() // ensure file handle is freed on windows
 	if err != nil {
 		return err
@@ -1084,7 +1078,7 @@ func (o *Object) readMetadata(ctx context.Context) error {

 // readXactID returns the transaction ID stored in the passed metadata object
 func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
-	// if xactID has already been read and cahced return it now
+	// if xactID has already been read and cached return it now
 	if o.xIDCached {
 		return o.xactID, nil
 	}
@@ -1102,7 +1096,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
 	if err != nil {
 		return "", err
 	}
-	data, err := ioutil.ReadAll(reader)
+	data, err := io.ReadAll(reader)
 	_ = reader.Close() // ensure file handle is freed on windows
 	if err != nil {
 		return "", err
@@ -1586,7 +1580,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 // This command will chain to `purge` from wrapped remote.
 // As a result it removes not only composite chunker files with their
 // active chunks but also all hidden temporary chunks in the directory.
-//
 func (f *Fs) Purge(ctx context.Context, dir string) error {
 	do := f.base.Features().Purge
 	if do == nil {
@@ -1628,7 +1621,6 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 // Unsupported control chunks will get re-picked by a more recent
 // rclone version with unexpected results. This can be helped by
 // the `delete hidden` flag above or at least the user has been warned.
-//
 func (o *Object) Remove(ctx context.Context) (err error) {
 	if err := o.f.forbidChunk(o, o.Remote()); err != nil {
 		// operations.Move can still call Remove if chunker's Move refuses
@@ -1804,9 +1796,9 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)

 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given
+// This is stored with the remote path given.
 //
-// It returns the destination Object and a possible error
+// It returns the destination Object and a possible error.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1825,9 +1817,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given
+// This is stored with the remote path given.
 //
-// It returns the destination Object and a possible error
+// It returns the destination Object and a possible error.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1895,7 +1887,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 func (f *Fs) CleanUp(ctx context.Context) error {
 	do := f.base.Features().CleanUp
 	if do == nil {
-		return errors.New("can't CleanUp")
+		return errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -1904,7 +1896,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	do := f.base.Features().About
 	if do == nil {
-		return nil, errors.New("About not supported")
+		return nil, errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -2125,7 +2117,6 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
 // file, then tries to read it from metadata. This in theory
 // handles the unusual case when a small file has been tampered
 // on the level of wrapped remote but chunker is unaware of that.
-//
 func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
 	if err := o.readMetadata(ctx); err != nil {
 		return "", err // valid metadata is required to get hash, abort
@@ -2414,7 +2405,6 @@ type metaSimpleJSON struct {
 // - for files larger than chunk size
 // - if file contents can be mistaken as meta object
 // - if consistent hashing is On but wrapped remote can't provide given hash
-//
 func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
 	version := metadataVersion
 	if xactID == "" && version == 2 {
@@ -2447,7 +2437,6 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
 // New format will have a higher version number and cannot be correctly
 // handled by current implementation.
 // The version check below will then explicitly ask user to upgrade rclone.
-//
 func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
 	// Be strict about JSON format
 	// to reduce possibility that a random small file resembles metadata.
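To make the setChunkNameFormat changes above concrete: the configured pattern (for example `*.rclone_chunk.###`) is turned into both a printf format for producing chunk names and a regexp for recognising them, and strings.ReplaceAll is simply the modern spelling of strings.Replace(..., -1). A rough, simplified sketch of that transformation, ignoring control chunks and temporary suffixes (the helper nameFormat is illustrative, not rclone's API):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func nameFormat(pattern string) (format string, re *regexp.Regexp) {
	reHashes := regexp.MustCompile(`#+`)
	// printf format: escape %, "*" becomes the base name, "###" a zero-padded number
	f := strings.ReplaceAll(pattern, "%", "%%")
	f = strings.Replace(f, "*", "%s", 1)
	f = reHashes.ReplaceAllLiteralString(f, "%03d")
	// regexp: quote the pattern, then re-open the wildcards
	r := regexp.QuoteMeta(pattern)
	r = reHashes.ReplaceAllLiteralString(r, `([0-9]+)`)
	r = strings.ReplaceAll(r, `\*`, `(.+)`)
	return f, regexp.MustCompile("^" + r + "$")
}

func main() {
	f, re := nameFormat("*.rclone_chunk.###")
	name := fmt.Sprintf(f, "video.avi", 1) // "video.avi.rclone_chunk.001"
	fmt.Println(name, re.MatchString(name))
}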
@@ -5,7 +5,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"path"
 	"regexp"
 	"strings"
@@ -59,7 +59,7 @@ var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")

 func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
 	item := fstest.Item{Path: name, ModTime: mtime1}
-	_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
+	obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
 	assert.NotNil(t, obj, message)
 	return obj
 }
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
 		if r == nil {
 			return
 		}
-		data, err := ioutil.ReadAll(r)
+		data, err := io.ReadAll(r)
 		assert.NoError(t, err)
 		assert.Equal(t, contents, string(data))
 		_ = r.Close()
@@ -440,7 +440,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
 	checkSmallFile := func(name, contents string) {
 		filename := path.Join(dir, name)
 		item := fstest.Item{Path: filename, ModTime: modTime}
-		_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
+		put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
 		assert.NotNil(t, put)
 		checkSmallFileInternals(put)
 		checkContents(put, contents)
@@ -489,7 +489,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {

 	newFile := func(name string) fs.Object {
 		item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
-		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+		obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
 		require.NotNil(t, obj)
 		return obj
 	}
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
 	assert.NoError(t, err)
 	var chunkContents []byte
 	assert.NotPanics(t, func() {
-		chunkContents, err = ioutil.ReadAll(r)
+		chunkContents, err = io.ReadAll(r)
 		_ = r.Close()
 	})
 	assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
 	r, err = willyChunk.Open(ctx)
 	assert.NoError(t, err)
 	assert.NotPanics(t, func() {
-		_, err = ioutil.ReadAll(r)
+		_, err = io.ReadAll(r)
 		_ = r.Close()
 	})
 	assert.NoError(t, err)
@@ -599,7 +599,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
 	newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
 		filename = path.Join(dir, name)
 		item := fstest.Item{Path: filename, ModTime: modTime}
-		_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
+		obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
 		require.NotNil(t, obj)
 		if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
 			txnID = chunkObj.xactID
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
 		assert.NoError(t, err, "open "+description)
 		assert.NotNil(t, r, "open stream of "+description)
 		if err == nil && r != nil {
-			data, err := ioutil.ReadAll(r)
+			data, err := io.ReadAll(r)
 			assert.NoError(t, err, "read all of "+description)
 			assert.Equal(t, contents, string(data), description+" contents is ok")
 			_ = r.Close()
@@ -716,7 +716,7 @@ func testFutureProof(t *testing.T, f *Fs) {
 			name = f.makeChunkName(name, part-1, "", "")
 		}
 		item := fstest.Item{Path: name, ModTime: modTime}
-		_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
+		obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
 		assert.NotNil(t, obj, msg)
 	}

@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
 	assert.Error(t, err)

 	// Rcat must fail
-	in := ioutil.NopCloser(bytes.NewBufferString("abc"))
-	robj, err := operations.Rcat(ctx, f, file, in, modTime)
+	in := io.NopCloser(bytes.NewBufferString("abc"))
+	robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
 	assert.Nil(t, robj)
 	assert.NotNil(t, err)
 	if err != nil {
@@ -790,7 +790,7 @@ func testBackwardsCompatibility(t *testing.T, f *Fs) {
 	newFile := func(f fs.Fs, name string) (fs.Object, string) {
 		filename := path.Join(dir, name)
 		item := fstest.Item{Path: filename, ModTime: modTime}
-		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+		obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
 		require.NotNil(t, obj)
 		return obj, filename
 	}
@@ -844,7 +844,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
 	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
 	item := fstest.Item{Path: "movefile", ModTime: modTime}
 	contents := "abcdef"
-	_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
+	file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)

 	dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
 	dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
 	r, err := dstFile.Open(ctx)
 	assert.NoError(t, err)
 	assert.NotNil(t, r)
-	data, err := ioutil.ReadAll(r)
+	data, err := io.ReadAll(r)
 	assert.NoError(t, err)
 	assert.Equal(t, contents, string(data))
 	_ = r.Close()
@@ -35,6 +35,7 @@ func TestIntegration(t *testing.T) {
 			"MimeType",
 			"GetTier",
 			"SetTier",
+			"Metadata",
 		},
 		UnimplementableFsMethods: []string{
 			"PublicLink",
@@ -53,6 +54,7 @@ func TestIntegration(t *testing.T) {
 			{Name: name, Key: "type", Value: "chunker"},
 			{Name: name, Key: "remote", Value: tempDir},
 		}
+		opt.QuickTestOK = true
 	}
 	fstests.Run(t, &opt)
 }
1105 backend/combine/combine.go Normal file
File diff suppressed because it is too large
94 backend/combine/combine_internal_test.go Normal file
@@ -0,0 +1,94 @@
package combine

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAdjustmentDo(t *testing.T) {
	for _, test := range []struct {
		root       string
		mountpoint string
		in         string
		want       string
		wantErr    error
	}{
		{
			root:       "",
			mountpoint: "mountpoint",
			in:         "path/to/file.txt",
			want:       "mountpoint/path/to/file.txt",
		},
		{
			root:       "mountpoint",
			mountpoint: "mountpoint",
			in:         "path/to/file.txt",
			want:       "path/to/file.txt",
		},
		{
			root:       "mountpoint/path",
			mountpoint: "mountpoint",
			in:         "path/to/file.txt",
			want:       "to/file.txt",
		},
		{
			root:       "mountpoint/path",
			mountpoint: "mountpoint",
			in:         "wrongpath/to/file.txt",
			want:       "",
			wantErr:    errNotUnderRoot,
		},
	} {
		what := fmt.Sprintf("%+v", test)
		a := newAdjustment(test.root, test.mountpoint)
		got, gotErr := a.do(test.in)
		assert.Equal(t, test.wantErr, gotErr)
		assert.Equal(t, test.want, got, what)
	}
}

func TestAdjustmentUndo(t *testing.T) {
	for _, test := range []struct {
		root       string
		mountpoint string
		in         string
		want       string
		wantErr    error
	}{
		{
			root:       "",
			mountpoint: "mountpoint",
			in:         "mountpoint/path/to/file.txt",
			want:       "path/to/file.txt",
		},
		{
			root:       "mountpoint",
			mountpoint: "mountpoint",
			in:         "path/to/file.txt",
			want:       "path/to/file.txt",
		},
		{
			root:       "mountpoint/path",
			mountpoint: "mountpoint",
			in:         "to/file.txt",
			want:       "path/to/file.txt",
		},
		{
			root:       "wrongmountpoint/path",
			mountpoint: "mountpoint",
			in:         "to/file.txt",
			want:       "",
			wantErr:    errNotUnderRoot,
		},
	} {
		what := fmt.Sprintf("%+v", test)
		a := newAdjustment(test.root, test.mountpoint)
		got, gotErr := a.undo(test.in)
		assert.Equal(t, test.wantErr, gotErr)
		assert.Equal(t, test.want, got, what)
	}
}
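combine.go itself is suppressed above, so for orientation: the do/undo pairs in these tests imply an adjustment that rebases a path from an upstream's mountpoint into the combined filesystem's root and back. The following is a hypothetical implementation consistent with the test cases; newAdjustment, do, undo and errNotUnderRoot are real names from the diff, but the bodies here are a guess, not rclone's actual code:

package example

import (
	"errors"
	"path"
	"strings"
)

var errNotUnderRoot = errors.New("file not under root")

type adjustment struct {
	root       string // root of the combined Fs, e.g. "mountpoint/path"
	mountpoint string // where this upstream is mounted, e.g. "mountpoint"
}

func newAdjustment(root, mountpoint string) adjustment {
	return adjustment{root: root, mountpoint: mountpoint}
}

// do maps an upstream path into a path relative to the combined root.
func (a adjustment) do(s string) (string, error) {
	combined := path.Join(a.mountpoint, s)
	if a.root == "" {
		return combined, nil
	}
	prefix := a.root + "/"
	if !strings.HasPrefix(combined, prefix) {
		return "", errNotUnderRoot
	}
	return combined[len(prefix):], nil
}

// undo maps a path relative to the combined root back to an upstream path.
func (a adjustment) undo(s string) (string, error) {
	combined := path.Join(a.root, s)
	prefix := a.mountpoint + "/"
	if !strings.HasPrefix(combined, prefix) {
		return "", errNotUnderRoot
	}
	return combined[len(prefix):], nil
}

This sketch passes all eight table-driven cases above, including the two errNotUnderRoot cases where the input does not live under the configured root or mountpoint.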
92 backend/combine/combine_test.go Normal file
@@ -0,0 +1,92 @@
// Test Combine filesystem interface
package combine_test

import (
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/memory"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

var (
	unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
	unimplementableObjectMethods = []string{}
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		t.Skip("Skipping as -remote not set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

func TestLocal(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	dirs := MakeTestDirs(t, 3)
	upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
	name := "TestCombineLocal"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":dir1",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

func TestMemory(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
	name := "TestCombineMemory"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":dir1",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

func TestMixed(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	dirs := MakeTestDirs(t, 2)
	upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
	name := "TestCombineMixed"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":dir1",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
	for i := 1; i <= n; i++ {
		dir := t.TempDir()
		dirs = append(dirs, dir)
	}
	return dirs
}
@@ -13,7 +13,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"regexp"
 	"strings"
@@ -29,6 +28,7 @@ import (
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fs/operations"
 )
@@ -53,7 +53,7 @@ const (
 	Gzip = 2
 )

-var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")
+var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)

 // Register with Fs
 func init() {
@@ -70,6 +70,9 @@ func init() {
 		Name:        "compress",
 		Description: "Compress a remote",
 		NewFs:       NewFs,
+		MetadataInfo: &fs.MetadataInfo{
+			Help: `Any metadata supported by the underlying remote is read and written.`,
+		},
 		Options: []fs.Option{{
 			Name: "remote",
 			Help: "Remote to compress.",
@@ -87,7 +90,7 @@ Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.

-Level -2 uses Huffmann encoding only. Only use if you know what you
+Level -2 uses Huffman encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
 			Default: sgzip.DefaultCompression,
@@ -127,7 +130,7 @@ type Fs struct {
 	features *fs.Features // optional features
 }

-// NewFs contstructs an Fs from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
@@ -180,6 +183,10 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		SetTier:                 true,
 		BucketBased:             true,
 		CanHaveEmptyDirectories: true,
+		ReadMetadata:            true,
+		WriteMetadata:           true,
+		UserMetadata:            true,
+		PartialUploads:          true,
 	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
 	// We support reading MIME types no matter the wrapped fs
 	f.features.ReadMimeType = true
@@ -222,7 +229,7 @@ func processFileName(compressedFileName string) (origFileName string, extension
 	// Separate the filename and size from the extension
 	extensionPos := strings.LastIndex(compressedFileName, ".")
 	if extensionPos == -1 {
-		return "", "", 0, errors.New("File name has no extension")
+		return "", "", 0, errors.New("file name has no extension")
 	}
 	extension = compressedFileName[extensionPos:]
 	nameWithSize := compressedFileName[:extensionPos]
@@ -231,11 +238,11 @@ func processFileName(compressedFileName string) (origFileName string, extension
 	}
 	match := nameRegexp.FindStringSubmatch(nameWithSize)
 	if match == nil || len(match) != 3 {
-		return "", "", 0, errors.New("Invalid filename")
+		return "", "", 0, errors.New("invalid filename")
 	}
 	size, err := base64ToInt64(match[2])
 	if err != nil {
-		return "", "", 0, errors.New("Could not decode size")
+		return "", "", 0, errors.New("could not decode size")
 	}
 	return match[1], gzFileExt, size, nil
 }
@@ -304,7 +311,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
 		case fs.Directory:
 			f.addDir(&newEntries, x)
 		default:
-			return nil, fmt.Errorf("Unknown object type %T", entry)
+			return nil, fmt.Errorf("unknown object type %T", entry)
 		}
 	}
 	return newEntries, nil
@@ -361,13 +368,16 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	if err != nil {
 		return nil, err
 	}
-	meta := readMetadata(ctx, mo)
-	if meta == nil {
-		return nil, errors.New("error decoding metadata")
+	meta, err := readMetadata(ctx, mo)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding metadata: %w", err)
 	}
 	// Create our Object
 	o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
-	return f.newObject(o, mo, meta), err
+	if err != nil {
+		return nil, err
+	}
+	return f.newObject(o, mo, meta), nil
 }

 // checkCompressAndType checks if an object is compressible and determines it's mime type
@@ -401,6 +411,10 @@ func isCompressible(r io.Reader) (bool, error) {
 	if err != nil {
 		return false, err
 	}
+	err = w.Close()
+	if err != nil {
+		return false, err
+	}
 	ratio := float64(n) / float64(b.Len())
 	return ratio > minCompressionRatio, nil
 }
@@ -441,7 +455,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
 	}

-	// Need to include what we allready read
+	// Need to include what we already read
 	in = &ReadCloserWrapper{
 		Reader: io.MultiReader(bytes.NewReader(buf), in),
 		Closer: in,
@@ -454,7 +468,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 	}

 	fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
-	tempFile, err := ioutil.TempFile("", "rclone-press-")
+	tempFile, err := os.CreateTemp("", "rclone-press-")
 	defer func() {
 		// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
 		// to ignore them
@@ -462,10 +476,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		_ = os.Remove(tempFile.Name())
 	}()
 	if err != nil {
-		return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
+		return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err)
 	}
 	if _, err = io.Copy(tempFile, in); err != nil {
-		return nil, fmt.Errorf("Failed to write temporary local file: %w", err)
+		return nil, fmt.Errorf("failed to write temporary local file: %w", err)
 	}
 	if _, err = tempFile.Seek(0, 0); err != nil {
 		return nil, err
@@ -532,8 +546,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
 	}

 	// Transfer the data
-	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
-	//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
+	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
+	//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
 	if err != nil {
 		if o != nil {
 			removeErr := o.Remove(ctx)
@@ -626,9 +640,11 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
 	// Put the data
 	mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
 	if err != nil {
-		removeErr := mo.Remove(ctx)
-		if removeErr != nil {
-			fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
+		if mo != nil {
+			removeErr := mo.Remove(ctx)
+			if removeErr != nil {
+				fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
+			}
 		}
 		return nil, err
 	}
@@ -665,7 +681,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
 		}
 		return nil, err
 	}
-	return f.newObject(dataObject, mo, meta), err
+	return f.newObject(dataObject, mo, meta), nil
 }

 // Put in to the remote path with the modTime given of the given size
@@ -714,23 +730,23 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
 		err = oldObj.(*Object).Object.Remove(ctx)
 		if err != nil {
-			return nil, fmt.Errorf("Could remove original object: %w", err)
+			return nil, fmt.Errorf("couldn't remove original object: %w", err)
 		}
 	}

 	// If our new object is compressed we have to rename it with the correct size.
-	// Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
+	// Uncompressed objects don't store the size in the name so we they'll already have the correct name.
 	if compressible {
 		wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
 		if err != nil {
-			return nil, fmt.Errorf("Couldn't rename streamed Object.: %w", err)
+			return nil, fmt.Errorf("couldn't rename streamed object: %w", err)
 		}
 		newObj.Object = wrapObj
 	}
 	return newObj, nil
 }

-// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
+// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
 // will break stuff. Right no I can't think of a way to make this work.

 // PutUnchecked uploads the object
@@ -773,9 +789,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

 // Copy src to this remote using server side copy operations.
 //
-// This is stored with the remote path given
+// This is stored with the remote path given.
 //
-// It returns the destination Object and a possible error
+// It returns the destination Object and a possible error.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -823,9 +839,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

 // Move src to this remote using server side move operations.
 //
-// This is stored with the remote path given
+// This is stored with the remote path given.
 //
-// It returns the destination Object and a possible error
+// It returns the destination Object and a possible error.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -900,7 +916,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 func (f *Fs) CleanUp(ctx context.Context) error {
 	do := f.Fs.Features().CleanUp
 	if do == nil {
-		return errors.New("can't CleanUp: not supported by underlying remote")
+		return errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -909,7 +925,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	do := f.Fs.Features().About
 	if do == nil {
-		return nil, errors.New("can't About: not supported by underlying remote")
+		return nil, errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -1028,24 +1044,19 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
 }

 // This function will read the metadata from a metadata object.
-func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
+func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
 	// Open our meradata object
 	rc, err := mo.Open(ctx)
 	if err != nil {
-		return nil
+		return nil, err
 	}
-	defer func() {
-		err := rc.Close()
-		if err != nil {
-			fs.Errorf(mo, "Error closing object: %v", err)
-		}
-	}()
+	defer fs.CheckClose(rc, &err)
 	jr := json.NewDecoder(rc)
 	meta = new(ObjectMetadata)
 	if err = jr.Decode(meta); err != nil {
-		return nil
+		return nil, err
 	}
-	return meta
+	return meta, nil
 }

 // Remove removes this object
@@ -1090,6 +1101,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	origName := o.Remote()
 	if o.meta.Mode != Uncompressed || compressible {
 		newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
+		if err != nil {
+			return err
+		}
 		if newObject.Object.Remote() != o.Object.Remote() {
 			if removeErr := o.Object.Remove(ctx); removeErr != nil {
 				return removeErr
@@ -1103,9 +1117,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 		// If we are, just update the object and metadata
 		newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
-	}
-	if err != nil {
-		return err
+		if err != nil {
+			return err
+		}
 	}
 	// Update object metadata and return
 	o.Object = newObject.Object
@@ -1116,6 +1130,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
 func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
+	if o == nil {
+		log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
+	}
 	return &Object{
 		Object: o,
 		f:      f,
@@ -1128,6 +1145,9 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object

 // This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
 func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
+	if o == nil {
+		log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
+	}
 	return &Object{
 		Object: o,
 		f:      f,
@@ -1155,7 +1175,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
 		return err
 	}
 	if o.meta == nil {
-		o.meta = readMetadata(ctx, o.mo)
+		o.meta, err = readMetadata(ctx, o.mo)
 	}
 	return err
 }
@@ -1208,6 +1228,21 @@ func (o *Object) MimeType(ctx context.Context) string {
 	return o.meta.MimeType
 }

+// Metadata returns metadata for an object
+//
+// It should return nil if there is no Metadata
+func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
+	err := o.loadMetadataIfNotLoaded(ctx)
+	if err != nil {
+		return nil, err
+	}
+	do, ok := o.mo.(fs.Metadataer)
+	if !ok {
+		return nil, nil
+	}
+	return do.Metadata(ctx)
+}
+
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1354,6 +1389,51 @@ func (o *ObjectInfo) Hash(ctx context.Context, ht hash.Type) (string, error) {
 	return "", nil // cannot know the checksum
 }

+// ID returns the ID of the Object if known, or "" if not
+func (o *ObjectInfo) ID() string {
+	do, ok := o.src.(fs.IDer)
+	if !ok {
+		return ""
+	}
+	return do.ID()
+}
+
+// MimeType returns the content type of the Object if
+// known, or "" if not
+func (o *ObjectInfo) MimeType(ctx context.Context) string {
+	do, ok := o.src.(fs.MimeTyper)
+	if !ok {
+		return ""
+	}
+	return do.MimeType(ctx)
+}
+
+// UnWrap returns the Object that this Object is wrapping or
+// nil if it isn't wrapping anything
+func (o *ObjectInfo) UnWrap() fs.Object {
+	return fs.UnWrapObjectInfo(o.src)
+}
+
+// Metadata returns metadata for an object
+//
+// It should return nil if there is no Metadata
+func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
+	do, ok := o.src.(fs.Metadataer)
+	if !ok {
+		return nil, nil
+	}
+	return do.Metadata(ctx)
+}
+
+// GetTier returns storage tier or class of the Object
+func (o *ObjectInfo) GetTier() string {
+	do, ok := o.src.(fs.GetTierer)
+	if !ok {
+		return ""
+	}
+	return do.GetTier()
+}
+
 // ID returns the ID of the Object if known, or "" if not
 func (o *Object) ID() string {
 	do, ok := o.Object.(fs.IDer)
@@ -1406,11 +1486,6 @@ var (
 	_ fs.ChangeNotifier = (*Fs)(nil)
 	_ fs.PublicLinker   = (*Fs)(nil)
 	_ fs.Shutdowner     = (*Fs)(nil)
-	_ fs.ObjectInfo      = (*ObjectInfo)(nil)
-	_ fs.GetTierer       = (*Object)(nil)
-	_ fs.SetTierer       = (*Object)(nil)
-	_ fs.Object          = (*Object)(nil)
-	_ fs.ObjectUnWrapper = (*Object)(nil)
-	_ fs.IDer            = (*Object)(nil)
-	_ fs.MimeTyper       = (*Object)(nil)
+	_ fs.FullObjectInfo = (*ObjectInfo)(nil)
+	_ fs.FullObject     = (*Object)(nil)
 )
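Two things are going on in the readMetadata rewrite above: the function now reports why decoding failed instead of collapsing every failure to nil, and the hand-rolled close-and-log defer is replaced by fs.CheckClose, which folds a Close error into the function's named return. A sketch of that defer pattern in plain Go; checkClose here is a stand-in mirroring what fs.CheckClose does, and readJSON is an illustrative name:

package example

import (
	"encoding/json"
	"io"
	"os"
)

// checkClose closes c and records the Close error in *err
// only if nothing went wrong earlier.
func checkClose(c io.Closer, err *error) {
	cerr := c.Close()
	if *err == nil {
		*err = cerr
	}
}

func readJSON(path string, v interface{}) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	// the named return plus the pointer lets the deferred call
	// surface a Close failure to the caller
	defer checkClose(f, &err)
	return json.NewDecoder(f).Decode(v)
}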
@@ -61,5 +61,6 @@ func TestRemoteGzip(t *testing.T) {
 			{Name: name, Key: "remote", Value: tempdir},
 			{Name: name, Key: "compression_mode", Value: "gzip"},
 		},
+		QuickTestOK: true,
 	})
 }
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
"github.com/rfjakob/eme"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
@@ -37,7 +38,6 @@ const (
|
||||
blockHeaderSize = secretbox.Overhead
|
||||
blockDataSize = 64 * 1024
|
||||
blockSize = blockHeaderSize + blockDataSize
|
||||
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
|
||||
)
|
||||
|
||||
// Errors returned by cipher
|
||||
@@ -53,8 +53,9 @@ var (
|
||||
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
|
||||
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
|
||||
ErrorFileClosed = errors.New("file already closed")
|
||||
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
|
||||
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
|
||||
ErrorBadSeek = errors.New("Seek beyond end of file")
|
||||
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
|
||||
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
|
||||
obfuscQuoteRune = '!'
|
||||
)
|
||||
@@ -96,7 +97,7 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
|
||||
case "obfuscate":
|
||||
mode = NameEncryptionObfuscated
|
||||
default:
|
||||
err = fmt.Errorf("Unknown file name encryption mode %q", s)
|
||||
err = fmt.Errorf("unknown file name encryption mode %q", s)
|
||||
}
|
||||
return mode, err
|
||||
}
|
||||
@@ -127,11 +128,11 @@ type fileNameEncoding interface {
|
||||
// RFC4648
|
||||
//
|
||||
// The standard encoding is modified in two ways
|
||||
// * it becomes lower case (no-one likes upper case filenames!)
|
||||
// * we strip the padding character `=`
|
||||
// - it becomes lower case (no-one likes upper case filenames!)
|
||||
// - we strip the padding character `=`
|
||||
type caseInsensitiveBase32Encoding struct{}
|
||||
|
||||
// EncodeToString encodes a strign using the modified version of
|
||||
// EncodeToString encodes a string using the modified version of
|
||||
// base32 encoding.
|
||||
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
|
||||
encoded := base32.HexEncoding.EncodeToString(src)
@@ -162,34 +163,37 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
 	case "base32768":
 		enc = base32768.SafeEncoding
 	default:
-		err = fmt.Errorf("Unknown file name encoding mode %q", s)
+		err = fmt.Errorf("unknown file name encoding mode %q", s)
 	}
 	return enc, err
 }

 // Cipher defines an encoding and decoding cipher for the crypt backend
 type Cipher struct {
-	dataKey        [32]byte                  // Key for secretbox
-	nameKey        [32]byte                  // 16,24 or 32 bytes
-	nameTweak      [nameCipherBlockSize]byte // used to tweak the name crypto
-	block          gocipher.Block
-	mode           NameEncryptionMode
-	fileNameEnc    fileNameEncoding
-	buffers        sync.Pool // encrypt/decrypt buffers
-	cryptoRand     io.Reader // read crypto random numbers from here
-	dirNameEncrypt bool
+	dataKey         [32]byte                  // Key for secretbox
+	nameKey         [32]byte                  // 16,24 or 32 bytes
+	nameTweak       [nameCipherBlockSize]byte // used to tweak the name crypto
+	block           gocipher.Block
+	mode            NameEncryptionMode
+	fileNameEnc     fileNameEncoding
+	buffers         sync.Pool // encrypt/decrypt buffers
+	cryptoRand      io.Reader // read crypto random numbers from here
+	dirNameEncrypt  bool
+	passBadBlocks   bool // if set passed bad blocks as zeroed blocks
+	encryptedSuffix string
 }

 // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
 func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
 	c := &Cipher{
-		mode:           mode,
-		fileNameEnc:    enc,
-		cryptoRand:     rand.Reader,
-		dirNameEncrypt: dirNameEncrypt,
+		mode:            mode,
+		fileNameEnc:     enc,
+		cryptoRand:      rand.Reader,
+		dirNameEncrypt:  dirNameEncrypt,
+		encryptedSuffix: ".bin",
 	}
 	c.buffers.New = func() interface{} {
-		return make([]byte, blockSize)
+		return new([blockSize]byte)
 	}
 	err := c.Key(password, salt)
 	if err != nil {
@@ -198,11 +202,29 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 	return c, nil
 }

+// setEncryptedSuffix set suffix, or an empty string
+func (c *Cipher) setEncryptedSuffix(suffix string) {
+	if strings.EqualFold(suffix, "none") {
+		c.encryptedSuffix = ""
+		return
+	}
+	if !strings.HasPrefix(suffix, ".") {
+		fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
+		suffix = "." + suffix
+	}
+	c.encryptedSuffix = suffix
+}
+
+// Call to set bad block pass through
+func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
+	c.passBadBlocks = passBadBlocks
+}
+
 // Key creates all the internal keys from the password passed in using
 // scrypt.
 //
 // If salt is "" we use a fixed salt just to make attackers lives
-// slighty harder than using no salt.
+// slightly harder than using no salt.
 //
 // Note that empty password makes all 0x00 keys which is used in the
 // tests.
@@ -230,21 +252,18 @@ func (c *Cipher) Key(password, salt string) (err error) {
 }

 // getBlock gets a block from the pool of size blockSize
-func (c *Cipher) getBlock() []byte {
-	return c.buffers.Get().([]byte)
+func (c *Cipher) getBlock() *[blockSize]byte {
+	return c.buffers.Get().(*[blockSize]byte)
 }

 // putBlock returns a block to the pool of size blockSize
-func (c *Cipher) putBlock(buf []byte) {
-	if len(buf) != blockSize {
-		panic("bad blocksize returned to pool")
-	}
+func (c *Cipher) putBlock(buf *[blockSize]byte) {
 	c.buffers.Put(buf)
 }
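
Switching the pool from []byte to *[blockSize]byte is the standard sync.Pool idiom: storing a pointer means the interface value placed in the pool wraps a single machine word, so Get/Put do not allocate, and the type itself guarantees the buffer size, making the old runtime panic check redundant. A minimal sketch (the constant value is illustrative):

    package main

    import (
    	"fmt"
    	"sync"
    )

    const blockSize = 64*1024 + 16 // illustrative only

    // Pooling pointers to fixed-size arrays rather than slices avoids an
    // allocation on every Put (the slice header would otherwise be boxed
    // into an interface) and makes a wrong-sized buffer a compile error.
    var buffers = sync.Pool{
    	New: func() interface{} { return new([blockSize]byte) },
    }

    func main() {
    	buf := buffers.Get().(*[blockSize]byte)
    	b := (*buf)[:] // take a slice view when a []byte is needed
    	fmt.Println(len(b))
    	buffers.Put(buf)
    }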

 // encryptSegment encrypts a path segment
 //
-// This uses EME with AES
+// This uses EME with AES.
 //
 // EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
 // 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
@@ -254,8 +273,8 @@ func (c *Cipher) putBlock(buf []byte) {
 // same filename must encrypt to the same thing.
 //
 // This means that
-//  * filenames with the same name will encrypt the same
-//  * filenames which start the same won't have a common prefix
+//   - filenames with the same name will encrypt the same
+//   - filenames which start the same won't have a common prefix
 func (c *Cipher) encryptSegment(plaintext string) string {
 	if plaintext == "" {
 		return ""
@@ -508,7 +527,7 @@ func (c *Cipher) encryptFileName(in string) string {
 // EncryptFileName encrypts a file path
 func (c *Cipher) EncryptFileName(in string) string {
 	if c.mode == NameEncryptionOff {
-		return in + encryptedSuffix
+		return in + c.encryptedSuffix
 	}
 	return c.encryptFileName(in)
 }
@@ -568,8 +587,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
 // DecryptFileName decrypts a file path
 func (c *Cipher) DecryptFileName(in string) (string, error) {
 	if c.mode == NameEncryptionOff {
-		remainingLength := len(in) - len(encryptedSuffix)
-		if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
+		remainingLength := len(in) - len(c.encryptedSuffix)
+		if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
 			return "", ErrorNotAnEncryptedFile
 		}
 		decrypted := in[:remainingLength]
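
With the suffix now carried on the Cipher, the off-mode round trip behaves as follows; an in-package sketch with illustrative values (mirroring the tests later in this diff):

    c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
    c.setEncryptedSuffix(".jpg")
    _ = c.EncryptFileName("1/12/123")              // "1/12/123.jpg"
    name, err := c.DecryptFileName("1/12/123.jpg") // "1/12/123", nil
    _, err = c.DecryptFileName("1/12/123.png")     // "", ErrorNotAnEncryptedFile
    c.setEncryptedSuffix("none")                   // empty suffix: names pass through unchanged
    _, _ = name, err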
@@ -609,7 +628,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
 // fromReader fills the nonce from an io.Reader - normally the OSes
 // crypto random number generator
 func (n *nonce) fromReader(in io.Reader) error {
-	read, err := io.ReadFull(in, (*n)[:])
+	read, err := readers.ReadFill(in, (*n)[:])
 	if read != fileNonceSize {
 		return fmt.Errorf("short read of nonce: %w", err)
 	}
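
readers.ReadFill is rclone's own helper (lib/readers). Judging from how this diff uses it, it reads until the buffer is full or the reader errors, and unlike io.ReadFull it returns the reader's own error (typically io.EOF) on a short read rather than io.ErrUnexpectedEOF, leaving the caller free to inspect n. A sketch of those assumed semantics:

    // readFill keeps reading until buf is full or the reader errors.
    // A short read returns n together with the underlying error, never
    // io.ErrUnexpectedEOF, which is why the callers above test n and
    // err == io.EOF separately.
    func readFill(r io.Reader, buf []byte) (n int, err error) {
    	for n < len(buf) && err == nil {
    		var nn int
    		nn, err = r.Read(buf[n:])
    		n += nn
    	}
    	return n, err
    }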
@@ -664,8 +683,8 @@ type encrypter struct {
 	in       io.Reader
 	c        *Cipher
 	nonce    nonce
-	buf      []byte
-	readBuf  []byte
+	buf      *[blockSize]byte
+	readBuf  *[blockSize]byte
 	bufIndex int
 	bufSize  int
 	err      error
@@ -690,9 +709,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
 		}
 	}
 	// Copy magic into buffer
-	copy(fh.buf, fileMagicBytes)
+	copy((*fh.buf)[:], fileMagicBytes)
 	// Copy nonce into buffer
-	copy(fh.buf[fileMagicSize:], fh.nonce[:])
+	copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
 	return fh, nil
 }

@@ -707,22 +726,20 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
 	if fh.bufIndex >= fh.bufSize {
 		// Read data
 		// FIXME should overlap the reads with a go-routine and 2 buffers?
-		readBuf := fh.readBuf[:blockDataSize]
-		n, err = io.ReadFull(fh.in, readBuf)
+		readBuf := (*fh.readBuf)[:blockDataSize]
+		n, err = readers.ReadFill(fh.in, readBuf)
 		if n == 0 {
-			// err can't be nil since:
-			// n == len(buf) if and only if err == nil.
 			return fh.finish(err)
 		}
 		// possibly err != nil here, but we will process the
-		// data and the next call to ReadFull will return 0, err
+		// data and the next call to ReadFill will return 0, err
 		// Encrypt the block using the nonce
-		secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+		secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
 		fh.bufIndex = 0
 		fh.bufSize = blockHeaderSize + n
 		fh.nonce.increment()
 	}
-	n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
+	n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
 	fh.bufIndex += n
 	return n, nil
 }
@@ -763,8 +780,8 @@ type decrypter struct {
 	nonce        nonce
 	initialNonce nonce
 	c            *Cipher
-	buf          []byte
-	readBuf      []byte
+	buf          *[blockSize]byte
+	readBuf      *[blockSize]byte
 	bufIndex     int
 	bufSize      int
 	err          error
@@ -782,12 +799,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 		limit: -1,
 	}
 	// Read file header (magic + nonce)
-	readBuf := fh.readBuf[:fileHeaderSize]
-	_, err := io.ReadFull(fh.rc, readBuf)
-	if err == io.EOF || err == io.ErrUnexpectedEOF {
+	readBuf := (*fh.readBuf)[:fileHeaderSize]
+	n, err := readers.ReadFill(fh.rc, readBuf)
+	if n < fileHeaderSize && err == io.EOF {
 		// This read from 0..fileHeaderSize-1 bytes
 		return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
-	} else if err != nil {
+	} else if err != io.EOF && err != nil {
 		return nil, fh.finishAndClose(err)
 	}
 	// check the magic
@@ -845,10 +862,8 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
 func (fh *decrypter) fillBuffer() (err error) {
 	// FIXME should overlap the reads with a go-routine and 2 buffers?
 	readBuf := fh.readBuf
-	n, err := io.ReadFull(fh.rc, readBuf)
+	n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
 	if n == 0 {
-		// err can't be nil since:
-		// n == len(buf) if and only if err == nil.
 		return err
 	}
 	// possibly err != nil here, but we will process the data and
@@ -856,18 +871,25 @@ func (fh *decrypter) fillBuffer() (err error) {

 	// Check header + 1 byte exists
 	if n <= blockHeaderSize {
-		if err != nil {
+		if err != nil && err != io.EOF {
 			return err // return pending error as it is likely more accurate
 		}
 		return ErrorEncryptedFileBadHeader
 	}
 	// Decrypt the block using the nonce
-	_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+	_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
 	if !ok {
-		if err != nil {
+		if err != nil && err != io.EOF {
 			return err // return pending error as it is likely more accurate
 		}
-		return ErrorEncryptedBadBlock
+		if !fh.c.passBadBlocks {
+			return ErrorEncryptedBadBlock
+		}
+		fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
+		// Zero out the bad block and continue
+		for i := range (*fh.buf)[:n] {
+			(*fh.buf)[i] = 0
+		}
 	}
 	fh.bufIndex = 0
 	fh.bufSize = n - blockHeaderSize
@@ -893,7 +915,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
 	if fh.limit >= 0 && fh.limit < int64(toCopy) {
 		toCopy = int(fh.limit)
 	}
-	n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
+	n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
 	fh.bufIndex += n
 	if fh.limit >= 0 {
 		fh.limit -= int64(n)
@@ -904,9 +926,8 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
 	return n, nil
 }

-// calculateUnderlying converts an (offset, limit) in a crypted file
-// into an (underlyingOffset, underlyingLimit) for the underlying
-// file.
+// calculateUnderlying converts an (offset, limit) in an encrypted file
+// into an (underlyingOffset, underlyingLimit) for the underlying file.
 //
 // It also returns number of bytes to discard after reading the first
 // block and number of blocks this is from the start so the nonce can
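
With the file format constants used in this file (a 32-byte header of 8 magic plus 24 nonce bytes, and 64 KiB plaintext blocks each carrying a 16-byte secretbox overhead), the mapping the comment describes works out as below. This is a sketch of the arithmetic, not the function body itself:

    const (
    	fileHeaderSize  = 32        // 8 byte magic + 24 byte nonce
    	blockHeaderSize = 16        // secretbox overhead per block
    	blockDataSize   = 64 * 1024 // plaintext bytes per block
    	blockSize       = blockHeaderSize + blockDataSize
    )

    // underlyingOffset sketches the offset arithmetic: which block the
    // plaintext offset falls in, how many plaintext bytes to discard
    // after decrypting that block, and where the block starts in the
    // encrypted file.
    func underlyingOffset(offset int64) (underlying, discard, blocks int64) {
    	blocks = offset / blockDataSize  // whole blocks before the offset
    	discard = offset % blockDataSize // plaintext bytes to skip in that block
    	underlying = int64(fileHeaderSize) + blocks*blockSize
    	return underlying, discard, blocks
    }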
@@ -1085,7 +1106,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {

 // DecryptDataSeek decrypts the data stream from offset
 //
-// The open function must return a ReadCloser opened to the offset supplied
+// The open function must return a ReadCloser opened to the offset supplied.
 //
 // You must use this form of DecryptData if you might want to Seek the file handle
 func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {

--- a/backend/crypt/cipher_test.go
+++ b/backend/crypt/cipher_test.go
@@ -8,7 +8,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"strings"
 	"testing"

@@ -28,14 +27,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
 		{"off", NameEncryptionOff, ""},
 		{"standard", NameEncryptionStandard, ""},
 		{"obfuscate", NameEncryptionObfuscated, ""},
-		{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
+		{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
 	} {
 		actual, actualErr := NewNameEncryptionMode(test.in)
 		assert.Equal(t, actual, test.expected)
 		if test.expectedErr == "" {
 			assert.NoError(t, actualErr)
 		} else {
-			assert.Error(t, actualErr, test.expectedErr)
+			assert.EqualError(t, actualErr, test.expectedErr)
 		}
 	}
 }
@@ -406,6 +405,13 @@ func TestNonStandardEncryptFileName(t *testing.T) {
 	// Off mode
 	c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
 	assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
+	// Off mode with custom suffix
+	c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
+	c.setEncryptedSuffix(".jpg")
+	assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
+	// Off mode with empty suffix
+	c.setEncryptedSuffix("none")
+	assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
 	// Obfuscation mode
 	c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
 	assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
@@ -484,21 +490,27 @@ func TestNonStandardDecryptFileName(t *testing.T) {
 		in           string
 		expected     string
 		expectedErr  error
+		customSuffix string
 	}{
-		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
-		{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
-		{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
-		{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
-		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
-		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
-		{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
-		{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
-		{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
-		{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
-		{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
-		{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
+		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""},
+		{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""},
+		{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""},
+		{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""},
+		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""},
+		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""},
+		{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"},
+		{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"},
+		{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""},
+		{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""},
+		{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""},
+		{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""},
+		{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
+		{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
 	} {
 		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
+		if test.customSuffix != "" {
+			c.setEncryptedSuffix(test.customSuffix)
+		}
 		actual, actualErr := c.DecryptFileName(test.in)
 		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
 		assert.Equal(t, test.expected, actual, what)
@@ -727,7 +739,7 @@ func TestNonceFromReader(t *testing.T) {
 	assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
 	buf = bytes.NewBufferString("123456789abcdefghijklmn")
 	err = x.fromReader(buf)
-	assert.Error(t, err, "short read of nonce")
+	assert.EqualError(t, err, "short read of nonce: EOF")
 }

 func TestNonceFromBuf(t *testing.T) {
@@ -1051,7 +1063,7 @@ func TestRandomSource(t *testing.T) {
 	_, _ = source.Read(buf)
 	sink = newRandomSource(1e8)
 	_, err = io.Copy(sink, source)
-	assert.Error(t, err, "Error in stream")
+	assert.EqualError(t, err, "Error in stream at 1")
 }

 type zeroes struct{}
@@ -1073,7 +1085,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
 	source := newRandomSource(copySize)
 	encrypted, err := c.newEncrypter(source, nil)
 	assert.NoError(t, err)
-	decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
+	decrypted, err := c.newDecrypter(io.NopCloser(encrypted))
 	assert.NoError(t, err)
 	sink := newRandomSource(copySize)
 	n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1144,15 +1156,15 @@ func TestEncryptData(t *testing.T) {
 		buf := bytes.NewBuffer(test.in)
 		encrypted, err := c.EncryptData(buf)
 		assert.NoError(t, err)
-		out, err := ioutil.ReadAll(encrypted)
+		out, err := io.ReadAll(encrypted)
 		assert.NoError(t, err)
 		assert.Equal(t, test.expected, out)

 		// Check we can decode the data properly too...
 		buf = bytes.NewBuffer(out)
-		decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
+		decrypted, err := c.DecryptData(io.NopCloser(buf))
 		assert.NoError(t, err)
-		out, err = ioutil.ReadAll(decrypted)
+		out, err = io.ReadAll(decrypted)
 		assert.NoError(t, err)
 		assert.Equal(t, test.in, out)
 	}
@@ -1168,13 +1180,13 @@ func TestNewEncrypter(t *testing.T) {
 	fh, err := c.newEncrypter(z, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
-	assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
+	assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])

 	// Test error path
 	c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
 	fh, err = c.newEncrypter(z, nil)
 	assert.Nil(t, fh)
-	assert.Error(t, err, "short read of nonce")
+	assert.EqualError(t, err, "short read of nonce: EOF")
 }

 // Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1187,7 +1199,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
 	fh, err := c.newEncrypter(in, nil)
 	assert.NoError(t, err)

-	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
+	n, err := io.CopyN(io.Discard, fh, 1e6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(32), n)
 }
@@ -1225,7 +1237,7 @@ func TestNewDecrypter(t *testing.T) {
 		cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
 		fh, err = c.newDecrypter(cd)
 		assert.Nil(t, fh)
-		assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
+		assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
 		assert.Equal(t, 1, cd.closed)
 	}

@@ -1233,7 +1245,7 @@ func TestNewDecrypter(t *testing.T) {
 	cd = newCloseDetector(er)
 	fh, err = c.newDecrypter(cd)
 	assert.Nil(t, fh)
-	assert.Error(t, err, "potato")
+	assert.EqualError(t, err, "potato")
 	assert.Equal(t, 1, cd.closed)

 	// bad magic
@@ -1244,7 +1256,7 @@ func TestNewDecrypter(t *testing.T) {
 		cd := newCloseDetector(bytes.NewBuffer(file0copy))
 		fh, err := c.newDecrypter(cd)
 		assert.Nil(t, fh)
-		assert.Error(t, err, ErrorEncryptedBadMagic.Error())
+		assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
 		file0copy[i] ^= 0x1
 		assert.Equal(t, 1, cd.closed)
 	}
@@ -1257,12 +1269,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {

 	in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
 	in1 := bytes.NewBuffer(file16)
-	in := ioutil.NopCloser(io.MultiReader(in1, in2))
+	in := io.NopCloser(io.MultiReader(in1, in2))

 	fh, err := c.newDecrypter(in)
 	assert.NoError(t, err)

-	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
+	n, err := io.CopyN(io.Discard, fh, 1e6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(16), n)
 }
@@ -1274,14 +1286,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {

 	// Make random data
 	const dataSize = 150000
-	plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
+	plaintext, err := io.ReadAll(newRandomSource(dataSize))
 	assert.NoError(t, err)

 	// Encrypt the data
 	buf := bytes.NewBuffer(plaintext)
 	encrypted, err := c.EncryptData(buf)
 	assert.NoError(t, err)
-	ciphertext, err := ioutil.ReadAll(encrypted)
+	ciphertext, err := io.ReadAll(encrypted)
 	assert.NoError(t, err)

 	trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1300,7 +1312,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 				end = len(ciphertext)
 			}
 		}
-		reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
+		reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
 		return reader, nil
 	}

@@ -1490,14 +1502,16 @@ func TestDecrypterRead(t *testing.T) {
 			assert.NoError(t, err, what)
 			continue
 		}
-		_, err = ioutil.ReadAll(fh)
+		_, err = io.ReadAll(fh)
 		var expectedErr error
 		switch {
+		case i == fileHeaderSize:
+			// This would normally produce an error *except* on the first block
+			expectedErr = nil
 		case i <= fileHeaderSize+blockHeaderSize:
 			expectedErr = ErrorEncryptedFileBadHeader
 		default:
-			expectedErr = io.ErrUnexpectedEOF
+			expectedErr = ErrorEncryptedBadBlock
 		}
 		if expectedErr != nil {
 			assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1514,8 +1528,8 @@ func TestDecrypterRead(t *testing.T) {
 	cd := newCloseDetector(in)
 	fh, err := c.newDecrypter(cd)
 	assert.NoError(t, err)
-	_, err = ioutil.ReadAll(fh)
-	assert.Error(t, err, "potato")
+	_, err = io.ReadAll(fh)
+	assert.EqualError(t, err, "potato")
 	assert.Equal(t, 0, cd.closed)

 	// Test corrupting the input
@@ -1524,17 +1538,28 @@ func TestDecrypterRead(t *testing.T) {
 	copy(file16copy, file16)
 	for i := range file16copy {
 		file16copy[i] ^= 0xFF
-		fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
+		fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
 		if i < fileMagicSize {
-			assert.Error(t, err, ErrorEncryptedBadMagic.Error())
+			assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
 			assert.Nil(t, fh)
 		} else {
 			assert.NoError(t, err)
-			_, err = ioutil.ReadAll(fh)
-			assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
+			_, err = io.ReadAll(fh)
+			assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
 		}
 		file16copy[i] ^= 0xFF
 	}

+	// Test that we can corrupt a byte and read zeroes if
+	// passBadBlocks is set
+	copy(file16copy, file16)
+	file16copy[len(file16copy)-1] ^= 0xFF
+	c.passBadBlocks = true
+	fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
+	assert.NoError(t, err)
+	buf, err := io.ReadAll(fh)
+	assert.NoError(t, err)
+	assert.Equal(t, make([]byte, 16), buf)
 }

 func TestDecrypterClose(t *testing.T) {
@@ -1555,7 +1580,7 @@ func TestDecrypterClose(t *testing.T) {

 	// double close
 	err = fh.Close()
-	assert.Error(t, err, ErrorFileClosed.Error())
+	assert.EqualError(t, err, ErrorFileClosed.Error())
 	assert.Equal(t, 1, cd.closed)

 	// try again reading the file this time
@@ -1565,7 +1590,7 @@ func TestDecrypterClose(t *testing.T) {
 	assert.Equal(t, 0, cd.closed)

 	// close after reading
-	out, err := ioutil.ReadAll(fh)
+	out, err := io.ReadAll(fh)
 	assert.NoError(t, err)
 	assert.Equal(t, []byte{1}, out)
 	assert.Equal(t, io.EOF, fh.err)
@@ -1582,8 +1607,6 @@ func TestPutGetBlock(t *testing.T) {
 	block := c.getBlock()
 	c.putBlock(block)
 	c.putBlock(block)
-
-	assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
 }

 func TestKey(t *testing.T) {

--- a/backend/crypt/crypt.go
+++ b/backend/crypt/crypt.go
@@ -28,6 +28,9 @@ func init() {
 		Description: "Encrypt/Decrypt a remote",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
+		MetadataInfo: &fs.MetadataInfo{
+			Help: `Any metadata supported by the underlying remote is read and written.`,
+		},
 		Options: []fs.Option{{
 			Name: "remote",
 			Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -45,7 +48,7 @@ func init() {
 				Help:  "Very simple filename obfuscation.",
 			}, {
 				Value: "off",
-				Help:  "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
+				Help:  "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
 			},
 		},
 	}, {
@@ -76,7 +79,9 @@ NB If filename_encryption is "off" then this option will do nothing.`,
 	}, {
 		Name:    "server_side_across_configs",
 		Default: false,
-		Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
+		Help: `Deprecated: use --server-side-across-configs instead.
+
+Allow server-side operations (e.g. copy) to work across different crypt configs.

 Normally this option is not what you want, but if you have two crypts
 pointing to the same backend you can use it.
@@ -116,13 +121,22 @@ names, or for debugging purposes.`,
 				Help:  "Encrypt file data.",
 			},
 		},
+	}, {
+		Name: "pass_bad_blocks",
+		Help: `If set this will pass bad blocks through as all 0.
+
+This should not be set in normal operation, it should only be set if
+trying to recover an encrypted file with errors and it is desired to
+recover as much of the file as possible.`,
+		Default:  false,
+		Advanced: true,
 	}, {
 		Name: "filename_encoding",
 		Help: `How to encode the encrypted filename to text string.

 This option could help with shortening the encrypted filename. The
 suitable option would depend on the way your remote count the filename
-length and if it's case sensitve.`,
+length and if it's case sensitive.`,
 		Default: "base32",
 		Examples: []fs.OptionExample{
 			{
@@ -135,10 +149,18 @@ length and if it's case sensitve.`,
 			},
 			{
 				Value: "base32768",
-				Help:  "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
+				Help:  "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
 			},
 		},
 		Advanced: true,
+	}, {
+		Name: "suffix",
+		Help: `If this is set it will override the default suffix of ".bin".
+
+Setting suffix to "none" will result in an empty suffix. This may be useful
+when the path length is critical.`,
+		Default:  ".bin",
+		Advanced: true,
 	}},
 	})
 }
@@ -171,6 +193,8 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
 	if err != nil {
 		return nil, fmt.Errorf("failed to make cipher: %w", err)
 	}
+	cipher.setEncryptedSuffix(opt.Suffix)
+	cipher.setPassBadBlocks(opt.PassBadBlocks)
 	return cipher, nil
 }
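
The recovery behaviour that pass_bad_blocks enables can be sketched in-package as follows (a sketch of intended use, not actual rclone code; the real config plumbing also decrypts the obscured passwords first):

    // recoverData decrypts as much of a damaged encrypted stream as
    // possible: with passBadBlocks set, corrupted 64 KiB blocks come back
    // as zeroes (with an error logged) instead of aborting the read.
    func recoverData(c *Cipher, damaged io.ReadCloser) ([]byte, error) {
    	c.setPassBadBlocks(true)
    	rc, err := c.DecryptData(damaged)
    	if err != nil {
    		return nil, err
    	}
    	defer func() { _ = rc.Close() }()
    	return io.ReadAll(rc)
    }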

@@ -232,7 +256,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 	// the features here are ones we could support, and they are
 	// ANDed with the ones from wrappedFs
 	f.features = (&fs.Features{
-		CaseInsensitive:         cipher.NameEncryptionMode() == NameEncryptionOff,
+		CaseInsensitive:         !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
 		DuplicateFiles:          true,
 		ReadMimeType:            false, // MimeTypes not supported with crypt
 		WriteMimeType:           false,
@@ -241,6 +265,10 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		SetTier:                 true,
 		GetTier:                 true,
 		ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
+		ReadMetadata:            true,
+		WriteMetadata:           true,
+		UserMetadata:            true,
+		PartialUploads:          true,
 	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

 	return f, err
@@ -256,7 +284,9 @@ type Options struct {
 	Password2               string `config:"password2"`
 	ServerSideAcrossConfigs bool   `config:"server_side_across_configs"`
 	ShowMapping             bool   `config:"show_mapping"`
+	PassBadBlocks           bool   `config:"pass_bad_blocks"`
 	FilenameEncoding        string `config:"filename_encoding"`
+	Suffix                  string `config:"suffix"`
 }

 // Fs represents a wrapped fs.Fs
@@ -328,7 +358,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
 		case fs.Directory:
 			f.addDir(ctx, &newEntries, x)
 		default:
-			return nil, fmt.Errorf("Unknown object type %T", entry)
+			return nil, fmt.Errorf("unknown object type %T", entry)
 		}
 	}
 	return newEntries, nil
@@ -390,6 +420,8 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..

 // put implements Put or PutStream
 func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+	ci := fs.GetConfig(ctx)
+
 	if f.opt.NoDataEncryption {
 		o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
 		if err == nil && o != nil {
@@ -407,6 +439,9 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 	// Find a hash the destination supports to compute a hash of
 	// the encrypted data
 	ht := f.Fs.Hashes().GetOne()
+	if ci.IgnoreChecksum {
+		ht = hash.None
+	}
 	var hasher *hash.MultiHasher
 	if ht != hash.None {
 		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -443,7 +478,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 			if err != nil {
 				fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 			}
-			return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
+			return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
 		}
 		fs.Debugf(src, "%v = %s OK", ht, srcHash)
 	}
@@ -501,9 +536,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given
+// This is stored with the remote path given.
 //
-// It returns the destination Object and a possible error
+// It returns the destination Object and a possible error.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -526,9 +561,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given
+// This is stored with the remote path given.
 //
-// It returns the destination Object and a possible error
+// It returns the destination Object and a possible error.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -597,7 +632,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 func (f *Fs) CleanUp(ctx context.Context) error {
 	do := f.Fs.Features().CleanUp
 	if do == nil {
-		return errors.New("can't CleanUp")
+		return errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -606,7 +641,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	do := f.Fs.Features().About
 	if do == nil {
-		return nil, errors.New("About not supported")
+		return nil, errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -1041,10 +1076,11 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
 	// Get the underlying object if there is one
 	if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
 		// Prefer direct interface assertion
-	} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
-		// Otherwise likely is an operations.OverrideRemote
+	} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
+		// Unwrap if it is an operations.OverrideRemote
 		srcObj = do.UnWrap()
 	} else {
+		// Otherwise don't unwrap any further
 		return "", nil
 	}
 	// if this is wrapping a local object then we work out the hash
@@ -1056,6 +1092,50 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
 	return "", nil
 }

+// GetTier returns storage tier or class of the Object
+func (o *ObjectInfo) GetTier() string {
+	do, ok := o.ObjectInfo.(fs.GetTierer)
+	if !ok {
+		return ""
+	}
+	return do.GetTier()
+}
+
+// ID returns the ID of the Object if known, or "" if not
+func (o *ObjectInfo) ID() string {
+	do, ok := o.ObjectInfo.(fs.IDer)
+	if !ok {
+		return ""
+	}
+	return do.ID()
+}
+
+// Metadata returns metadata for an object
+//
+// It should return nil if there is no Metadata
+func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
+	do, ok := o.ObjectInfo.(fs.Metadataer)
+	if !ok {
+		return nil, nil
+	}
+	return do.Metadata(ctx)
+}
+
+// MimeType returns the content type of the Object if
+// known, or "" if not
+//
+// This is deliberately unsupported so we don't leak mime type info by
+// default.
+func (o *ObjectInfo) MimeType(ctx context.Context) string {
+	return ""
+}
+
+// UnWrap returns the Object that this Object is wrapping or
+// nil if it isn't wrapping anything
+func (o *ObjectInfo) UnWrap() fs.Object {
+	return fs.UnWrapObjectInfo(o.ObjectInfo)
+}
+
 // ID returns the ID of the Object if known, or "" if not
 func (o *Object) ID() string {
 	do, ok := o.Object.(fs.IDer)
@@ -1084,6 +1164,26 @@ func (o *Object) GetTier() string {
 	return do.GetTier()
 }

+// Metadata returns metadata for an object
+//
+// It should return nil if there is no Metadata
+func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
+	do, ok := o.Object.(fs.Metadataer)
+	if !ok {
+		return nil, nil
+	}
+	return do.Metadata(ctx)
+}
+
+// MimeType returns the content type of the Object if
+// known, or "" if not
+//
+// This is deliberately unsupported so we don't leak mime type info by
+// default.
+func (o *Object) MimeType(ctx context.Context) string {
+	return ""
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = (*Fs)(nil)
@@ -1106,10 +1206,6 @@ var (
 	_ fs.UserInfoer      = (*Fs)(nil)
 	_ fs.Disconnecter    = (*Fs)(nil)
 	_ fs.Shutdowner      = (*Fs)(nil)
-	_ fs.ObjectInfo      = (*ObjectInfo)(nil)
-	_ fs.Object          = (*Object)(nil)
-	_ fs.ObjectUnWrapper = (*Object)(nil)
-	_ fs.IDer            = (*Object)(nil)
-	_ fs.SetTierer       = (*Object)(nil)
-	_ fs.GetTierer       = (*Object)(nil)
+	_ fs.FullObjectInfo  = (*ObjectInfo)(nil)
+	_ fs.FullObject      = (*Object)(nil)
 )

--- a/backend/crypt/crypt_internal_test.go
+++ b/backend/crypt/crypt_internal_test.go
@@ -17,41 +17,28 @@ import (
 	"github.com/stretchr/testify/require"
 )

-type testWrapper struct {
-	fs.ObjectInfo
-}
-
-// UnWrap returns the Object that this Object is wrapping or nil if it
-// isn't wrapping anything
-func (o testWrapper) UnWrap() fs.Object {
-	if o, ok := o.ObjectInfo.(fs.Object); ok {
-		return o
-	}
-	return nil
-}
-
 // Create a temporary local fs to upload things from

-func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
+func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
 	localFs, err := fs.TemporaryLocalFs(context.Background())
 	require.NoError(t, err)
-	cleanup = func() {
+	t.Cleanup(func() {
 		require.NoError(t, localFs.Rmdir(context.Background(), ""))
-	}
-	return localFs, cleanup
+	})
+	return localFs
 }

 // Upload a file to a remote
-func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
+func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
 	inBuf := bytes.NewBufferString(contents)
 	t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
 	upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
 	obj, err := f.Put(context.Background(), inBuf, upSrc)
 	require.NoError(t, err)
-	cleanup = func() {
+	t.Cleanup(func() {
 		require.NoError(t, obj.Remove(context.Background()))
-	}
-	return obj, cleanup
+	})
+	return obj
 }

 // Test the ObjectInfo
@@ -65,11 +52,9 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 		path = "_wrap"
 	}

-	localFs, cleanupLocalFs := makeTempLocalFs(t)
-	defer cleanupLocalFs()
+	localFs := makeTempLocalFs(t)

-	obj, cleanupObj := uploadFile(t, localFs, path, contents)
-	defer cleanupObj()
+	obj := uploadFile(t, localFs, path, contents)

 	// encrypt the data
 	inBuf := bytes.NewBufferString(contents)
@@ -83,7 +68,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 	var oi fs.ObjectInfo = obj
 	if wrap {
 		// wrap the object in an fs.ObjectUnwrapper if required
-		oi = testWrapper{oi}
+		oi = fs.NewOverrideRemote(oi, "new_remote")
 	}

 	// wrap the object in a crypt for upload using the nonce we
@@ -91,7 +76,9 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 	src := f.newObjectInfo(oi, nonce)

 	// Test ObjectInfo methods
-	assert.Equal(t, int64(outBuf.Len()), src.Size())
+	if !f.opt.NoDataEncryption {
+		assert.Equal(t, int64(outBuf.Len()), src.Size())
+	}
 	assert.Equal(t, f, src.Fs())
 	assert.NotEqual(t, path, src.Remote())

@@ -114,16 +101,13 @@ func testComputeHash(t *testing.T, f *Fs) {
 		t.Skipf("%v: does not support hashes", f.Fs)
 	}

-	localFs, cleanupLocalFs := makeTempLocalFs(t)
-	defer cleanupLocalFs()
+	localFs := makeTempLocalFs(t)

 	// Upload a file to localFs as a test object
-	localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
-	defer cleanupLocalObj()
+	localObj := uploadFile(t, localFs, path, contents)

 	// Upload the same data to the remote Fs also
-	remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
-	defer cleanupRemoteObj()
+	remoteObj := uploadFile(t, f, path, contents)

 	// Calculate the expected Hash of the remote object
 	computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)

--- a/backend/crypt/crypt_test.go
+++ b/backend/crypt/crypt_test.go
@@ -4,6 +4,7 @@ package crypt_test
 import (
 	"os"
 	"path/filepath"
+	"runtime"
 	"testing"

 	"github.com/rclone/rclone/backend/crypt"
@@ -46,6 +47,7 @@ func TestStandardBase32(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }

@@ -67,6 +69,7 @@ func TestStandardBase64(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }

@@ -88,6 +91,7 @@ func TestStandardBase32768(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }

@@ -109,6 +113,7 @@ func TestOff(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }

@@ -117,6 +122,9 @@ func TestObfuscate(t *testing.T) {
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping as -remote set")
 	}
+	if runtime.GOOS == "darwin" {
+		t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
+	}
 	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
 	name := "TestCrypt3"
 	fstests.Run(t, &fstests.Opt{
@@ -131,6 +139,7 @@ func TestObfuscate(t *testing.T) {
 		SkipBadWindowsCharacters:     true,
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }

@@ -139,6 +148,9 @@ func TestNoDataObfuscate(t *testing.T) {
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping as -remote set")
 	}
+	if runtime.GOOS == "darwin" {
+		t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
+	}
 	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
 	name := "TestCrypt4"
 	fstests.Run(t, &fstests.Opt{
@@ -154,5 +166,6 @@ func TestNoDataObfuscate(t *testing.T) {
 		SkipBadWindowsCharacters:     true,
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }

--- a/lib/pkcs7/pkcs7.go
+++ b/lib/pkcs7/pkcs7.go
@@ -8,11 +8,11 @@ import "errors"

 // Errors Unpad can return
 var (
-	ErrorPaddingNotFound      = errors.New("Bad PKCS#7 padding - not padded")
-	ErrorPaddingNotAMultiple  = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
-	ErrorPaddingTooLong       = errors.New("Bad PKCS#7 padding - too long")
-	ErrorPaddingTooShort      = errors.New("Bad PKCS#7 padding - too short")
-	ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
+	ErrorPaddingNotFound      = errors.New("bad PKCS#7 padding - not padded")
+	ErrorPaddingNotAMultiple  = errors.New("bad PKCS#7 padding - not a multiple of blocksize")
+	ErrorPaddingTooLong       = errors.New("bad PKCS#7 padding - too long")
+	ErrorPaddingTooShort      = errors.New("bad PKCS#7 padding - too short")
+	ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same")
 )

 // Pad buf using PKCS#7 to a multiple of n.
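
The Pad/Unpad pair this file provides follows PKCS#7 (RFC 5652 section 6.3): append c copies of the byte c, where 1 <= c <= n, so the result is a multiple of n and the padding is always present and unambiguous to remove. A sketch consistent with the error values above (the real signatures may differ slightly):

    // Pad buf using PKCS#7 to a multiple of n.
    func Pad(n int, buf []byte) []byte {
    	c := n - len(buf)%n
    	for i := 0; i < c; i++ {
    		buf = append(buf, byte(c))
    	}
    	return buf
    }

    // Unpad removes and validates PKCS#7 padding added by Pad.
    func Unpad(n int, buf []byte) ([]byte, error) {
    	if len(buf) == 0 {
    		return nil, ErrorPaddingNotFound
    	}
    	if len(buf)%n != 0 {
    		return nil, ErrorPaddingNotAMultiple
    	}
    	c := int(buf[len(buf)-1])
    	if c == 0 {
    		return nil, ErrorPaddingTooShort
    	}
    	if c > n {
    		return nil, ErrorPaddingTooLong
    	}
    	for _, b := range buf[len(buf)-c:] {
    		if int(b) != c {
    			return nil, ErrorPaddingNotAllTheSame
    		}
    	}
    	return buf[:len(buf)-c], nil
    }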

--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -14,9 +14,9 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"mime"
 	"net/http"
+	"os"
 	"path"
 	"sort"
 	"strconv"
@@ -50,6 +50,7 @@ import (
 	drive_v2 "google.golang.org/api/drive/v2"
 	drive "google.golang.org/api/drive/v3"
 	"google.golang.org/api/googleapi"
+	"google.golang.org/api/option"
 )

 // Constants
@@ -70,7 +71,7 @@ const (
 	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
 	minChunkSize     = fs.SizeSuffix(googleapi.MinUploadChunkSize)
 	defaultChunkSize = 8 * fs.Mebi
-	partialFields    = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
+	partialFields    = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
 	listRGrouping    = 50   // number of IDs to search at once when using ListR
 	listRInputBuffer = 1000 // size of input buffer when using ListR
 	defaultXDGIcon   = "text-html"
@@ -84,7 +85,7 @@ var (
 		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-		RedirectURL:  oauthutil.TitleBarRedirectURL,
+		RedirectURL:  oauthutil.RedirectURL,
 	}
 	_mimeTypeToExtensionDuplicates = map[string]string{
 		"application/x-vnd.oasis.opendocument.presentation": ".odp",
@@ -201,7 +202,7 @@ func init() {
 			m.Set("root_folder_id", "appDataFolder")
 		}

-		if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
+		if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
 			return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
 				OAuth2Config: driveConfig,
 			})
@@ -276,6 +277,7 @@ Leave blank normally.

 Fill in to access "Computers" folders (see docs), or for rclone to use
 a non root folder as its starting point.
 `,
 			Advanced: true,
 		}, {
 			Name: "service_account_file",
 			Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
@@ -299,6 +301,17 @@ a non root folder as its starting point.
 			Default:  true,
 			Help:     "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
 			Advanced: true,
+		}, {
+			Name:    "copy_shortcut_content",
+			Default: false,
+			Help: `Server side copy contents of shortcuts instead of the shortcut.
+
+When doing server side copies, normally rclone will copy shortcuts as
+shortcuts.
+
+If this flag is used then rclone will copy the contents of shortcuts
+rather than shortcuts themselves when doing server side copies.`,
+			Advanced: true,
 		}, {
 			Name:    "skip_gdocs",
 			Default: false,
@@ -438,7 +451,11 @@ If downloading a file returns the error "This file has been identified
 as malware or spam and cannot be downloaded" with the error code
 "cannotDownloadAbusiveFile" then supply this flag to rclone to
 indicate you acknowledge the risks of downloading the file and rclone
-will download it anyway.`,
+will download it anyway.
+
+Note that if you are using a service account it will need Manager
+permission (not Content Manager) for this flag to work. If the SA
+does not have the right permission, Google will just ignore the flag.`,
 			Advanced: true,
 		}, {
 			Name: "keep_revision_forever",
@@ -482,7 +499,9 @@ need to use --ignore size also.`,
 		}, {
 			Name:    "server_side_across_configs",
 			Default: false,
-			Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
+			Help: `Deprecated: use --server-side-across-configs instead.
+
+Allow server-side operations (e.g. copy) to work across different drive configs.

 This can be useful if you wish to do a server-side copy between two
 different Google drives. Note that this isn't enabled by default
@@ -545,6 +564,35 @@ If this flag is set then rclone will ignore shortcut files completely.
 `,
 			Advanced: true,
 			Default:  false,
+		}, {
+			Name: "skip_dangling_shortcuts",
+			Help: `If set skip dangling shortcut files.
+
+If this is set then rclone will not show any dangling shortcuts in listings.
+`,
+			Advanced: true,
+			Default:  false,
+		}, {
+			Name: "resource_key",
+			Help: `Resource key for accessing a link-shared file.
+
+If you need to access files shared with a link like this
+
+    https://drive.google.com/drive/folders/XXX?resourcekey=YYY&usp=sharing
+
+Then you will need to use the first part "XXX" as the "root_folder_id"
+and the second part "YYY" as the "resource_key" otherwise you will get
+404 not found errors when trying to access the directory.
+
+See: https://developers.google.com/drive/api/guides/resource-keys
+
+This resource key requirement only applies to a subset of old files.
+
+Note also that opening the folder once in the web interface (with the
+user you've authenticated rclone with) seems to be enough so that the
+resource key is not needed.
+`,
+			Advanced: true,
 		}, {
 			Name: config.ConfigEncoding,
 			Help: config.ConfigEncodingHelp,
@@ -552,6 +600,18 @@ If this flag is set then rclone will ignore shortcut files completely.
 			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
 			// Don't encode / as it's a valid name character in drive.
 			Default: encoder.EncodeInvalidUtf8,
+		}, {
+			Name:     "env_auth",
+			Help:     "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
+			Default:  false,
+			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: "false",
+				Help:  "Enter credentials in the next step.",
+			}, {
+				Value: "true",
+				Help:  "Get GCP IAM credentials from the environment (env vars or IAM).",
+			}},
 		}}...),
 	})
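
The new env_auth path resolves Application Default Credentials rather than running the interactive OAuth flow (the createOAuthClient change further down shows the actual call). A minimal standalone sketch of that flow, using the public drive/v3 scope constant; error handling is collapsed to log.Fatal for brevity:

    package main

    import (
    	"context"
    	"log"

    	"golang.org/x/oauth2/google"
    	drive "google.golang.org/api/drive/v3"
    	"google.golang.org/api/option"
    )

    func main() {
    	ctx := context.Background()
    	// google.DefaultClient walks the Application Default Credentials
    	// chain: GOOGLE_APPLICATION_CREDENTIALS, gcloud user credentials,
    	// then the metadata server on GCE - the sources env_auth enables.
    	client, err := google.DefaultClient(ctx, drive.DriveScope)
    	if err != nil {
    		log.Fatal(err)
    	}
    	svc, err := drive.NewService(ctx, option.WithHTTPClient(client))
    	if err != nil {
    		log.Fatal(err)
    	}
    	_ = svc // ready for API calls
    }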
@@ -578,6 +638,7 @@ type Options struct {
 	TeamDriveID           string               `config:"team_drive"`
 	AuthOwnerOnly         bool                 `config:"auth_owner_only"`
 	UseTrash              bool                 `config:"use_trash"`
+	CopyShortcutContent   bool                 `config:"copy_shortcut_content"`
 	SkipGdocs             bool                 `config:"skip_gdocs"`
 	SkipChecksumGphotos   bool                 `config:"skip_checksum_gphotos"`
 	SharedWithMe          bool                 `config:"shared_with_me"`
@@ -604,7 +665,10 @@ type Options struct {
 	StopOnUploadLimit     bool                 `config:"stop_on_upload_limit"`
 	StopOnDownloadLimit   bool                 `config:"stop_on_download_limit"`
 	SkipShortcuts         bool                 `config:"skip_shortcuts"`
+	SkipDanglingShortcuts bool                 `config:"skip_dangling_shortcuts"`
+	ResourceKey           string               `config:"resource_key"`
 	Enc                   encoder.MultiEncoder `config:"encoding"`
+	EnvAuth               bool                 `config:"env_auth"`
 }

 // Fs represents a remote drive server
@@ -629,6 +693,7 @@ type Fs struct {
 	grouping        int32               // number of IDs to search at once in ListR - read with atomic
 	listRmu         *sync.Mutex         // protects listRempties
 	listRempties    map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
+	dirResourceKeys *sync.Map           // map directory ID to resource key
 }

 type baseObject struct {
@@ -639,6 +704,7 @@ type baseObject struct {
 	mimeType    string   // The object MIME type
 	bytes       int64    // size of the object
 	parents     []string // IDs of the parent directories
+	resourceKey *string  // resourceKey is needed for link shared objects
 }
 type documentObject struct {
 	baseObject
@@ -710,6 +776,9 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 		} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
 			fs.Errorf(f, "Received download limit error: %v", err)
 			return false, fserrors.FatalError(err)
+		} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
+			fs.Errorf(f, "Received upload limit error: %v", err)
+			return false, fserrors.FatalError(err)
 		} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
 			fs.Errorf(f, "Received Shared Drive file limit error: %v", err)
 			return false, fserrors.FatalError(err)
@@ -779,6 +848,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
 	// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
 	// If we need to list file inside those shared folders, we must search it without sharedWithMe
 	parentsQuery := bytes.NewBufferString("(")
+	var resourceKeys []string
 	for _, dirID := range dirIDs {
 		if dirID == "" {
 			continue
@@ -799,7 +869,12 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
 		} else {
 			_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
 		}
+		resourceKey, hasResourceKey := f.dirResourceKeys.Load(dirID)
+		if hasResourceKey {
+			resourceKeys = append(resourceKeys, fmt.Sprintf("%s/%s", dirID, resourceKey))
+		}
 	}
+	resourceKeysHeader := strings.Join(resourceKeys, ",")
 	if parentsQuery.Len() > 1 {
 		_ = parentsQuery.WriteByte(')')
 		query = append(query, parentsQuery.String())
@@ -808,8 +883,8 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
 	if title != "" {
 		searchTitle := f.opt.Enc.FromStandardName(title)
 		// Escaping the backslash isn't documented but seems to work
-		searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
-		searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
+		searchTitle = strings.ReplaceAll(searchTitle, `\`, `\\`)
+		searchTitle = strings.ReplaceAll(searchTitle, `'`, `\'`)

 		var titleQuery bytes.Buffer
 		_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
@@ -863,7 +938,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
 	}
 	list.SupportsAllDrives(true)
 	list.IncludeItemsFromAllDrives(true)
-	if f.isTeamDrive {
+	if f.isTeamDrive && !f.opt.SharedWithMe {
 		list.DriveId(f.opt.TeamDriveID)
 		list.Corpora("drive")
 	}
@@ -871,6 +946,10 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
 	if f.rootFolderID == "appDataFolder" {
 		list.Spaces("appDataFolder")
 	}
+	// Add resource Keys if necessary
+	if resourceKeysHeader != "" {
+		list.Header().Add("X-Goog-Drive-Resource-Keys", resourceKeysHeader)
+	}

 	fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)
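
As the hunk above shows, resource keys are sent as one X-Goog-Drive-Resource-Keys header whose value joins "fileID/resourceKey" pairs with commas. A small sketch of building that header value from a plain map (the real code uses a sync.Map keyed by directory ID):

    // buildResourceKeysHeader joins known (fileID, resourceKey) pairs in
    // the "id1/key1,id2/key2" form used by the header above.
    func buildResourceKeysHeader(keys map[string]string) string {
    	pairs := make([]string, 0, len(keys))
    	for id, key := range keys {
    		pairs = append(pairs, fmt.Sprintf("%s/%s", id, key))
    	}
    	return strings.Join(pairs, ",")
    }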
|
||||
|
||||
@@ -906,6 +985,11 @@ OUTER:
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("list: %w", err)
|
||||
}
|
||||
// leave the dangling shortcut out of the listings
|
||||
// we've already logged about the dangling shortcut in resolveShortcut
|
||||
if f.opt.SkipDanglingShortcuts && item.MimeType == shortcutMimeTypeDangling {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Check the case of items is correct since
|
||||
// the `=` operator is case insensitive.
|
||||
@@ -1042,7 +1126,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
|
||||
|
||||
// try loading service account credentials from env variable, then from a file
|
||||
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
|
||||
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
|
||||
}
|
||||
@@ -1053,6 +1137,12 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
|
||||
}
|
||||
} else if opt.EnvAuth {
|
||||
scopes := driveScopes(opt.Scope)
|
||||
oAuthClient, err = google.DefaultClient(ctx, scopes...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create client from environment: %w", err)
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
|
||||
if err != nil {
|
||||
@@ -1125,15 +1215,16 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err

ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
m: m,
grouping: listRGrouping,
listRmu: new(sync.Mutex),
listRempties: make(map[string]struct{}),
name: name,
root: root,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
m: m,
grouping: listRGrouping,
listRmu: new(sync.Mutex),
listRempties: make(map[string]struct{}),
dirResourceKeys: new(sync.Map),
}
f.isTeamDrive = opt.TeamDriveID != ""
f.fileFields = f.getFileFields()
@@ -1143,17 +1234,18 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
}).Fill(ctx, f)

// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = drive.New(f.client)
f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
if err != nil {
return nil, fmt.Errorf("couldn't create Drive client: %w", err)
}

if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
if err != nil {
return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
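Both constructor calls migrate from the deprecated drive.New(client) / drive_v2.New(client) to NewService with option.WithHTTPClient, which keeps the pre-built OAuth client so auth behaviour is unchanged. A minimal sketch of the new pattern, with http.DefaultClient standing in for the real OAuth client:

```go
package main

import (
	"context"
	"log"
	"net/http"

	drive_v2 "google.golang.org/api/drive/v2"
	drive "google.golang.org/api/drive/v3"
	"google.golang.org/api/option"
)

func main() {
	client := http.DefaultClient // stands in for the OAuth client built earlier

	// NewService replaces the deprecated drive.New; WithHTTPClient stops
	// it from trying to discover credentials on its own.
	svc, err := drive.NewService(context.Background(), option.WithHTTPClient(client))
	if err != nil {
		log.Fatalf("couldn't create Drive client: %v", err)
	}

	// The same pattern applies to the v2 service used for downloads.
	v2Svc, err := drive_v2.NewService(context.Background(), option.WithHTTPClient(client))
	if err != nil {
		log.Fatalf("couldn't create Drive v2 client: %v", err)
	}
	_, _ = svc, v2Svc
}
```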
@@ -1195,6 +1287,11 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e

f.dirCache = dircache.New(f.root, f.rootFolderID, f)

// If resource key is set then cache it for the root folder id
if f.opt.ResourceKey != "" {
f.dirResourceKeys.Store(f.rootFolderID, f.opt.ResourceKey)
}

// Parse extensions
if f.opt.Extensions != "" {
if f.opt.ExportExtensions != defaultExportExtensions {
@@ -1293,12 +1390,16 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
}
}
}
return &Object{
o := &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
md5sum: strings.ToLower(info.Md5Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
if info.ResourceKey != "" {
o.resourceKey = &info.ResourceKey
}
return o
}

// newDocumentObject creates an fs.Object for a google docs drive.File
@@ -1413,6 +1514,9 @@ func (f *Fs) newObjectWithExportInfo(
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if strings.HasSuffix(remote, "/") {
return nil, fs.ErrorIsDir
}
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
if err != nil {
return nil, err
@@ -1571,6 +1675,15 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
}
}

// If using a link type export and a more specific export
// hasn't been found all docs should be exported
for _, _extension := range f.exportExtensions {
_mimeType := mime.TypeByExtension(_extension)
if isLinkMimeType(_mimeType) {
return _extension, _mimeType, true
}
}

// else return empty
return "", "", isDocument
}
@@ -1581,6 +1694,14 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
// If item has MD5 sum it is a file stored on drive
if item.Md5Checksum != "" {
return
}
// Folders can't be documents
if item.MimeType == driveFolderType {
return
}
extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
if extension != "" {
filename = item.Name + extension
@@ -1971,7 +2092,7 @@ func splitID(compositeID string) (actualID, shortcutID string) {

// isShortcutID returns true if compositeID refers to a shortcut
func isShortcutID(compositeID string) bool {
return strings.IndexRune(compositeID, shortcutSeparator) >= 0
return strings.ContainsRune(compositeID, shortcutSeparator)
}

// actualID returns an actual ID from a composite ID
@@ -2042,6 +2163,10 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
f.dirCache.Put(remote, item.Id)
// cache the resource key for later lookups
if item.ResourceKey != "" {
f.dirResourceKeys.Store(item.Id, item.ResourceKey)
}
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
d := fs.NewDir(remote, when).SetID(item.Id)
if len(item.Parents) > 0 {
@@ -2083,7 +2208,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -2125,10 +2250,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,

exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
if exportExt == "" {
return nil, fmt.Errorf("No export format found for %q", importMimeType)
return nil, fmt.Errorf("no export format found for %q", importMimeType)
}
if exportExt != srcExt && !f.opt.AllowImportNameChange {
return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
return nil, fmt.Errorf("can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
}
}
}
@@ -2317,9 +2442,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -2374,16 +2499,24 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
createInfo.Description = ""
}

// get the ID of the thing to copy - this is the shortcut if available
// get the ID of the thing to copy
// copy the contents if CopyShortcutContent
// else copy the shortcut only

id := shortcutID(srcObj.id)

if f.opt.CopyShortcutContent {
id = actualID(srcObj.id)
}

var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(id, createInfo).
copy := f.svc.Files.Copy(id, createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Context(ctx).Do()
KeepRevisionForever(f.opt.KeepRevisionForever)
srcObj.addResourceKey(copy.Header())
info, err = copy.Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
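Splitting the Files.Copy builder from its Do() call is what lets the resource-key header be attached before the request fires. A hedged sketch of that build-then-header pattern; the helper name and IDs are illustrative, not rclone's:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	drive "google.golang.org/api/drive/v3"
	"google.golang.org/api/option"
)

// copyWithResourceKey builds the Files.Copy call first so an extra
// header can be added via call.Header() before Do() sends the request.
func copyWithResourceKey(ctx context.Context, svc *drive.Service, id, resourceKey string, createInfo *drive.File) (*drive.File, error) {
	call := svc.Files.Copy(id, createInfo).SupportsAllDrives(true)
	if resourceKey != "" {
		call.Header().Add("X-Goog-Drive-Resource-Keys", fmt.Sprintf("%s/%s", id, resourceKey))
	}
	return call.Context(ctx).Do()
}

func main() {
	svc, err := drive.NewService(context.Background(), option.WithHTTPClient(http.DefaultClient))
	if err != nil {
		panic(err)
	}
	_ = svc // calling copyWithResourceKey requires real credentials
}
```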
@@ -2425,7 +2558,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
if f.opt.TrashedOnly {
return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
return errors.New("can't purge with --drive-trashed-only, use delete if you want to selectively delete files")
}
return f.purgeCheck(ctx, dir, false)
}
@@ -2544,9 +2677,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -2771,6 +2904,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder")
}
changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
@@ -2891,12 +3025,12 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
return fmt.Errorf("drive: failed when making oauth client: %w", err)
}
f.client = oAuthClient
f.svc, err = drive.New(f.client)
f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
if err != nil {
return fmt.Errorf("couldn't create Drive client: %w", err)
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
if err != nil {
return fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
@@ -3185,7 +3319,7 @@ This will return a JSON list of objects like this

With the -o config parameter it will output the list in a format
suitable for adding to a config file to make aliases for all the
drives found.
drives found and a combined drive.

[My Drive]
type = alias
@@ -3195,10 +3329,15 @@ drives found.
type = alias
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:

Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. This may require manual editing
of the names.
[AllDrives]
type = combine
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"

Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal characters will be
substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.
`,
}, {
Name: "untrash",
@@ -3212,9 +3351,9 @@ This takes an optional directory to trash which make this easier to
use via the API.

rclone backend untrash drive:directory
rclone backend -i untrash drive:directory subdir
rclone backend --interactive untrash drive:directory subdir

Use the -i flag to see what would be restored before restoring it.
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.

Result:

@@ -3244,8 +3383,14 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be
attempted if possible.

Use the -i flag to see what would be copied before copying.
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
`,
}, {
Name: "exportformats",
Short: "Dump the export formats for debug purposes",
}, {
Name: "importformats",
Short: "Dump the import formats for debug purposes",
}}

// Command the backend to run a named command
@@ -3265,7 +3410,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
out["service_account_file"] = f.opt.ServiceAccountFile
}
if _, ok := opt["chunk_size"]; ok {
out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
out["chunk_size"] = f.opt.ChunkSize.String()
}
return out, nil
case "set":
@@ -3282,11 +3427,11 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
if chunkSize, ok := opt["chunk_size"]; ok {
chunkSizeMap := make(map[string]string)
chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
chunkSizeMap["previous"] = f.opt.ChunkSize.String()
if err = f.changeChunkSize(chunkSize); err != nil {
return out, err
}
chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
chunkSizeString := f.opt.ChunkSize.String()
f.m.Set("chunk_size", chunkSizeString)
chunkSizeMap["current"] = chunkSizeString
out["chunk_size"] = chunkSizeMap
@@ -3316,12 +3461,27 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
if _, ok := opt["config"]; ok {
lines := []string{}
for _, drive := range drives {
upstreams := []string{}
names := make(map[string]struct{}, len(drives))
for i, drive := range drives {
name := fspath.MakeConfigName(drive.Name)
for {
if _, found := names[name]; !found {
break
}
name += fmt.Sprintf("-%d", i)
}
names[name] = struct{}{}
lines = append(lines, "")
lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
lines = append(lines, fmt.Sprintf("type = alias"))
lines = append(lines, fmt.Sprintf("[%s]", name))
lines = append(lines, "type = alias")
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
upstreams = append(upstreams, fmt.Sprintf(`"%s=%s:"`, name, name))
}
lines = append(lines, "")
lines = append(lines, "[AllDrives]")
lines = append(lines, "type = combine")
lines = append(lines, fmt.Sprintf("upstreams = %s", strings.Join(upstreams, " ")))
return lines, nil
}
return drives, nil
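The drives config hunk dedupes config names by suffixing the slice index until the name is unused, which matches the "duplicate names will have numbers suffixed" help text. A standalone sketch of just that loop; the helper name is invented:

```go
package main

import "fmt"

// dedupeNames makes each name unique by appending "-<index>" until it
// is unused, mirroring the numbered-suffix behaviour in the hunk above.
func dedupeNames(in []string) []string {
	seen := make(map[string]struct{}, len(in))
	out := make([]string, 0, len(in))
	for i, name := range in {
		for {
			if _, found := seen[name]; !found {
				break
			}
			name += fmt.Sprintf("-%d", i)
		}
		seen[name] = struct{}{}
		out = append(out, name)
	}
	return out
}

func main() {
	fmt.Println(dedupeNames([]string{"Team", "Team", "Team"}))
	// Output: [Team Team-1 Team-2]
}
```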
@@ -3344,6 +3504,10 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
}
return nil, nil
case "exportformats":
return f.exportFormats(ctx), nil
case "importformats":
return f.importFormats(ctx), nil
default:
return nil, fs.ErrorCommandNotFound
}
@@ -3393,12 +3557,6 @@ func (o *baseObject) Size() int64 {
return o.bytes
}

// getRemoteInfo returns a drive.File for the remote
func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
return
}

// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
@@ -3439,7 +3597,6 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *baseObject) ModTime(ctx context.Context) time.Time {
@@ -3480,6 +3637,14 @@ func (o *baseObject) Storable() bool {
return true
}

// addResourceKey adds a X-Goog-Drive-Resource-Keys header for this
// object if required.
func (o *baseObject) addResourceKey(header http.Header) {
if o.resourceKey != nil {
header.Add("X-Goog-Drive-Resource-Keys", fmt.Sprintf("%s/%s", o.id, *o.resourceKey))
}
}

// httpResponse gets an http.Response object for the object
// using the url and method passed in
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
@@ -3495,6 +3660,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
// Don't supply range requests for 0 length objects as they always fail
delete(req.Header, "Range")
}
o.addResourceKey(req.Header)
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {
@@ -3574,7 +3740,7 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options)
} else {
err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err)
err = fmt.Errorf("use the --drive-acknowledge-abuse flag to download this file: %w", err)
}
}
if err != nil {
@@ -3661,7 +3827,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
data = data[:limit]
}

return ioutil.NopCloser(bytes.NewReader(data)), nil
return io.NopCloser(bytes.NewReader(data)), nil
}

func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
@@ -3687,7 +3853,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
@@ -3720,7 +3886,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
if err != nil {
return err
}

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"os"
"path"
@@ -19,6 +18,7 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
@@ -28,6 +28,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)

func TestDriveScopes(t *testing.T) {
@@ -76,7 +77,7 @@ var additionalMimeTypes = map[string]string{
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {})
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"`
@@ -190,6 +191,69 @@ func TestExtensionsForImportFormats(t *testing.T) {
}
}

func (f *Fs) InternalTestShouldRetry(t *testing.T) {
ctx := context.Background()
gatewayTimeout := googleapi.Error{
Code: 503,
}
timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout)
assert.True(t, timeoutRetry)
assert.Equal(t, &gatewayTimeout, timeoutError)
generic403 := googleapi.Error{
Code: 403,
}
rLEItem := googleapi.ErrorItem{
Reason: "rateLimitExceeded",
Message: "User rate limit exceeded.",
}
generic403.Errors = append(generic403.Errors, rLEItem)
oldStopUpload := f.opt.StopOnUploadLimit
oldStopDownload := f.opt.StopOnDownloadLimit
f.opt.StopOnUploadLimit = true
f.opt.StopOnDownloadLimit = true
defer func() {
f.opt.StopOnUploadLimit = oldStopUpload
f.opt.StopOnDownloadLimit = oldStopDownload
}()
expectedRLError := fserrors.FatalError(&generic403)
rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403)
assert.False(t, rateLimitRetry)
assert.Equal(t, rateLimitErr, expectedRLError)
dQEItem := googleapi.ErrorItem{
Reason: "downloadQuotaExceeded",
}
generic403.Errors[0] = dQEItem
expectedDQError := fserrors.FatalError(&generic403)
downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403)
assert.False(t, downloadQuotaRetry)
assert.Equal(t, downloadQuotaError, expectedDQError)
tDFLEItem := googleapi.ErrorItem{
Reason: "teamDriveFileLimitExceeded",
}
generic403.Errors[0] = tDFLEItem
expectedTDFLError := fserrors.FatalError(&generic403)
teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403)
assert.False(t, teamDriveFileLimitRetry)
assert.Equal(t, teamDriveFileLimitError, expectedTDFLError)
qEItem := googleapi.ErrorItem{
Reason: "quotaExceeded",
}
generic403.Errors[0] = qEItem
expectedQuotaError := fserrors.FatalError(&generic403)
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError)

sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
}

func (f *Fs) InternalTestDocumentImport(t *testing.T) {
oldAllow := f.opt.AllowImportNameChange
f.opt.AllowImportNameChange = true
@@ -378,9 +442,9 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
// Make some objects, one in a subdir
contents := random.String(100)
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
_ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)

// Check objects
checkObjects := func() {
@@ -422,11 +486,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
require.NoError(t, err)
o := obj.(*Object)

dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()
dir := t.TempDir()

checkFile := func(name string) {
filePath := filepath.Join(dir, name)
@@ -466,6 +526,9 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {

// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)

opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
@@ -491,24 +554,16 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
subFs, isDriveFs := subFsResult.(*Fs)
require.True(t, isDriveFs)

tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir1)
}()
tempDir1 := t.TempDir()
tempFs1, err := fs.NewFs(defCtx, tempDir1)
require.NoError(t, err)

tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir2)
}()
tempDir2 := t.TempDir()
tempFs2, err := fs.NewFs(defCtx, tempDir2)
require.NoError(t, err)

file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
_, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
_ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)

// validate sync/copy
const timeQuery = "(modifiedTime >= '"
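The test hunks above replace the ioutil.TempDir plus deferred os.RemoveAll boilerplate with t.TempDir(), which creates a unique directory and registers cleanup automatically. A minimal sketch of the pattern; the package and test names are illustrative:

```go
package example

import (
	"os"
	"path/filepath"
	"testing"
)

// TestTempDir shows the t.TempDir() replacement: the directory is
// removed for us when the test (and its subtests) finish, so no
// defer os.RemoveAll is needed.
func TestTempDir(t *testing.T) {
	dir := t.TempDir()
	name := filepath.Join(dir, "agequery.txt")
	if err := os.WriteFile(name, []byte("abcxyz"), 0o600); err != nil {
		t.Fatal(err)
	}
}
```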
@@ -557,6 +612,7 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)
}

var _ fstests.InternalTester = (*Fs)(nil)

@@ -13,7 +13,6 @@ import (
"sync"
"time"

"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
@@ -118,12 +117,12 @@ func (b *batcher) Batching() bool {
}

// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -137,50 +136,7 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
if err != nil {
return nil, fmt.Errorf("batch commit failed: %w", err)
}
return batchStatus, nil
}

// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
return complete, nil
}

// commit a batch
@@ -188,7 +144,7 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
if err != nil && !signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
@@ -199,26 +155,11 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
fs.Debugf(b.f, "Committing %s", desc)

// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
complete, err := b.finishBatch(ctx, items)
if err != nil {
return err
}

// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
}

// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
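finishBatch now calls the blocking UploadSessionFinishBatchV2 endpoint, which is why the whole async_job_id polling path could be deleted. A hedged sketch of the simplified commit using the same SDK, with error handling reduced to the essentials and the token obviously a placeholder:

```go
package main

import (
	"fmt"

	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
)

// finishBatchV2 commits a batch of upload sessions. The V2 endpoint
// blocks until the batch is done, so there is no launch status to
// poll and no job-status loop to maintain.
func finishBatchV2(client files.Client, items []*files.UploadSessionFinishArg) (*files.UploadSessionFinishBatchResult, error) {
	arg := &files.UploadSessionFinishBatchArg{Entries: items}
	res, err := client.UploadSessionFinishBatchV2(arg)
	if err != nil {
		return nil, fmt.Errorf("batch commit failed: %w", err)
	}
	return res, nil
}

func main() {
	client := files.New(dropbox.Config{Token: "REDACTED"})
	_, _ = finishBatchV2(client, nil) // real use passes finish args for each session
}
```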
@@ -319,9 +260,12 @@ outer:
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message

@@ -58,7 +58,7 @@ import (
const (
rcloneClientID = "5jcck7diasz0rqy"
rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
minSleep = 10 * time.Millisecond
defaultMinSleep = fs.Duration(10 * time.Millisecond)
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
// Upload chunk size - setting too small makes uploads slow.
@@ -260,17 +260,22 @@ uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.

- batch_mode: async - default batch_timeout is 500ms
- batch_mode: sync - default batch_timeout is 10s
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish comitting`,
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
Help: "Minimum time to sleep between API calls.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -299,6 +304,7 @@ type Options struct {
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -442,7 +448,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
@@ -472,10 +478,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
args := team.NewMembersGetInfoArgs(members)

memberIds, err := f.team.MembersGetInfo(args)

if err != nil {
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
}
if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
}

cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
@@ -534,7 +542,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
default:
return nil, err
}
// if the moint failed we have to abort here
// if the mount failed we have to abort here
}
// if the mount succeeded it's now a normal folder in the users root namespace
// we disable shared folder mode and proceed normally
@@ -717,7 +725,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
entries = append(entries, d)
if err != nil {
return nil, err
@@ -904,7 +912,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
@@ -923,7 +931,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1042,9 +1050,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1103,9 +1111,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1197,7 +1205,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return
}
if len(listRes.Links) == 0 {
err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
err = errors.New("sharing link already exists, but list came back empty")
return
}
linkRes = listRes.Links[0]
@@ -1209,7 +1217,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
case *sharing.FolderLinkMetadata:
link = res.Url
default:
err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
err = fmt.Errorf("don't know how to extract link, response has unknown format: %T", res)
}
}
return
@@ -1269,7 +1277,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("about failed: %w", err)
return nil, err
}
var total uint64
if q.Allocation != nil {
@@ -1370,10 +1378,12 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.

if timeout < 30 {
timeout = 30
fs.Debugf(f, "Increasing poll interval to minimum 30s")
}

if timeout > 480 {
timeout = 480
fs.Debugf(f, "Decreasing poll interval to maximum 480s")
}

err = f.pacer.Call(func() (bool, error) {
@@ -1431,7 +1441,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
}

if entryPath != "" {
notifyFunc(entryPath, entryType)
notifyFunc(f.opt.Enc.ToStandardPath(entryPath), entryType)
}
}
if !changeList.HasMore {
@@ -1650,13 +1660,37 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
}

chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
skip := int64(0)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
if _, err = chunk.Seek(skip, io.SeekStart); err != nil {
return false, err
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
// after session is started, we retry everything
if err != nil {
// Check for incorrect offset error and retry with new offset
if uErr, ok := err.(files.UploadSessionAppendV2APIError); ok {
if uErr.EndpointError != nil && uErr.EndpointError.IncorrectOffset != nil {
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize {
fs.Debugf(o, "%s: chunk received OK - continuing", what)
return false, nil
} else if skip > chunkSize {
// This error should never happen
return false, fmt.Errorf("can't seek forwards by more than a chunk to correct offset: %s", what)
}
// Skip the sent data on next retry
cursor.Offset = uint64(int64(cursor.Offset) + delta)
fs.Debugf(o, "%s: skipping bytes on retry to fix offset", what)
}
}
}
return err != nil, err
})
if err != nil {
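The incorrect-offset handling above boils down to a little arithmetic on the offset we sent versus the offset the server says it wants. A standalone sketch of just that bookkeeping; the function name is invented, and the bounds checks mirror the ones in the hunk:

```go
package main

import "fmt"

// nextSkip works out how many bytes of the current chunk a retry should
// skip, given the offset we sent, the server's wanted offset, the skip
// accumulated so far, and the chunk size.
func nextSkip(sent, want uint64, skip, chunkSize int64) (int64, error) {
	delta := int64(want) - int64(sent)
	skip += delta
	switch {
	case skip < 0:
		return 0, fmt.Errorf("can't seek backwards to correct offset")
	case skip > chunkSize:
		return 0, fmt.Errorf("can't seek forwards by more than a chunk")
	}
	return skip, nil
}

func main() {
	// The server already has 4096 of the 8192 bytes sent from offset 0,
	// so the retry should skip the first 4096 bytes of the chunk.
	skip, err := nextSkip(0, 4096, 0, 8192)
	fmt.Println(skip, err) // 4096 <nil>
}
```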
@@ -1669,6 +1703,9 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
if size > 0 {
// if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
if in.BytesRead() > uint64(size) {
return nil, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, in.BytesRead())
}
} else {
// if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
@@ -1732,7 +1769,7 @@ func checkPathLength(name string) (err error) {

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
@@ -1760,7 +1797,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
entry, err = o.fs.srv.Upload(commitInfo, in)
entry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)
return shouldRetry(ctx, err)
})
}

@@ -28,25 +28,44 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}

var errorRegex = regexp.MustCompile(`#\d{1,3}`)

func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return 0
}
code, err := strconv.Atoi(matches[0])
if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0
}
return code
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
// 1Fichier uses HTTP error code 403 (Forbidden) for all kinds of errors with
// responses looking like this: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
// https://1fichier.com/api.html
//
// file/ls.cgi is limited :
//
// Warning (can be changed in case of abuses) :
// List all files of the account is limited to 1 request per hour.
// List folders is limited to 5 000 results and 1 request per folder per 30s.
if err != nil && strings.Contains(err.Error(), "Flood detected") {
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
// We attempt to parse the actual 1Fichier error code from this body and handle it accordingly
// Most importantly #374 (Flood detected: IP locked) which the integration tests provoke
// The list below is far from complete and should be expanded if we see any more error codes.
if err != nil {
switch parseFichierError(err) {
case 93:
return false, err // No such user
case 186:
return false, err // IP blocked?
case 374:
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
default:
}
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
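One thing worth noting about the new helper as shown: errorRegex has no capture group, so matches[0] still contains the leading "#", and strconv.Atoi will reject it, making parseFichierError log and return 0. A hedged standalone variant (not the committed code) that adds a capture group to extract just the digits:

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
	"strconv"
)

// errorRegex captures the digits of a "#NNN" code embedded in a
// 1Fichier error message.
var errorRegex = regexp.MustCompile(`#(\d{1,3})`)

// parseErrorCode pulls the numeric error code out of an error string
// so retry policy can switch on it; 0 means no code was found.
func parseErrorCode(err error) int {
	m := errorRegex.FindStringSubmatch(err.Error())
	if len(m) < 2 {
		return 0
	}
	code, convErr := strconv.Atoi(m[1]) // m[1] is the captured digits only
	if convErr != nil {
		return 0
	}
	return code
}

func main() {
	err := errors.New(`HTTP error 403: {"message":"Flood detected: IP Locked #374","status":"KO"}`)
	fmt.Println(parseErrorCode(err)) // 374
}
```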
@@ -99,6 +118,9 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
Single: 1,
Pass: f.opt.FilePassword,
}
if f.opt.CDN {
request.CDN = 1
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
@@ -454,7 +476,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("didnt got an upload node: %w", err)
return nil, fmt.Errorf("didn't get an upload node: %w", err)
}

// fs.Debugf(f, "Got Upload node")
@@ -468,7 +490,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
fileName = f.opt.Enc.FromStandardName(fileName)

if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
return nil, errors.New("invalid UploadID")
}

opts := rest.Opts{
@@ -510,7 +532,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)

if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
return nil, errors.New("invalid UploadID")
}

opts := rest.Opts{

@@ -1,3 +1,4 @@
// Package fichier provides an interface to the 1Fichier storage system.
package fichier

import (
@@ -42,20 +43,22 @@ func init() {
}, {
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter.",
Name: "file_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
Name: "folder_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Help: "Set if you wish to use CDN download links.",
Name: "cdn",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -91,6 +94,7 @@ type Options struct {
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
CDN bool `config:"cdn"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -297,7 +301,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
path, ok := f.dirCache.GetInv(directoryID)

if !ok {
return nil, errors.New("Cannot find dir in dircache")
return nil, errors.New("cannot find dir in dircache")
}

return f.newObjectFromFile(ctx, path, file), nil
@@ -335,7 +339,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
return nil, errors.New("File too big, cant upload")
return nil, errors.New("File too big, can't upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
@@ -517,6 +521,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/user/info.cgi",
ContentType: "application/json",
}
var accountInfo AccountInfo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to read user info: %w", err)
}

// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
Used: fs.NewUsageValue(accountInfo.ColdStorage), // bytes in use
Total: fs.NewUsageValue(accountInfo.AvailableColdStorage), // bytes total
Free: fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
}
return usage, nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)
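The new About maps 1Fichier's cold-storage counters straight onto rclone's used/total/free triple. A small sketch of just that arithmetic; the helper name is invented:

```go
package main

import "fmt"

// usageFromColdStorage mirrors the mapping in the About hunk above:
// cold storage in use is "used", cold storage available is "total",
// and the difference is "free".
func usageFromColdStorage(coldStorage, availableColdStorage int64) (used, total, free int64) {
	used = coldStorage
	total = availableColdStorage
	free = availableColdStorage - coldStorage
	return used, total, free
}

func main() {
	used, total, free := usageFromColdStorage(250e9, 1e12)
	fmt.Println(used, total, free) // 250000000000 1000000000000 750000000000
}
```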
@@ -20,6 +20,7 @@ type DownloadRequest struct {
|
||||
URL string `json:"url"`
|
||||
Single int `json:"single"`
|
||||
Pass string `json:"pass,omitempty"`
|
||||
CDN int `json:"cdn,omitempty"`
|
||||
}
|
||||
|
||||
// RemoveFolderRequest is the request structure of the corresponding request
|
||||
@@ -84,7 +85,7 @@ type CopyFileResponse struct {
|
||||
URLs []FileCopy `json:"urls"`
|
||||
}
|
||||
|
||||
// FileCopy is used in the the CopyFileResponse
|
||||
// FileCopy is used in the CopyFileResponse
|
||||
type FileCopy struct {
|
||||
FromURL string `json:"from_url"`
|
||||
ToURL string `json:"to_url"`
|
||||
@@ -182,3 +183,34 @@ type FoldersList struct {
|
||||
Status string `json:"Status"`
|
||||
SubFolders []Folder `json:"sub_folders"`
|
||||
}
|
||||
|
||||
// AccountInfo is the structure how 1Fichier returns user info
|
||||
type AccountInfo struct {
|
||||
StatsDate string `json:"stats_date"`
|
||||
MailRM string `json:"mail_rm"`
|
||||
DefaultQuota int64 `json:"default_quota"`
|
||||
UploadForbidden string `json:"upload_forbidden"`
|
||||
PageLimit int `json:"page_limit"`
|
||||
ColdStorage int64 `json:"cold_storage"`
|
||||
Status string `json:"status"`
|
||||
UseCDN string `json:"use_cdn"`
|
||||
AvailableColdStorage int64 `json:"available_cold_storage"`
|
||||
DefaultPort string `json:"default_port"`
|
||||
DefaultDomain int `json:"default_domain"`
|
||||
Email string `json:"email"`
|
||||
DownloadMenu string `json:"download_menu"`
|
||||
FTPDID int `json:"ftp_did"`
|
||||
DefaultPortFiles string `json:"default_port_files"`
|
||||
FTPReport string `json:"ftp_report"`
|
||||
OverQuota int64 `json:"overquota"`
|
||||
AvailableStorage int64 `json:"available_storage"`
|
||||
CDN string `json:"cdn"`
|
||||
Offer string `json:"offer"`
|
||||
SubscriptionEnd string `json:"subscription_end"`
|
||||
TFA string `json:"2fa"`
|
||||
AllowedColdStorage int64 `json:"allowed_cold_storage"`
|
||||
HotStorage int64 `json:"hot_storage"`
|
||||
DefaultColdStorageQuota int64 `json:"default_cold_storage_quota"`
|
||||
FTPMode string `json:"ftp_mode"`
|
||||
RUReport string `json:"ru_report"`
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ const (
|
||||
timeFormatJSON = `"` + timeFormatParameters + `"`
|
||||
)
|
||||
|
||||
// Time represents represents date and time information for the
|
||||
// Time represents date and time information for the
|
||||
// filefabric API
|
||||
type Time time.Time
|
||||
|
||||
@@ -95,7 +95,7 @@ type Status struct {
|
||||
// Warning string `json:"warning"` // obsolete
|
||||
}
|
||||
|
||||
// Status statisfies the error interface
|
||||
// Status satisfies the error interface
|
||||
func (e *Status) Error() string {
|
||||
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
|
||||
}
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
@@ -150,7 +149,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
m configmap.Mapper // to save config
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenMu sync.Mutex // hold when reading the token
@@ -373,7 +372,7 @@ type params map[string]interface{}

// rpc calls the rpc.php method of the SME file fabric
//
// This is an entry point to all the method calls
// This is an entry point to all the method calls.
//
// If result is nil then resp.Body will need closing
func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) {
@@ -490,7 +489,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Root is a dir - cache its ID
f.dirCache.Put(f.root, info.ID)
}
} else {
//} else {
// Root is not found so a directory
}
}
@@ -678,7 +677,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -697,7 +696,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -783,9 +782,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -843,7 +842,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}

// Wait for the the background task to complete if necessary
// Wait for the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for
@@ -956,9 +955,9 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1135,7 +1134,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1187,7 +1185,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id")
}
if o.contentType == emptyMimeType {
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
return io.NopCloser(bytes.NewReader([]byte{})), nil
}
fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{
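
The change from ioutil.NopCloser to io.NopCloser above is part of the Go 1.16 migration: io/ioutil is deprecated and its helpers now live in io and os. A minimal sketch of the equivalent call (illustrative, not taken from the rclone source):

import (
	"bytes"
	"io"
)

// emptyBody returns a ReadCloser over zero bytes, as the updated Open does
// for objects with the empty MIME type. io.NopCloser (Go 1.16+) is a
// drop-in replacement for the deprecated ioutil.NopCloser.
func emptyBody() io.ReadCloser {
	return io.NopCloser(bytes.NewReader([]byte{}))
}
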
@@ -1201,7 +1199,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -15,7 +15,7 @@ import (
"sync"
"time"

"github.com/jlaffaye/ftp"
"github.com/rclone/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -45,18 +45,20 @@ const (
func init() {
fs.Register(&fs.RegInfo{
Name: "ftp",
Description: "FTP Connection",
Description: "FTP",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser + ".",
Name: "user",
Help: "FTP username.",
Default: currentUser,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21).",
Name: "port",
Help: "FTP port number.",
Default: 21,
}, {
Name: "pass",
Help: "FTP password.",
@@ -68,7 +70,7 @@ func init() {
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.`,
than port 21. Cannot be used in combination with explicit FTPS.`,
Default: false,
}, {
Name: "explicit_tls",
@@ -76,11 +78,25 @@ than port 21. Cannot be used in combination with explicit FTP.`,

When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
to an encrypted one. Cannot be used in combination with implicit FTPS.`,
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Name: "concurrency",
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.

Note that setting this is very likely to cause deadlocks so it should
be used with care.

If you are doing a sync or copy then make sure concurrency is one more
than the sum of |--transfers| and |--checkers|.

If you use |--check-first| then it just needs to be one more than the
maximum of |--checkers| and |--transfers|.

So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.

`, "|", "`", -1),
Default: 0,
Advanced: true,
}, {
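
The concurrency rule quoted in this help text can be stated as a small function. A sketch only, assuming the rule exactly as written above (concurrency must be at least one more than the sum of --transfers and --checkers, or than their maximum when --check-first is used):

// minConcurrency is a hypothetical helper, not part of the rclone source,
// computing the smallest safe --ftp-concurrency for a sync or copy.
func minConcurrency(transfers, checkers int, checkFirst bool) int {
	if checkFirst {
		// checking and transferring happen in separate phases
		if checkers > transfers {
			return checkers + 1
		}
		return transfers + 1
	}
	// phases overlap, so budget for both at once
	return transfers + checkers + 1
}
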
@@ -98,11 +114,21 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
Help: "Disable using MLSD even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "disable_utf8",
Help: "Disable using UTF-8 even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "writing_mdtm",
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false,
Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
@@ -182,7 +208,9 @@ type Options struct {
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
@@ -287,18 +315,33 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}

// Return a *textproto.Error if err contains one or nil otherwise
func textprotoError(err error) (errX *textproto.Error) {
if errors.As(err, &errX) {
return errX
}
return nil
}

// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}

// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
if isRetriableFtpError(err) {
return true, err
}
return fserrors.ShouldRetry(err), err
}
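
The textprotoError helper above centralises what used to be ad hoc type switches: errors.As also matches a *textproto.Error hidden behind %w wrapping, which a plain type switch does not. A self-contained sketch of the pattern (illustrative names, not from the rclone source):

import (
	"errors"
	"fmt"
	"net/textproto"
)

// ftpCode returns the FTP status code buried anywhere in err's chain,
// or 0 if there is none. Hypothetical helper mirroring textprotoError.
func ftpCode(err error) int {
	var tpErr *textproto.Error
	if errors.As(err, &tpErr) {
		return tpErr.Code
	}
	return 0
}

func example() {
	err := fmt.Errorf("open: %w", &textproto.Error{Code: 421, Msg: "not available"})
	fmt.Println(ftpCode(err)) // prints 421 despite the wrapping
}
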
@@ -308,14 +351,44 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")

// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if f.tlsConf != nil && err == nil {
conn = tls.Client(conn, f.tlsConf)
if err != nil {
return nil, err
}
return
// Connect using cleartext only for non TLS
if f.tlsConf == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
if f.opt.ExplicitTLS && initialConnection {
initialConnection = false
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
// See: https://github.com/jlaffaye/ftp/issues/282
if false {
err = tlsConn.HandshakeContext(ctx)
if err != nil {
_ = conn.Close()
return nil, err
}
}
return tlsConn, nil
}
ftpConfig := []ftp.DialOption{
ftp.DialWithContext(ctx),
ftp.DialWithDialFunc(dial),
}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}

if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
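
The rewritten dial function encodes the difference between the two FTPS modes: with implicit TLS every connection is wrapped from the first byte, while with explicit TLS the first (control) connection must stay cleartext so that AUTH TLS can be negotiated, and only later data connections are wrapped. A condensed sketch of that decision, assuming imports of net and crypto/tls and field names as in the code above:

// wrapConn decides how a freshly dialled connection should be treated.
// Illustrative only; the real code also defers the TLS handshake (see the
// proftpd/pureftpd issues linked above).
func wrapConn(conn net.Conn, tlsConf *tls.Config, explicitTLS bool, first *bool) net.Conn {
	if tlsConf == nil {
		return conn // plain FTP
	}
	if explicitTLS && *first {
		*first = false
		return conn // control connection stays cleartext until AUTH TLS
	}
	// implicit TLS, or an explicit-TLS data connection: wrap immediately
	return tls.Client(conn, tlsConf)
}
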
@@ -323,12 +396,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -336,12 +403,18 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.opt.DisableUTF8 {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledUTF8(true))
}
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
}
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
@@ -405,8 +478,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
if tpErr := textprotoError(err); tpErr != nil {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -476,7 +548,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
protocol = "ftps://"
}
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
@@ -508,6 +580,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
@@ -555,8 +628,7 @@ func (f *Fs) Shutdown(ctx context.Context) error {

// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorObjectNotFound
@@ -567,8 +639,7 @@ func translateErrorFile(err error) error {

// translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error {
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorDirNotFound
@@ -599,8 +670,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
if remote == "" || remote == "." || remote == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
@@ -608,13 +678,38 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
Time: time.Now(),
}, nil
}
dir := path.Dir(fullPath)
base := path.Base(fullPath)

c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("findItem: %w", err)
}

// returns TRUE if MLST is supported which is required to call GetEntry
if c.IsTimePreciseInList() {
entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
f.putFtpConnection(&c, err)
if err != nil {
err = translateErrorFile(err)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusBadArguments:
err = nil
}
}
return nil, err
}
if entry != nil {
f.entryToStandard(entry)
}
return entry, nil
}

dir := path.Dir(remote)
base := path.Base(remote)

files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
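
findItem now has two paths: when the server advertises precise times in listings (which the library uses as a proxy for MLST support) a single GetEntry round trip fetches the entry directly; otherwise it falls back to listing the parent directory and scanning for the base name. Reduced to a sketch, with rclone's error translation and path encoding left out and assuming the jlaffaye/ftp API:

import (
	"path"

	"github.com/jlaffaye/ftp"
)

// lookup finds the entry for name, preferring one MLST round trip over a
// full LIST of the parent. Illustrative only.
func lookup(c *ftp.ServerConn, name string) (*ftp.Entry, error) {
	if c.IsTimePreciseInList() { // MLST supported
		return c.GetEntry(name)
	}
	entries, err := c.List(path.Dir(name))
	if err != nil {
		return nil, err
	}
	for _, e := range entries {
		if e.Name == path.Base(name) {
			return e, nil
		}
	}
	return nil, nil // not found
}
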
@@ -633,7 +728,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(ctx, remote)
entry, err := f.findItem(ctx, path.Join(f.root, remote))
if err != nil {
return nil, err
}
@@ -655,7 +750,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err

// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote)
entry, err := f.findItem(ctx, path.Join(f.root, remote))
if err != nil {
return false, fmt.Errorf("dirExists: %w", err)
}
@@ -707,7 +802,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f, "Timeout when waiting for List")
return nil, errors.New("Timeout when waiting for List")
return nil, errors.New("timeout when waiting for List")
}

// Annoyingly FTP returns success for a directory which
@@ -758,11 +853,12 @@ func (f *Fs) Hashes() hash.Set {

// Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server:
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
//
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime {
@@ -798,32 +894,18 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
dir := path.Dir(remote)
base := path.Base(remote)

c, err := f.getFtpConnection(ctx)
file, err := f.findItem(ctx, remote)
if err != nil {
return nil, fmt.Errorf("getInfo: %w", err)
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}

for i := range files {
file := files[i]
f.entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
return nil, err
} else if file != nil {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
}
return nil, fs.ErrorObjectNotFound
}
@@ -854,8 +936,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
@@ -1024,7 +1105,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.fSetTime {
fs.Errorf(o.fs, "SetModTime is not supported")
fs.Debugf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
@@ -1096,8 +1177,7 @@ func (f *ftpReadCloser) Close() error {
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) {
case *textproto.Error:
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil
@@ -1123,22 +1203,33 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
}
c, err := o.fs.getFtpConnection(ctx)

var (
fd *ftp.Response
c *ftp.ServerConn
)
err = o.fs.pacer.Call(func() (bool, error) {
c, err = o.fs.getFtpConnection(ctx)
if err != nil {
return false, err // getFtpConnection has retries already
}
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
}
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("open: %w", err)
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, fmt.Errorf("open: %w", err)
}

rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
}

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
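
Open now runs the whole connect-and-RetrFrom sequence inside pacer.Call, so transient FTP errors are retried with backoff and a failed connection goes back to the pool before the retry. The general shape of that pattern, stripped of the FTP specifics (a sketch only, assuming a configured *fs.Pacer and the shouldRetry above):

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// withRetry retries do with backoff; the callback's boolean says whether
// the error is worth retrying, and the pacer handles sleeping and limits.
func withRetry(ctx context.Context, p *fs.Pacer, do func() error) error {
	return p.Call(func() (bool, error) {
		err := do()
		return shouldRetry(ctx, err)
	})
}
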
@@ -1164,13 +1255,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
if err != nil {

@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MiB
idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
fileSize = 100000000 // 100 MiB
idleTimeout = 1 * time.Second // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
)

if testing.Short() {

@@ -19,11 +19,12 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"strconv"
"strings"
"sync"
"time"

"github.com/rclone/rclone/fs"
@@ -43,6 +44,7 @@ import (
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
option "google.golang.org/api/option"

// NOTE: This API is deprecated
storage "google.golang.org/api/storage/v1"
@@ -65,7 +67,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
)

@@ -80,7 +82,8 @@ func init() {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
envAuth, _ := m.Get("env_auth")
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
return nil, nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -90,6 +93,9 @@ func init() {
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
@@ -182,15 +188,30 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "asia-northeast1",
Help: "Tokyo",
}, {
Value: "asia-northeast2",
Help: "Osaka",
}, {
Value: "asia-northeast3",
Help: "Seoul",
}, {
Value: "asia-south1",
Help: "Mumbai",
}, {
Value: "asia-south2",
Help: "Delhi",
}, {
Value: "asia-southeast1",
Help: "Singapore",
}, {
Value: "asia-southeast2",
Help: "Jakarta",
}, {
Value: "australia-southeast1",
Help: "Sydney",
}, {
Value: "australia-southeast2",
Help: "Melbourne",
}, {
Value: "europe-north1",
Help: "Finland",
@@ -206,6 +227,12 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "europe-west4",
Help: "Netherlands",
}, {
Value: "europe-west6",
Help: "Zürich",
}, {
Value: "europe-central2",
Help: "Warsaw",
}, {
Value: "us-central1",
Help: "Iowa",
@@ -221,6 +248,33 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "us-west2",
Help: "California",
}, {
Value: "us-west3",
Help: "Salt Lake City",
}, {
Value: "us-west4",
Help: "Las Vegas",
}, {
Value: "northamerica-northeast1",
Help: "Montréal",
}, {
Value: "northamerica-northeast2",
Help: "Toronto",
}, {
Value: "southamerica-east1",
Help: "São Paulo",
}, {
Value: "southamerica-west1",
Help: "Santiago",
}, {
Value: "asia1",
Help: "Dual region: asia-northeast1 and asia-northeast2.",
}, {
Value: "eur4",
Help: "Dual region: europe-north1 and europe-west4.",
}, {
Value: "nam4",
Help: "Dual region: us-central1 and us-east1.",
}},
}, {
Name: "storage_class",
@@ -247,6 +301,41 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created

Empty folders are unsupported for bucket based remotes, this option creates an empty
object ending with "/", to persist the folder.
`,
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.

This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.
`,
Default: false,
Advanced: true,
}, {
Name: "decompress",
Help: `If set this will decompress gzip encoded objects.

It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files as compressed objects.

If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone
can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -254,6 +343,17 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}, {
Name: "env_auth",
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
}
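
The new env_auth option hands credential discovery to Google's Application Default Credentials chain (environment variables first, then instance metadata) instead of rclone's interactive OAuth flow. Its effect in NewFs amounts to the following, shown here as an isolated sketch:

import (
	"context"
	"net/http"

	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"
)

// adcClient builds an authorised client from the Application Default
// Credentials chain, as the EnvAuth branch of NewFs does.
func adcClient(ctx context.Context) (*http.Client, error) {
	return google.DefaultClient(ctx, storage.DevstorageFullControlScope)
}
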
@@ -261,6 +361,7 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
UserProject string `config:"user_project"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"`
@@ -269,21 +370,27 @@ type Options struct {
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
}

// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
warnCompressed sync.Once // warn once about compressed files
}

// Object describes a storage object
@@ -297,6 +404,7 @@ type Object struct {
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
mimeType string
gzipped bool // set if object has Content-Encoding: gzip
}

// ------------------------------------------------------------
@@ -314,7 +422,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("GCS root")
return "GCS root"
}
if f.rootDirectory == "" {
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
@@ -363,7 +471,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

@@ -406,7 +514,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
@@ -419,6 +527,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
}
} else if opt.EnvAuth {
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {
@@ -434,7 +547,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
@@ -444,10 +557,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(ctx, f)
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}

// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = storage.New(f.client)
gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
if opt.Endpoint != "" {
gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
}
f.svc, err = storage.NewService(context.Background(), gcsOpts...)
if err != nil {
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
@@ -456,7 +576,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
_, err = get.Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -504,7 +628,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
// Set recurse to read sub directories.
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
@@ -516,9 +640,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
if !recurse {
list = list.Delimiter("/")
}
foundItems := 0
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
@@ -534,6 +662,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
return err
}
if !recurse {
foundItems += len(objects.Prefixes)
var object storage.Object
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
@@ -554,22 +683,29 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
}
foundItems += len(objects.Items)
for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
// is this a directory marker?
if isDirectory {
// Don't insert the root directory
if remote == directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if isDirectory {
continue // skip directory marker
}
err = fn(remote, object, false)

err = fn(remote, object, isDirectory)
if err != nil {
return err
}
@@ -579,6 +715,17 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
list.PageToken(objects.NextPageToken)
}
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
// Determine whether the directory exists or not by whether it has a marker
_, err := f.readObjectInfo(ctx, bucket, directory)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}

return nil
}

@@ -622,6 +769,9 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, errors.New("can't list buckets without project number")
}
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
if f.opt.UserProject != "" {
listBuckets = listBuckets.UserProject(f.opt.UserProject)
}
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
@@ -722,7 +872,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -739,10 +889,69 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.Put(ctx, in, src, options...)
}

// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
if !f.opt.DirectoryMarkers || bucket == "" {
return nil
}

// Object to be uploaded
o := &Object{
fs: f,
modTime: time.Now(),
}

for {
_, bucketPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if bucketPath == "" {
break
}
o.remote = dir + "/"

// Check to see if object already exists
_, err := o.readObjectInfo(ctx)
if err == nil {
return nil
}

// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}

// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}

return nil
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
e := f.checkBucket(ctx, bucket)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)

}

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
}

// makeBucket creates the bucket if it doesn't exist
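
With directory_markers enabled, an otherwise-empty folder is persisted as a zero-length object whose name ends in "/", and createDirectoryMarker walks up the path so every parent gets one too. The naming convention in miniature (illustrative helper, not in the rclone source):

import "path"

// markerNames lists the marker objects Mkdir would ensure for dir,
// deepest first, e.g. "a/b/c" -> "a/b/c/", "a/b/", "a/".
func markerNames(dir string) (names []string) {
	for dir != "" && dir != "/" && dir != "." {
		names = append(names, dir+"/")
		dir = path.Dir(dir)
	}
	return names
}
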
@@ -751,7 +960,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
_, err = list.Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -786,24 +999,52 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
insertBucket = insertBucket.Context(ctx)
if f.opt.UserProject != "" {
insertBucket = insertBucket.UserProject(f.opt.UserProject)
}
_, err = insertBucket.Do()
return shouldRetry(ctx, err)
})
}, nil)
}

// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
if f.opt.NoCheckBucket {
return nil
}
return f.makeBucket(ctx, bucket)
}

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
}
fs.Debugf(o, "Removing directory marker")
err := o.Remove(ctx)
if err != nil {
return fmt.Errorf("removing directory marker failed: %w", err)
}
}
if bucket == "" || directory != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
if f.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err)
})
})
@@ -816,16 +1057,16 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
err := f.mkdirParent(ctx, remote)
if err != nil {
return nil, err
}
@@ -849,7 +1090,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var rewriteResponse *storage.RewriteResponse
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
rewriteRequest = rewriteRequest.Context(ctx)
if f.opt.UserProject != "" {
rewriteRequest.UserProject(f.opt.UserProject)
}
rewriteResponse, err = rewriteRequest.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -909,6 +1154,7 @@ func (o *Object) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
o.mimeType = info.ContentType
o.gzipped = info.ContentEncoding == "gzip"

// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
@@ -947,13 +1193,28 @@ func (o *Object) setMetaData(info *storage.Object) {
} else {
o.modTime = modTime
}

// If gunzipping then size and md5sum are unknown
if o.gzipped && o.fs.opt.Decompress {
o.bytes = -1
o.md5sum = ""
}
}

// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
return o.fs.readObjectInfo(ctx, bucket, bucketPath)
}

// readObjectInfo reads the definition for an object
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
object, err = get.Do()
return shouldRetry(ctx, err)
})
if err != nil {
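
Requester-pays support follows one repeated pattern through this file: every storage API call gets the configured user_project attached before Do(), so the caller rather than the bucket owner is billed. Isolated for one call type (a sketch; the real code repeats this inline per call):

import storage "google.golang.org/api/storage/v1"

// withUserProject attaches the billing project to a get call when set.
func withUserProject(get *storage.ObjectsGetCall, userProject string) *storage.ObjectsGetCall {
	if userProject != "" {
		get = get.UserProject(userProject)
	}
	return get
}
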
@@ -1025,7 +1286,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
copyObject = copyObject.Context(ctx)
if o.fs.opt.UserProject != "" {
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = copyObject.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1042,11 +1307,26 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.UserProject != "" {
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
}
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil {
return nil, err
}
fs.FixRangeOption(options, o.bytes)
if o.gzipped && !o.fs.opt.Decompress {
// Allow files which are stored on the cloud storage system
// compressed to be downloaded without being decompressed. Note
// that setting this here overrides the automatic decompression
// in the Transport.
//
// See: https://cloud.google.com/storage/docs/transcoding
req.Header.Set("Accept-Encoding", "gzip")
o.fs.warnCompressed.Do(func() {
fs.Logf(o, "Not decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-decompress to override")
})
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
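
Setting Accept-Encoding: gzip explicitly is what keeps Go's http.Transport from transparently gunzipping objects stored with Content-Encoding: gzip (the transport only auto-decompresses when it added the header itself), so rclone receives the stored bytes verbatim unless --gcs-decompress is set. A sketch of that behaviour in isolation:

import (
	"context"
	"net/http"
)

// rawGzipRequest builds a GET that receives stored gzip bytes as-is.
// Illustrative; url is assumed to be a valid media link.
func rawGzipRequest(ctx context.Context, url string) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept-Encoding", "gzip") // disable automatic decompression
	return req, nil
}
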
@@ -1073,11 +1353,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
if err != nil {
return err
// Create parent dir/bucket if not saving directory marker
if !strings.HasSuffix(o.remote, "/") {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return err
}
}
modTime := src.ModTime(ctx)

@@ -1122,7 +1405,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
insertObject = insertObject.Context(ctx)
if o.fs.opt.UserProject != "" {
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = insertObject.Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1137,7 +1424,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
if o.fs.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err)
})
return err

@@ -6,6 +6,7 @@ import (
"testing"

"github.com/rclone/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)

@@ -16,3 +17,17 @@ func TestIntegration(t *testing.T) {
NilObject: (*googlecloudstorage.Object)(nil),
})
}

func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestGoogleCloudStorage"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*googlecloudstorage.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}

@@ -1,3 +1,4 @@
// Package api provides types used by the Google Photos API.
package api

import (

@@ -69,7 +69,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
)

@@ -178,7 +178,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
@@ -562,7 +562,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
for i := range items {
item := &result.MediaItems[i]
remote := item.Filename
remote = strings.Replace(remote, "/", "／", -1)
remote = strings.ReplaceAll(remote, "/", "／")
err = fn(remote, item, false)
if err != nil {
return err
@@ -661,7 +661,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -3,7 +3,7 @@ package googlephotos
import (
"context"
"fmt"
"io/ioutil"
"io"
"net/http"
"path"
"testing"
@@ -12,7 +12,6 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
@@ -37,7 +36,7 @@ func TestIntegration(t *testing.T) {
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
}
require.NoError(t, err)

@@ -56,7 +55,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
@@ -99,7 +98,7 @@ func TestIntegration(t *testing.T) {
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := ioutil.ReadAll(in)
buf, err := io.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
@@ -221,7 +220,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()

@@ -315,7 +315,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S

// featureFilter creates a filter for the Feature enum
//
// The API only supports one feature, FAVORITES, so hardcode that feature
// The API only supports one feature, FAVORITES, so hardcode that feature.
//
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {

@@ -50,7 +50,7 @@ func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums,

// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, _ = f.uploaded[dir]
entries = f.uploaded[dir]
return entries, nil
}

@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
doneCount++
}
})

@@ -27,6 +27,9 @@ func init() {
Name: "hasher",
Description: "Better checksums for other remotes",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
@@ -158,6 +161,12 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
IsLocal: true,
ReadMimeType: true,
WriteMimeType: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

@@ -202,7 +211,11 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
for _, entry := range baseEntries {
switch x := entry.(type) {
case fs.Object:
hashEntries = append(hashEntries, f.wrapObject(x, nil))
obj, err := f.wrapObject(x, nil)
if err != nil {
return nil, err
}
hashEntries = append(hashEntries, obj)
default:
hashEntries = append(hashEntries, entry) // trash in - trash out
}
|
||||
if do := f.Fs.Features().PutStream; do != nil {
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := do(ctx, in, src, options...)
|
||||
return f.wrapObject(oResult, err), err
|
||||
return f.wrapObject(oResult, err)
|
||||
}
|
||||
return nil, errors.New("PutStream not supported")
|
||||
}
|
||||
@@ -261,7 +274,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
if do := f.Fs.Features().PutUnchecked; do != nil {
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := do(ctx, in, src, options...)
|
||||
return f.wrapObject(oResult, err), err
|
||||
return f.wrapObject(oResult, err)
|
||||
}
|
||||
return nil, errors.New("PutUnchecked not supported")
|
||||
}
|
||||
@@ -278,7 +291,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
if do := f.Fs.Features().CleanUp; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return errors.New("CleanUp not supported")
|
||||
return errors.New("not supported by underlying remote")
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
@@ -286,7 +299,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
if do := f.Fs.Features().About; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return nil, errors.New("About not supported")
|
||||
return nil, errors.New("not supported by underlying remote")
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path that has had changes.
|
||||
@@ -348,7 +361,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
oResult, err := do(ctx, o.Object, remote)
|
||||
return f.wrapObject(oResult, err), err
|
||||
return f.wrapObject(oResult, err)
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
@@ -371,7 +384,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
dir: false,
|
||||
fs: f,
|
||||
})
|
||||
return f.wrapObject(oResult, nil), nil
|
||||
return f.wrapObject(oResult, nil)
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
|
||||
@@ -410,7 +423,7 @@ func (f *Fs) Shutdown(ctx context.Context) (err error) {
|
||||
// NewObject finds the Object at remote.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
o, err := f.Fs.NewObject(ctx, remote)
|
||||
return f.wrapObject(o, err), err
|
||||
return f.wrapObject(o, err)
|
||||
}
|
||||
|
||||
//
|
||||
@@ -424,11 +437,15 @@ type Object struct {
|
||||
}
|
||||
|
||||
// Wrap base object into hasher object
|
||||
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
|
||||
if err != nil || o == nil {
|
||||
return nil
|
||||
func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
|
||||
// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Object{Object: o, f: f}
|
||||
if o == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return &Object{Object: o, f: f}, nil
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
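The wrapObject rewrite above replaces a nil-on-failure pointer return with the idiomatic (value, error) pair. One motivation worth noting: a typed nil pointer stored in an interface is not equal to nil, so the old `*Object` return was easy to misuse once callers held the result as an `fs.Object`. A minimal sketch of that pitfall (the types here are hypothetical stand-ins, not the hasher code):

package main

import "fmt"

type object struct{}

func (o *object) String() string { return "object" }

// wrapOld mimics the old style: it returns a possibly-nil pointer.
func wrapOld(fail bool) *object {
	if fail {
		return nil
	}
	return &object{}
}

func main() {
	var iface fmt.Stringer = wrapOld(true) // typed nil stored in an interface
	fmt.Println(iface == nil)              // false - the classic gotcha
	// Returning (value, error) instead makes the failure explicit.
}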

@@ -477,6 +494,17 @@ func (o *Object) MimeType(ctx context.Context) string {
 	return ""
 }

+// Metadata returns metadata for an object
+//
+// It should return nil if there is no Metadata
+func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
+	do, ok := o.Object.(fs.Metadataer)
+	if !ok {
+		return nil, nil
+	}
+	return do.Metadata(ctx)
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = (*Fs)(nil)

@@ -499,10 +527,5 @@ var (
 	_ fs.UserInfoer   = (*Fs)(nil)
 	_ fs.Disconnecter = (*Fs)(nil)
 	_ fs.Shutdowner   = (*Fs)(nil)
-	_ fs.Object          = (*Object)(nil)
-	_ fs.ObjectUnWrapper = (*Object)(nil)
-	_ fs.IDer            = (*Object)(nil)
-	_ fs.SetTierer       = (*Object)(nil)
-	_ fs.GetTierer       = (*Object)(nil)
-	_ fs.MimeTyper       = (*Object)(nil)
+	_ fs.FullObject = (*Object)(nil)
 )
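The `var _ ... = (*T)(nil)` lines above are compile-time assertions: they cost nothing at runtime but fail the build if a type stops satisfying an interface. A tiny self-contained version of the same pattern (the `Sizer` interface here is hypothetical):

package main

import "fmt"

type Sizer interface {
	Size() int64
}

type file struct{ n int64 }

func (f *file) Size() int64 { return f.n }

// Compile-time check: *file must implement Sizer,
// or this line refuses to compile.
var _ Sizer = (*file)(nil)

func main() {
	fmt.Println((&file{n: 42}).Size())
}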

@@ -19,7 +19,7 @@ import (
 func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
 	mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
 	item := fstest.Item{Path: name, ModTime: mtime1}
-	_, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
+	o := fstests.PutTestContents(ctx, t, f, &item, data, true)
 	require.NotNil(t, o)
 	return o
 }

@@ -35,7 +35,7 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
 	// make a temporary crypt remote
 	ctx := context.Background()
 	pass := obscure.MustObscure("crypt")
-	remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
+	remote := fmt.Sprintf(`:crypt,remote="%s",password="%s":`, tempRoot, pass)
 	cryptFs, err := fs.NewFs(ctx, remote)
 	require.NoError(t, err)
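Quoting the interpolated values appears to matter because rclone connection strings are comma- and colon-delimited, so a value containing those characters would otherwise split the string. A hedged sketch of the difference (the path and password are made up):

package main

import "fmt"

func main() {
	tempRoot := "/tmp/with,comma" // a value that would break the unquoted form
	pass := "0bscur3d"

	unquoted := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
	quoted := fmt.Sprintf(`:crypt,remote="%s",password="%s":`, tempRoot, pass)

	fmt.Println(unquoted) // the comma in the value corrupts the config string
	fmt.Println(quoted)   // quoting keeps each value intact
}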

@@ -33,6 +33,7 @@ func TestIntegration(t *testing.T) {
 			{Name: "TestHasher", Key: "remote", Value: tempDir},
 		}
 		opt.RemoteName = "TestHasher:"
+		opt.QuickTestOK = true
 	}
 	fstests.Run(t, &opt)
 }

@@ -5,7 +5,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"path"
 	"time"

@@ -118,7 +117,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
 	defer func() {
 		_ = r.Close()
 	}()
-	if _, err = io.Copy(ioutil.Discard, r); err != nil {
+	if _, err = io.Copy(io.Discard, r); err != nil {
 		fs.Infof(o, "update failed (copy): %v", err)
 		return err
 	}

@@ -184,7 +183,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadC
 // Put data into the remote path with given modTime and size
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	var (
-		o      *Object
+		o      fs.Object
 		common hash.Set
 		rehash bool
 		hashes hashMap

@@ -210,8 +209,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

 	_ = f.pruneHash(src.Remote())
 	oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
-	o = f.wrapObject(oResult, err)
-	if o == nil {
+	o, err = f.wrapObject(oResult, err)
+	if err != nil {
 		return nil, err
 	}

@@ -224,7 +223,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 		}
 	}
 	if len(hashes) > 0 {
-		err := o.putHashes(ctx, hashes)
+		err := o.(*Object).putHashes(ctx, hashes)
 		fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
 	}
 	return o, err
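With `o` now declared as the `fs.Object` interface, reaching the concrete type's unexported `putHashes` method requires the `o.(*Object)` type assertion seen above. A minimal standalone illustration of that pattern (hypothetical types):

package main

import "fmt"

type Object interface {
	Remote() string
}

type hashObject struct{ remote string }

func (o *hashObject) Remote() string   { return o.remote }
func (o *hashObject) putHashes() error { return nil } // not part of the interface

func main() {
	var o Object = &hashObject{remote: "file.txt"}
	// The interface does not expose putHashes; assert the concrete type.
	err := o.(*hashObject).putHashes()
	fmt.Println(o.Remote(), err)
}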

@@ -92,7 +92,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.ServicePrincipalName != "" {
 		options.KerberosClient, err = getKerberosClient()
 		if err != nil {
-			return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
+			return nil, fmt.Errorf("problem with kerberos authentication: %w", err)
 		}
 		options.KerberosServicePrincipleName = opt.ServicePrincipalName
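Switching the verb from %s to %w (and lowercasing the message, per Go error-string convention) keeps the underlying error inspectable with errors.Is and errors.As. A short self-contained sketch:

package main

import (
	"errors"
	"fmt"
)

var errKerberos = errors.New("ticket expired")

func connect() error {
	// %w wraps the cause instead of flattening it to text.
	return fmt.Errorf("problem with kerberos authentication: %w", errKerberos)
}

func main() {
	err := connect()
	fmt.Println(errors.Is(err, errKerberos)) // true - the cause survives wrapping
}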

@@ -265,9 +265,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given
+// This is stored with the remote path given.
 //
-// It returns the destination Object and a possible error
+// It returns the destination Object and a possible error.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1,6 +1,7 @@
 //go:build !plan9
 // +build !plan9

+// Package hdfs provides an interface to the HDFS storage system.
 package hdfs

 import (

@@ -22,9 +23,8 @@ func init() {
 			Help:     "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
 			Required: true,
 		}, {
-			Name:     "username",
-			Help:     "Hadoop user name.",
-			Required: false,
+			Name: "username",
+			Help: "Hadoop user name.",
 			Examples: []fs.OptionExample{{
 				Value: "root",
 				Help:  "Connect to hdfs as root.",

@@ -36,17 +36,15 @@ func init() {
 Enables KERBEROS authentication. Specifies the Service Principal Name
 (SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
 for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
-			Required: false,
 			Advanced: true,
 		}, {
 			Name: "data_transfer_protection",
 			Help: `Kerberos data transfer protection: authentication|integrity|privacy.

 Specifies whether or not authentication, data signature integrity
-checks, and wire encryption is required when communicating the the
-datanodes. Possible values are 'authentication', 'integrity' and
-'privacy'. Used only with KERBEROS enabled.`,
-			Required: false,
+checks, and wire encryption are required when communicating with
+the datanodes. Possible values are 'authentication', 'integrity'
+and 'privacy'. Used only with KERBEROS enabled.`,
 			Examples: []fs.OptionExample{{
 				Value: "privacy",
 				Help:  "Ensure authentication, integrity and encryption enabled.",

@@ -115,7 +115,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}

-	info, err := o.fs.client.Stat(realpath)
+	_, err = o.fs.client.Stat(realpath)
 	if err == nil {
 		err = o.fs.client.Remove(realpath)
 		if err != nil {

@@ -147,7 +147,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}

-	info, err = o.fs.client.Stat(realpath)
+	info, err := o.fs.client.Stat(realpath)
 	if err != nil {
 		return err
 	}

backend/hidrive/api/queries.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package api

import (
	"encoding/json"
	"net/url"
	"path"
	"strings"
	"time"
)

// Some presets for different amounts of information that can be requested for fields;
// it is recommended to only request the information that is actually needed.
var (
	HiDriveObjectNoMetadataFields            = []string{"name", "type"}
	HiDriveObjectWithMetadataFields          = append(HiDriveObjectNoMetadataFields, "id", "size", "mtime", "chash")
	HiDriveObjectWithDirectoryMetadataFields = append(HiDriveObjectWithMetadataFields, "nmembers")
	DirectoryContentFields                   = []string{"nmembers"}
)

// QueryParameters represents the parameters passed to an API-call.
type QueryParameters struct {
	url.Values
}

// NewQueryParameters initializes an instance of QueryParameters and
// returns a pointer to it.
func NewQueryParameters() *QueryParameters {
	return &QueryParameters{url.Values{}}
}

// SetFileInDirectory sets the appropriate parameters
// to specify a path to a file in a directory.
// This is used by requests that work with paths for files that do not exist yet.
// (For example when creating a file.)
// Most requests use the format produced by SetPath(...).
func (p *QueryParameters) SetFileInDirectory(filePath string) {
	directory, file := path.Split(path.Clean(filePath))
	p.Set("dir", path.Clean(directory))
	p.Set("name", file)
	// NOTE: It would be possible to switch to pid-based requests
	// by modifying this function.
}

// SetPath sets the appropriate parameters to access the given path.
func (p *QueryParameters) SetPath(objectPath string) {
	p.Set("path", path.Clean(objectPath))
	// NOTE: It would be possible to switch to pid-based requests
	// by modifying this function.
}

// SetTime sets the key to the time-value. It replaces any existing values.
func (p *QueryParameters) SetTime(key string, value time.Time) error {
	valueAPI := Time(value)
	valueBytes, err := json.Marshal(&valueAPI)
	if err != nil {
		return err
	}
	p.Set(key, string(valueBytes))
	return nil
}

// AddList adds the given values as a list
// with each value separated by the separator.
// It appends to any existing values associated with key.
func (p *QueryParameters) AddList(key string, separator string, values ...string) {
	original := p.Get(key)
	p.Set(key, strings.Join(values, separator))
	if original != "" {
		p.Set(key, original+separator+p.Get(key))
	}
}

// AddFields sets the appropriate parameter to access the given fields.
// The given fields will be appended to any other existing fields.
func (p *QueryParameters) AddFields(prefix string, fields ...string) {
	modifiedFields := make([]string, len(fields))
	for i, field := range fields {
		modifiedFields[i] = prefix + field
	}
	p.AddList("fields", ",", modifiedFields...)
}
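To see how these helpers compose, here is a hedged usage sketch that builds the parameters for a directory listing. It inlines a trimmed-down copy of QueryParameters so it runs on its own; only net/url and strings from the standard library are assumed:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// queryParameters is a reduced stand-in for api.QueryParameters.
type queryParameters struct{ url.Values }

func (p *queryParameters) addList(key, sep string, values ...string) {
	original := p.Get(key)
	p.Set(key, strings.Join(values, sep))
	if original != "" {
		p.Set(key, original+sep+p.Get(key))
	}
}

func main() {
	p := &queryParameters{url.Values{}}
	p.Set("path", "/photos")
	p.addList("fields", ",", "members.name", "members.type")
	p.addList("fields", ",", "nmembers") // appends to the existing list
	fmt.Println(p.Encode())              // fields=members.name%2Cmembers.type%2Cnmembers&path=%2Fphotos
}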

backend/hidrive/api/types.go (new file, 135 lines)
@@ -0,0 +1,135 @@
// Package api has type definitions and code related to API-calls for the HiDrive-API.
package api

import (
	"encoding/json"
	"fmt"
	"net/url"
	"strconv"
	"time"
)

// Time represents date and time information for the API.
type Time time.Time

// MarshalJSON turns Time into JSON (in Unix-time/UTC).
func (t *Time) MarshalJSON() ([]byte, error) {
	secs := time.Time(*t).Unix()
	return []byte(strconv.FormatInt(secs, 10)), nil
}

// UnmarshalJSON turns JSON into Time.
func (t *Time) UnmarshalJSON(data []byte) error {
	secs, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return err
	}
	*t = Time(time.Unix(secs, 0))
	return nil
}

// Error is returned from the API when things go wrong.
type Error struct {
	Code        json.Number `json:"code"`
	ContextInfo json.RawMessage
	Message     string `json:"msg"`
}

// Error returns a string for the error and satisfies the error interface.
func (e *Error) Error() string {
	out := fmt.Sprintf("Error %q", e.Code.String())
	if e.Message != "" {
		out += ": " + e.Message
	}
	if e.ContextInfo != nil {
		out += fmt.Sprintf(" (%+v)", e.ContextInfo)
	}
	return out
}

// Check Error satisfies the error interface.
var _ error = (*Error)(nil)

// possible types for HiDriveObject
const (
	HiDriveObjectTypeDirectory = "dir"
	HiDriveObjectTypeFile      = "file"
	HiDriveObjectTypeSymlink   = "symlink"
)

// HiDriveObject describes a folder, a symlink or a file.
// Depending on the type and content, not all fields are present.
type HiDriveObject struct {
	Type         string `json:"type"`
	ID           string `json:"id"`
	ParentID     string `json:"parent_id"`
	Name         string `json:"name"`
	Path         string `json:"path"`
	Size         int64  `json:"size"`
	MemberCount  int64  `json:"nmembers"`
	ModifiedAt   Time   `json:"mtime"`
	ChangedAt    Time   `json:"ctime"`
	MetaHash     string `json:"mhash"`
	MetaOnlyHash string `json:"mohash"`
	NameHash     string `json:"nhash"`
	ContentHash  string `json:"chash"`
	IsTeamfolder bool   `json:"teamfolder"`
	Readable     bool   `json:"readable"`
	Writable     bool   `json:"writable"`
	Shareable    bool   `json:"shareable"`
	MIMEType     string `json:"mime_type"`
}

// ModTime returns the modification time of the HiDriveObject.
func (i *HiDriveObject) ModTime() time.Time {
	t := time.Time(i.ModifiedAt)
	if t.IsZero() {
		t = time.Time(i.ChangedAt)
	}
	return t
}

// UnmarshalJSON turns JSON into HiDriveObject and
// introduces specific default-values where necessary.
func (i *HiDriveObject) UnmarshalJSON(data []byte) error {
	type objectAlias HiDriveObject
	defaultObject := objectAlias{
		Size:        -1,
		MemberCount: -1,
	}

	err := json.Unmarshal(data, &defaultObject)
	if err != nil {
		return err
	}
	name, err := url.PathUnescape(defaultObject.Name)
	if err == nil {
		defaultObject.Name = name
	}

	*i = HiDriveObject(defaultObject)
	return nil
}

// DirectoryContent describes the content of a directory.
type DirectoryContent struct {
	TotalCount int64           `json:"nmembers"`
	Entries    []HiDriveObject `json:"members"`
}

// UnmarshalJSON turns JSON into DirectoryContent and
// introduces specific default-values where necessary.
func (d *DirectoryContent) UnmarshalJSON(data []byte) error {
	type directoryContentAlias DirectoryContent
	defaultDirectoryContent := directoryContentAlias{
		TotalCount: -1,
	}

	err := json.Unmarshal(data, &defaultDirectoryContent)
	if err != nil {
		return err
	}

	*d = DirectoryContent(defaultDirectoryContent)
	return nil
}
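The custom Time type above serializes as integer Unix seconds rather than RFC 3339. A self-contained round-trip sketch of the same idea (the type is a local mirror of api.Time, not the package itself):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"time"
)

// unixTime mirrors api.Time: its JSON form is integer Unix seconds.
type unixTime time.Time

func (t *unixTime) MarshalJSON() ([]byte, error) {
	return []byte(strconv.FormatInt(time.Time(*t).Unix(), 10)), nil
}

func (t *unixTime) UnmarshalJSON(data []byte) error {
	secs, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return err
	}
	*t = unixTime(time.Unix(secs, 0))
	return nil
}

func main() {
	orig := unixTime(time.Unix(1654041600, 0))
	data, _ := json.Marshal(&orig)
	fmt.Println(string(data)) // 1654041600

	var parsed unixTime
	_ = json.Unmarshal(data, &parsed)
	fmt.Println(time.Time(parsed).Equal(time.Time(orig))) // true
}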

backend/hidrive/helpers.go (new file, 879 lines)
@@ -0,0 +1,879 @@
package hidrive

// This file is for helper-functions which may provide more general and
// specialized functionality than the generic interfaces.
// There are two sections:
// 1. methods bound to Fs
// 2. other functions independent from Fs used throughout the package

// NOTE: Functions accessing paths expect any relative paths
// to be resolved prior to execution with resolvePath(...).

import (
	"bytes"
	"context"
	"errors"
	"io"
	"net/http"
	"path"
	"strconv"
	"sync"
	"time"

	"github.com/rclone/rclone/backend/hidrive/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/ranges"
	"github.com/rclone/rclone/lib/readers"
	"github.com/rclone/rclone/lib/rest"
	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

const (
	// MaximumUploadBytes represents the maximum amount of bytes
	// a single upload-operation will support.
	MaximumUploadBytes = 2147483647 // = 2GiB - 1
	// iterationChunkSize represents the chunk size used to iterate directory contents.
	iterationChunkSize = 5000
)

var (
	// retryErrorCodes is a slice of error codes that we will always retry.
	retryErrorCodes = []int{
		429, // Too Many Requests
		500, // Internal Server Error
		502, // Bad Gateway
		503, // Service Unavailable
		504, // Gateway Timeout
		509, // Bandwidth Limit Exceeded
	}
	// ErrorFileExists is returned when a query tries to create a file
	// that already exists.
	ErrorFileExists = errors.New("destination file already exists")
)

// MemberType represents the possible types of entries a directory can contain.
type MemberType string

// possible values for MemberType
const (
	AllMembers       MemberType = "all"
	NoMembers        MemberType = "none"
	DirectoryMembers MemberType = api.HiDriveObjectTypeDirectory
	FileMembers      MemberType = api.HiDriveObjectTypeFile
	SymlinkMembers   MemberType = api.HiDriveObjectTypeSymlink
)

// SortByField represents possible fields to sort entries of a directory by.
type SortByField string

// possible values for SortByField
const (
	descendingSort string = "-"

	SortByName                 SortByField = "name"
	SortByModTime              SortByField = "mtime"
	SortByObjectType           SortByField = "type"
	SortBySize                 SortByField = "size"
	SortByNameDescending       SortByField = SortByField(descendingSort) + SortByName
	SortByModTimeDescending    SortByField = SortByField(descendingSort) + SortByModTime
	SortByObjectTypeDescending SortByField = SortByField(descendingSort) + SortByObjectType
	SortBySizeDescending       SortByField = SortByField(descendingSort) + SortBySize
)

var (
	// Unsorted disables sorting and can therefore not be combined with other values.
	Unsorted = []SortByField{"none"}
	// DefaultSorted does not specify how to sort and
	// therefore implies the default sort order.
	DefaultSorted = []SortByField{}
)

// CopyOrMoveOperationType represents the possible types of copy- and move-operations.
type CopyOrMoveOperationType int

// possible values for CopyOrMoveOperationType
const (
	MoveOriginal CopyOrMoveOperationType = iota
	CopyOriginal
	CopyOriginalPreserveModTime
)

// OnExistAction represents possible actions the API should take,
// when a request tries to create a path that already exists.
type OnExistAction string

// possible values for OnExistAction
const (
	// IgnoreOnExist instructs the API not to execute
	// the request in case of a conflict, but to return an error.
	IgnoreOnExist OnExistAction = "ignore"
	// AutoNameOnExist instructs the API to automatically rename
	// any conflicting request-objects.
	AutoNameOnExist OnExistAction = "autoname"
	// OverwriteOnExist instructs the API to overwrite any conflicting files.
	// This can only be used, if the request operates on files directly.
	// (For example when moving/copying a file.)
	// For most requests this action will simply be ignored.
	OverwriteOnExist OnExistAction = "overwrite"
)

// shouldRetry returns a boolean as to whether this resp and err deserve to be retried.
// It tries to expire/invalidate the token, if necessary.
// It returns the err as a convenience.
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if resp != nil && (resp.StatusCode == 401 || isHTTPError(err, 401)) && len(resp.Header["Www-Authenticate"]) > 0 {
		fs.Debugf(f, "Token might be invalid: %v", err)
		if f.tokenRenewer != nil {
			iErr := f.tokenRenewer.Expire()
			if iErr == nil {
				return true, err
			}
		}
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// resolvePath resolves the given (relative) path and
// returns a path suitable for API-calls.
// This will consider the root-path of the fs and any needed prefixes.
//
// Any relative paths passed to functions that access these paths should
// be resolved with this first!
func (f *Fs) resolvePath(objectPath string) string {
	resolved := path.Join(f.opt.RootPrefix, f.root, f.opt.Enc.FromStandardPath(objectPath))
	return resolved
}

// iterateOverDirectory calls the given function callback
// on each item found in a given directory.
//
// If callback ever returns true then this exits early with found = true.
func (f *Fs) iterateOverDirectory(ctx context.Context, directory string, searchOnly MemberType, callback func(*api.HiDriveObject) bool, fields []string, sortBy []SortByField) (found bool, err error) {
	parameters := api.NewQueryParameters()
	parameters.SetPath(directory)
	parameters.AddFields("members.", fields...)
	parameters.AddFields("", api.DirectoryContentFields...)
	parameters.Set("members", string(searchOnly))
	for _, v := range sortBy {
		// The explicit conversion is necessary for each element.
		parameters.AddList("sort", ",", string(v))
	}

	opts := rest.Opts{
		Method:     "GET",
		Path:       "/dir",
		Parameters: parameters.Values,
	}

	iterateContent := func(result *api.DirectoryContent, err error) (bool, error) {
		if err != nil {
			return false, err
		}
		for _, item := range result.Entries {
			item.Name = f.opt.Enc.ToStandardName(item.Name)
			if callback(&item) {
				return true, nil
			}
		}
		return false, nil
	}
	return f.paginateDirectoryAccess(ctx, &opts, iterationChunkSize, 0, iterateContent)
}

// paginateDirectoryAccess executes requests specified via ctx and opts
// which should produce api.DirectoryContent.
// This will paginate the requests using limit starting at the given offset.
//
// The given function callback is called on each api.DirectoryContent found
// along with any errors that occurred.
// If callback ever returns true then this exits early with found = true.
// If callback ever returns an error then this exits early with that error.
func (f *Fs) paginateDirectoryAccess(ctx context.Context, opts *rest.Opts, limit int64, offset int64, callback func(*api.DirectoryContent, error) (bool, error)) (found bool, err error) {
	for {
		opts.Parameters.Set("limit", strconv.FormatInt(offset, 10)+","+strconv.FormatInt(limit, 10))

		var result api.DirectoryContent
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, opts, nil, &result)
			return f.shouldRetry(ctx, resp, err)
		})

		found, err = callback(&result, err)
		if found || err != nil {
			return found, err
		}

		offset += int64(len(result.Entries))
		if offset >= result.TotalCount || limit > int64(len(result.Entries)) {
			break
		}
	}
	return false, nil
}
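The offset/limit loop above is easier to follow in isolation. This hedged sketch simulates the same pagination contract against an in-memory "directory" instead of the HiDrive API:

package main

import "fmt"

// fetchPage simulates one API call returning at most limit entries from offset.
func fetchPage(all []string, offset, limit int) []string {
	if offset >= len(all) {
		return nil
	}
	end := offset + limit
	if end > len(all) {
		end = len(all)
	}
	return all[offset:end]
}

func main() {
	entries := []string{"a", "b", "c", "d", "e"}
	const limit = 2
	for offset := 0; ; {
		page := fetchPage(entries, offset, limit)
		fmt.Println("page:", page)
		offset += len(page)
		// Same stop conditions as paginateDirectoryAccess:
		// past the total, or a short page means the listing is exhausted.
		if offset >= len(entries) || limit > len(page) {
			break
		}
	}
}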

// fetchMetadataForPath reads the metadata from the path.
func (f *Fs) fetchMetadataForPath(ctx context.Context, path string, fields []string) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.SetPath(path)
	parameters.AddFields("", fields...)

	opts := rest.Opts{
		Method:     "GET",
		Path:       "/meta",
		Parameters: parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	return &result, nil
}

// copyOrMove copies or moves a directory or file
// from the source-path to the destination-path.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: Use the explicit methods instead of directly invoking this method.
// (Those are: copyDirectory, moveDirectory, copyFile, moveFile.)
func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType CopyOrMoveOperationType, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.Set("src", source)
	parameters.Set("dst", destination)
	if onExist == AutoNameOnExist ||
		(onExist == OverwriteOnExist && !isDirectory) {
		parameters.Set("on_exist", string(onExist))
	}

	endpoint := "/"
	if isDirectory {
		endpoint += "dir"
	} else {
		endpoint += "file"
	}
	switch operationType {
	case MoveOriginal:
		endpoint += "/move"
	case CopyOriginalPreserveModTime:
		parameters.Set("preserve_mtime", strconv.FormatBool(true))
		fallthrough
	case CopyOriginal:
		endpoint += "/copy"
	}

	opts := rest.Opts{
		Method:     "POST",
		Path:       endpoint,
		Parameters: parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	return &result, nil
}

// moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) moveDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
	return f.copyOrMove(ctx, true, MoveOriginal, source, destination, onExist)
}

// copyFile copies the file at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: This operation will expand sparse areas in the content of the source-file
// to blocks of 0-bytes in the destination-file.
func (f *Fs) copyFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
	return f.copyOrMove(ctx, false, CopyOriginalPreserveModTime, source, destination, onExist)
}

// moveFile moves the file at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: This operation may expand sparse areas in the content of the source-file
// to blocks of 0-bytes in the destination-file.
func (f *Fs) moveFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
	return f.copyOrMove(ctx, false, MoveOriginal, source, destination, onExist)
}

// createDirectory creates the directory at the given path and
// returns the resulting api-object if successful.
//
// The directory will only be created if its parent-directory exists.
// This returns fs.ErrorDirNotFound if the parent-directory is not found.
// This returns fs.ErrorDirExists if the directory already exists.
func (f *Fs) createDirectory(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.SetPath(directory)
	if onExist == AutoNameOnExist {
		parameters.Set("on_exist", string(onExist))
	}

	opts := rest.Opts{
		Method:     "POST",
		Path:       "/dir",
		Parameters: parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})

	switch {
	case err == nil:
		return &result, nil
	case isHTTPError(err, 404):
		return nil, fs.ErrorDirNotFound
	case isHTTPError(err, 409):
		return nil, fs.ErrorDirExists
	}
	return nil, err
}

// createDirectories creates the directory at the given path
// along with any missing parent directories and
// returns the resulting api-object (of the created directory) if successful.
//
// This returns fs.ErrorDirExists if the directory already exists.
//
// If an error occurs while the parent directories are being created,
// any directories already created will NOT be deleted again.
func (f *Fs) createDirectories(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
	result, err := f.createDirectory(ctx, directory, onExist)
	if err == nil {
		return result, nil
	}
	if err != fs.ErrorDirNotFound {
		return nil, err
	}
	parentDirectory := path.Dir(directory)
	_, err = f.createDirectories(ctx, parentDirectory, onExist)
	if err != nil && err != fs.ErrorDirExists {
		return nil, err
	}
	// NOTE: Ignoring fs.ErrorDirExists does no harm,
	// since it does not mean the child directory cannot be created.
	return f.createDirectory(ctx, directory, onExist)
}

// deleteDirectory deletes the directory at the given path.
//
// If recursive is false, the directory will only be deleted if it is empty.
// If recursive is true, the directory will be deleted regardless of its content.
// This returns fs.ErrorDirNotFound if the directory is not found.
// This returns fs.ErrorDirectoryNotEmpty if the directory is not empty and
// recursive is false.
func (f *Fs) deleteDirectory(ctx context.Context, directory string, recursive bool) error {
	parameters := api.NewQueryParameters()
	parameters.SetPath(directory)
	parameters.Set("recursive", strconv.FormatBool(recursive))

	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/dir",
		Parameters: parameters.Values,
		NoResponse: true,
	}

	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return f.shouldRetry(ctx, resp, err)
	})

	switch {
	case isHTTPError(err, 404):
		return fs.ErrorDirNotFound
	case isHTTPError(err, 409):
		return fs.ErrorDirectoryNotEmpty
	}
	return err
}

// deleteObject deletes the object/file at the given path.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) deleteObject(ctx context.Context, path string) error {
	parameters := api.NewQueryParameters()
	parameters.SetPath(path)

	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/file",
		Parameters: parameters.Values,
		NoResponse: true,
	}

	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return f.shouldRetry(ctx, resp, err)
	})

	if isHTTPError(err, 404) {
		return fs.ErrorObjectNotFound
	}
	return err
}

// createFile creates a file at the given path
// with the content of the io.ReadSeeker.
// This guarantees that existing files will not be overwritten.
// The maximum size of the content is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// This returns fs.ErrorDirNotFound
// if the parent directory of the file is not found.
// This returns ErrorFileExists if a file already exists at the specified path.
func (f *Fs) createFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time, onExist OnExistAction) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.SetFileInDirectory(path)
	if onExist == AutoNameOnExist {
		parameters.Set("on_exist", string(onExist))
	}

	var err error
	if !modTime.IsZero() {
		err = parameters.SetTime("mtime", modTime)
		if err != nil {
			return nil, err
		}
	}

	opts := rest.Opts{
		Method:      "POST",
		Path:        "/file",
		Body:        content,
		ContentType: "application/octet-stream",
		Parameters:  parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		// Reset the reading index (in case this is a retry).
		if _, err = content.Seek(0, io.SeekStart); err != nil {
			return false, err
		}
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})

	switch {
	case err == nil:
		return &result, nil
	case isHTTPError(err, 404):
		return nil, fs.ErrorDirNotFound
	case isHTTPError(err, 409):
		return nil, ErrorFileExists
	}
	return nil, err
}

// overwriteFile updates the content of the file at the given path
// with the content of the io.ReadSeeker.
// If the file does not exist it will be created.
// The maximum size of the content is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// This returns fs.ErrorDirNotFound
// if the parent directory of the file is not found.
func (f *Fs) overwriteFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.SetFileInDirectory(path)

	var err error
	if !modTime.IsZero() {
		err = parameters.SetTime("mtime", modTime)
		if err != nil {
			return nil, err
		}
	}

	opts := rest.Opts{
		Method:      "PUT",
		Path:        "/file",
		Body:        content,
		ContentType: "application/octet-stream",
		Parameters:  parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		// Reset the reading index (in case this is a retry).
		if _, err = content.Seek(0, io.SeekStart); err != nil {
			return false, err
		}
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})

	switch {
	case err == nil:
		return &result, nil
	case isHTTPError(err, 404):
		return nil, fs.ErrorDirNotFound
	}
	return nil, err
}

// uploadFileChunked updates the content of the existing file at the given path
// with the content of the io.Reader.
// Returns the position of the last successfully written byte, stopping before the first failed write.
// If nothing was written this will be 0.
// Returns the resulting api-object if successful.
//
// Replaces the file contents by uploading multiple chunks of the given size in parallel.
// Therefore this can be used to upload files of any size efficiently.
// The number of parallel transfers is limited by transferLimit, which should be larger than 0.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: This method uses updateFileChunked and may create sparse files,
// if the upload of a chunk fails unexpectedly.
// See note about sparse files in patchFile.
// If any of the uploads fail, the process will be aborted and
// the first error that occurred will be returned.
// This is not an atomic operation,
// therefore if the upload fails the file may be partially modified.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) uploadFileChunked(ctx context.Context, path string, content io.Reader, modTime time.Time, chunkSize int, transferLimit int64) (okSize uint64, info *api.HiDriveObject, err error) {
	okSize, err = f.updateFileChunked(ctx, path, content, 0, chunkSize, transferLimit)

	if err == nil {
		info, err = f.resizeFile(ctx, path, okSize, modTime)
	}
	return okSize, info, err
}

// updateFileChunked updates the content of the existing file at the given path
// starting at the given offset.
// Returns the position of the last successfully written byte, stopping before the first failed write.
// If nothing was written this will be 0.
//
// Replaces the file contents starting from the given byte offset
// with the content of the io.Reader.
// If the offset is beyond the file end, the file is extended up to the offset.
//
// The upload is done in multiple chunks of the given size in parallel.
// Therefore this can be used to upload files of any size efficiently.
// The number of parallel transfers is limited by transferLimit, which should be larger than 0.
//
// NOTE: Because it is inefficient to set the modification time with every chunk,
// setting it to a specific value must be done in a separate request
// after this operation finishes.
//
// NOTE: This method uses patchFile and may create sparse files,
// especially if the upload of a chunk fails unexpectedly.
// See note about sparse files in patchFile.
// If any of the uploads fail, the process will be aborted and
// the first error that occurred will be returned.
// This is not an atomic operation,
// therefore if the upload fails the file may be partially modified.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) updateFileChunked(ctx context.Context, path string, content io.Reader, offset uint64, chunkSize int, transferLimit int64) (okSize uint64, err error) {
	var (
		okChunksMu sync.Mutex // protects the variables below
		okChunks   []ranges.Range
	)
	g, gCtx := errgroup.WithContext(ctx)
	transferSemaphore := semaphore.NewWeighted(transferLimit)

	var readErr error
	startMoreTransfers := true
	zeroTime := time.Time{}
	for chunk := uint64(0); startMoreTransfers; chunk++ {
		// Acquire semaphore to limit number of transfers in parallel.
		readErr = transferSemaphore.Acquire(gCtx, 1)
		if readErr != nil {
			break
		}

		// Read a chunk of data.
		chunkReader, bytesRead, readErr := readerForChunk(content, chunkSize)
		if bytesRead < chunkSize {
			startMoreTransfers = false
		}
		if readErr != nil || bytesRead <= 0 {
			break
		}

		// Transfer the chunk.
		chunkOffset := uint64(chunkSize)*chunk + offset
		g.Go(func() error {
			// After this upload is done,
			// signal that another transfer can be started.
			defer transferSemaphore.Release(1)
			uploadErr := f.patchFile(gCtx, path, cachedReader(chunkReader), chunkOffset, zeroTime)
			if uploadErr == nil {
				// Remember successfully written chunks.
				okChunksMu.Lock()
				okChunks = append(okChunks, ranges.Range{Pos: int64(chunkOffset), Size: int64(bytesRead)})
				okChunksMu.Unlock()
				fs.Debugf(f, "Done uploading chunk of size %v at offset %v.", bytesRead, chunkOffset)
			} else {
				fs.Infof(f, "Error while uploading chunk at offset %v. Error is %v.", chunkOffset, uploadErr)
			}
			return uploadErr
		})
	}

	if readErr != nil {
		// Log the error in case it is later ignored because of an upload-error.
		fs.Infof(f, "Error while reading/preparing to upload a chunk. Error is %v.", readErr)
	}

	err = g.Wait()

	// Compute the first continuous range of the file content,
	// which does not contain any failed chunks.
	// Do not forget to add the file content up to the starting offset,
	// which is presumed to be already correct.
	rs := ranges.Ranges{}
	rs.Insert(ranges.Range{Pos: 0, Size: int64(offset)})
	for _, chunkRange := range okChunks {
		rs.Insert(chunkRange)
	}
	if len(rs) > 0 && rs[0].Pos == 0 {
		okSize = uint64(rs[0].Size)
	}

	if err != nil {
		return okSize, err
	}
	if readErr != nil {
		return okSize, readErr
	}

	return okSize, nil
}

// patchFile updates the content of the existing file at the given path
// starting at the given offset.
//
// Replaces the file contents starting from the given byte offset
// with the content of the io.ReadSeeker.
// If the offset is beyond the file end, the file is extended up to the offset.
// The maximum size of the update is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: By extending the file up to the offset this may create sparse files,
// which allocate less space on the file system than their apparent size indicates,
// since holes between data chunks are "real" holes
// and not regions made up of consecutive 0-bytes.
// Subsequent operations (such as copying data)
// usually expand the holes into regions of 0-bytes.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) patchFile(ctx context.Context, path string, content io.ReadSeeker, offset uint64, modTime time.Time) error {
	parameters := api.NewQueryParameters()
	parameters.SetPath(path)
	parameters.Set("offset", strconv.FormatUint(offset, 10))

	if !modTime.IsZero() {
		err := parameters.SetTime("mtime", modTime)
		if err != nil {
			return err
		}
	}

	opts := rest.Opts{
		Method:      "PATCH",
		Path:        "/file",
		Body:        content,
		ContentType: "application/octet-stream",
		Parameters:  parameters.Values,
		NoResponse:  true,
	}

	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		// Reset the reading index (in case this is a retry).
		_, err = content.Seek(0, io.SeekStart)
		if err != nil {
			return false, err
		}
		resp, err = f.srv.Call(ctx, &opts)
		if isHTTPError(err, 423) {
			return true, err
		}
		return f.shouldRetry(ctx, resp, err)
	})

	if isHTTPError(err, 404) {
		return fs.ErrorObjectNotFound
	}
	return err
}

// resizeFile updates the existing file at the given path to be of the given size
// and returns the resulting api-object if successful.
//
// If the given size is smaller than the current filesize,
// the file is cut/truncated at that position.
// If the given size is larger, the file is extended up to that position.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: By extending the file this may create sparse files,
// which allocate less space on the file system than their apparent size indicates,
// since holes between data chunks are "real" holes
// and not regions made up of consecutive 0-bytes.
// Subsequent operations (such as copying data)
// usually expand the holes into regions of 0-bytes.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) resizeFile(ctx context.Context, path string, size uint64, modTime time.Time) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.SetPath(path)
	parameters.Set("size", strconv.FormatUint(size, 10))

	if !modTime.IsZero() {
		err := parameters.SetTime("mtime", modTime)
		if err != nil {
			return nil, err
		}
	}

	opts := rest.Opts{
		Method:     "POST",
		Path:       "/file/truncate",
		Parameters: parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})

	switch {
	case err == nil:
		return &result, nil
	case isHTTPError(err, 404):
		return nil, fs.ErrorObjectNotFound
	}
	return nil, err
}

// ------------------------------------------------------------

// isHTTPError compares the numerical status code
// of an api.Error to the given HTTP status.
//
// If the given error is not an api.Error or
// a numerical status code could not be determined, this returns false.
// Otherwise this returns whether the status code of the error is equal to the given status.
func isHTTPError(err error, status int64) bool {
	if apiErr, ok := err.(*api.Error); ok {
		errStatus, decodeErr := apiErr.Code.Int64()
		if decodeErr == nil && errStatus == status {
			return true
		}
	}
	return false
}
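A quick sketch of how such a status-matching helper behaves, with the api.Error type reduced to an inline equivalent so it runs standalone:

package main

import (
	"encoding/json"
	"fmt"
)

// apiError is a reduced stand-in for api.Error.
type apiError struct {
	Code json.Number
}

func (e *apiError) Error() string { return "api error " + e.Code.String() }

func isHTTPError(err error, status int64) bool {
	if apiErr, ok := err.(*apiError); ok {
		errStatus, decodeErr := apiErr.Code.Int64()
		if decodeErr == nil && errStatus == status {
			return true
		}
	}
	return false
}

func main() {
	var err error = &apiError{Code: "404"}
	fmt.Println(isHTTPError(err, 404))                 // true
	fmt.Println(isHTTPError(err, 409))                 // false
	fmt.Println(isHTTPError(fmt.Errorf("plain"), 404)) // false: not an apiError
}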

// createHiDriveScopes creates oauth-scopes
// from the given user-role and access-permissions.
//
// If the arguments are empty, they will not be included in the result.
func createHiDriveScopes(role string, access string) []string {
	switch {
	case role != "" && access != "":
		return []string{access + "," + role}
	case role != "":
		return []string{role}
	case access != "":
		return []string{access}
	}
	return []string{}
}

// cachedReader returns a version of the reader that caches its contents and
// can therefore be reset using Seek.
func cachedReader(reader io.Reader) io.ReadSeeker {
	bytesReader, ok := reader.(*bytes.Reader)
	if ok {
		return bytesReader
	}

	repeatableReader, ok := reader.(*readers.RepeatableReader)
	if ok {
		return repeatableReader
	}

	return readers.NewRepeatableReader(reader)
}

// readerForChunk reads a chunk of bytes from reader (after handling any accounting).
// Returns a new io.Reader (chunkReader) for that chunk
// and the number of bytes that have been read from reader.
func readerForChunk(reader io.Reader, length int) (chunkReader io.Reader, bytesRead int, err error) {
	// Unwrap any accounting from the input if present.
	reader, wrap := accounting.UnWrap(reader)

	// Read a chunk of data.
	buffer := make([]byte, length)
	bytesRead, err = io.ReadFull(reader, buffer)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		err = nil
	}
	if err != nil {
		return nil, bytesRead, err
	}
	// Truncate unused capacity.
	buffer = buffer[:bytesRead]

	// Use wrap to put any accounting back for chunkReader.
	return wrap(bytes.NewReader(buffer)), bytesRead, nil
}
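The helper above pre-reads each chunk into memory so the chunk can be retried. A standalone sketch of the same read-full-then-rewrap idea, without the rclone accounting layer:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readerForChunk reads up to length bytes and returns them as a fresh,
// seekable reader plus the count actually read (mirroring the helper above).
func readerForChunk(r io.Reader, length int) (io.Reader, int, error) {
	buf := make([]byte, length)
	n, err := io.ReadFull(r, buf)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		err = nil // a short final chunk is not an error
	}
	if err != nil {
		return nil, n, err
	}
	return bytes.NewReader(buf[:n]), n, nil
}

func main() {
	src := strings.NewReader("hello world!")
	for {
		chunk, n, err := readerForChunk(src, 5)
		if err != nil || n == 0 {
			break
		}
		data, _ := io.ReadAll(chunk)
		fmt.Printf("chunk of %d bytes: %q\n", n, data)
		if n < 5 {
			break // short chunk: input exhausted
		}
	}
}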

backend/hidrive/hidrive.go (new file, 1002 lines)
(File diff suppressed because it is too large.)

backend/hidrive/hidrive_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
// Test HiDrive filesystem interface
package hidrive

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote.
func TestIntegration(t *testing.T) {
	name := "TestHiDrive"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		NilObject:  (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize:       1,
			MaxChunkSize:       MaximumUploadBytes,
			CeilChunkSize:      nil,
			NeedMultipleChunks: false,
		},
	})
}

// Change the configured UploadChunkSize.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadChunkSize(chunksize fs.SizeSuffix) (fs.SizeSuffix, error) {
	var old fs.SizeSuffix
	old, f.opt.UploadChunkSize = f.opt.UploadChunkSize, chunksize
	return old, nil
}

// Change the configured UploadCutoff.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadCutoff(cutoff fs.SizeSuffix) (fs.SizeSuffix, error) {
	var old fs.SizeSuffix
	old, f.opt.UploadCutoff = f.opt.UploadCutoff, cutoff
	return old, nil
}

var (
	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
)
410
backend/hidrive/hidrivehash/hidrivehash.go
Normal file
410
backend/hidrive/hidrivehash/hidrivehash.go
Normal file
@@ -0,0 +1,410 @@
|
||||
// Package hidrivehash implements the HiDrive hashing algorithm which combines SHA-1 hashes hierarchically to a single top-level hash.
|
||||
//
|
||||
// Note: This implementation does not grant access to any partial hashes generated.
|
||||
//
|
||||
// See: https://developer.hidrive.com/wp-content/uploads/2021/07/HiDrive_Synchronization-v3.3-rev28.pdf
|
||||
// (link to newest version: https://static.hidrive.com/dev/0001)
|
||||
package hidrivehash
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockSize of the checksum in bytes.
|
||||
BlockSize = 4096
|
||||
// Size of the checksum in bytes.
|
||||
Size = sha1.Size
|
||||
// sumsPerLevel is the number of checksums
|
||||
sumsPerLevel = 256
|
||||
)
|
||||
|
||||
var (
|
||||
// zeroSum is a special hash consisting of 20 null-bytes.
|
||||
// This will be the hash of any empty file (or ones containing only null-bytes).
|
||||
zeroSum = [Size]byte{}
|
||||
// ErrorInvalidEncoding is returned when a hash should be decoded from a binary form that is invalid.
|
||||
ErrorInvalidEncoding = errors.New("encoded binary form is invalid for this hash")
|
||||
// ErrorHashFull is returned when a hash reached its capacity and cannot accept any more input.
|
||||
ErrorHashFull = errors.New("hash reached its capacity")
|
||||
)
|
||||
|
||||
// writeByBlock writes len(p) bytes from p to the io.Writer in blocks of size blockSize.
// It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
//
// A pointer bytesInBlock to a counter needs to be supplied,
// which is used to keep track of how many bytes have been written to the writer already.
// A pointer onlyNullBytesInBlock to a boolean needs to be supplied,
// which is used to keep track of whether the block so far only consists of null-bytes.
// The callback onBlockWritten is called whenever a full block has been written to the writer
// and is given as input the number of bytes that still need to be written.
func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *uint32, onlyNullBytesInBlock *bool, onBlockWritten func(remaining int) error) (n int, err error) {
	total := len(p)
	nullBytes := make([]byte, blockSize)
	for len(p) > 0 {
		toWrite := int(blockSize - *bytesInBlock)
		if toWrite > len(p) {
			toWrite = len(p)
		}
		c, err := writer.Write(p[:toWrite])
		*bytesInBlock += uint32(c)
		*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
		// Discard data written through a reslice.
		p = p[c:]
		if err != nil {
			return total - len(p), err
		}
		if *bytesInBlock == blockSize {
			err = onBlockWritten(len(p))
			if err != nil {
				return total - len(p), err
			}
			*bytesInBlock = 0
			*onlyNullBytesInBlock = true
		}
	}
	return total, nil
}
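// For example, with blockSize = 4, writeByBlock([]byte("abcdef"), ...) writes
// "abcd" into the first block, invokes onBlockWritten(2) since two bytes remain,
// and then writes "ef" into the next (still incomplete) block.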
// level is a hash.Hash that is used to aggregate the checksums produced by the level hierarchically beneath it.
// It is used to represent any level-n hash, except for level-0.
type level struct {
	checksum              [Size]byte // aggregated checksum of this level
	sumCount              uint32     // number of sums contained in this level so far
	bytesInHasher         uint32     // number of bytes written into hasher so far
	onlyNullBytesInHasher bool       // whether the hasher only contains null-bytes so far
	hasher                hash.Hash
}

// NewLevel returns a new hash.Hash computing any level-n hash, except level-0.
func NewLevel() hash.Hash {
	l := &level{}
	l.Reset()
	return l
}

// Add takes a position-embedded SHA-1 checksum and adds it to the level.
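// The addition is performed byte-wise from the least-significant (last) byte
// with carry, i.e. the two 160-bit values are added modulo 2^160.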
func (l *level) Add(sha1sum []byte) {
	var tmp uint
	var carry bool
	for i := Size - 1; i >= 0; i-- {
		tmp = uint(sha1sum[i]) + uint(l.checksum[i])
		if carry {
			tmp++
		}
		carry = tmp > 255
		l.checksum[i] = byte(tmp)
	}
}

// IsFull returns whether the number of checksums added to this level reached its capacity.
func (l *level) IsFull() bool {
	return l.sumCount >= sumsPerLevel
}
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// Contrary to the specification from hash.Hash, this DOES return an error,
// specifically ErrorHashFull if and only if IsFull() returns true.
func (l *level) Write(p []byte) (n int, err error) {
	if l.IsFull() {
		return 0, ErrorHashFull
	}
	onBlockWritten := func(remaining int) error {
		if !l.onlyNullBytesInHasher {
			c, err := l.hasher.Write([]byte{byte(l.sumCount)})
			l.bytesInHasher += uint32(c)
			if err != nil {
				return err
			}
			l.Add(l.hasher.Sum(nil))
		}
		l.sumCount++
		l.hasher.Reset()
		if remaining > 0 && l.IsFull() {
			return ErrorHashFull
		}
		return nil
	}
	return writeByBlock(p, l.hasher, uint32(l.BlockSize()), &l.bytesInHasher, &l.onlyNullBytesInHasher, onBlockWritten)
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (l *level) Sum(b []byte) []byte {
	return append(b, l.checksum[:]...)
}

// Reset resets the Hash to its initial state.
func (l *level) Reset() {
	l.checksum = zeroSum // clear the current checksum
	l.sumCount = 0
	l.bytesInHasher = 0
	l.onlyNullBytesInHasher = true
	l.hasher = sha1.New()
}

// Size returns the number of bytes Sum will return.
func (l *level) Size() int {
	return Size
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (l *level) BlockSize() int {
	return Size
}
// MarshalBinary encodes the hash into a binary form and returns the result.
func (l *level) MarshalBinary() ([]byte, error) {
	b := make([]byte, Size+4+4+1)
	copy(b, l.checksum[:])
	binary.BigEndian.PutUint32(b[Size:], l.sumCount)
	binary.BigEndian.PutUint32(b[Size+4:], l.bytesInHasher)
	if l.onlyNullBytesInHasher {
		b[Size+4+4] = 1
	}
	encodedHasher, err := l.hasher.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		return nil, err
	}
	b = append(b, encodedHasher...)
	return b, nil
}

// UnmarshalBinary decodes the binary form generated by MarshalBinary.
// The hash will replace its internal state accordingly.
func (l *level) UnmarshalBinary(b []byte) error {
	if len(b) < Size+4+4+1 {
		return ErrorInvalidEncoding
	}
	copy(l.checksum[:], b)
	l.sumCount = binary.BigEndian.Uint32(b[Size:])
	l.bytesInHasher = binary.BigEndian.Uint32(b[Size+4:])
	switch b[Size+4+4] {
	case 0:
		l.onlyNullBytesInHasher = false
	case 1:
		l.onlyNullBytesInHasher = true
	default:
		return ErrorInvalidEncoding
	}
	err := l.hasher.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[Size+4+4+1:])
	return err
}
// hidriveHash is the hash computing the actual checksum used by HiDrive by combining multiple level-hashes.
type hidriveHash struct {
	levels               []*level   // collection of level-hashes, one for each level starting at level-1
	lastSumWritten       [Size]byte // the last checksum written to any of the levels
	bytesInBlock         uint32     // bytes written into blockHash so far
	onlyNullBytesInBlock bool       // whether the hasher only contains null-bytes so far
	blockHash            hash.Hash
}

// New returns a new hash.Hash computing the HiDrive checksum.
func New() hash.Hash {
	h := &hidriveHash{}
	h.Reset()
	return h
}

// aggregateToLevel writes the checksum to the level at the given index
// and if necessary propagates any changes to levels above.
func (h *hidriveHash) aggregateToLevel(index int, sum []byte) {
	for i := index; ; i++ {
		if i >= len(h.levels) {
			h.levels = append(h.levels, NewLevel().(*level))
		}
		_, err := h.levels[i].Write(sum)
		copy(h.lastSumWritten[:], sum)
		if err != nil {
			panic(fmt.Errorf("level-hash should not have produced an error: %w", err))
		}
		if !h.levels[i].IsFull() {
			break
		}
		sum = h.levels[i].Sum(nil)
		h.levels[i].Reset()
	}
}

// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (h *hidriveHash) Write(p []byte) (n int, err error) {
	onBlockWritten := func(remaining int) error {
		var sum []byte
		if h.onlyNullBytesInBlock {
			sum = zeroSum[:]
		} else {
			sum = h.blockHash.Sum(nil)
		}
		h.blockHash.Reset()
		h.aggregateToLevel(0, sum)
		return nil
	}
	return writeByBlock(p, h.blockHash, uint32(BlockSize), &h.bytesInBlock, &h.onlyNullBytesInBlock, onBlockWritten)
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (h *hidriveHash) Sum(b []byte) []byte {
	// Save internal state.
	state, err := h.MarshalBinary()
	if err != nil {
		panic(fmt.Errorf("saving the internal state should not have produced an error: %w", err))
	}

	if h.bytesInBlock > 0 {
		// Fill remainder of block with null-bytes.
		filler := make([]byte, h.BlockSize()-int(h.bytesInBlock))
		_, err = h.Write(filler)
		if err != nil {
			panic(fmt.Errorf("filling with null-bytes should not have produced an error: %w", err))
		}
	}

	checksum := zeroSum
	for i := 0; i < len(h.levels); i++ {
		level := h.levels[i]
		if i < len(h.levels)-1 {
			// Aggregate non-empty non-final levels.
			if level.sumCount >= 1 {
				h.aggregateToLevel(i+1, level.Sum(nil))
				level.Reset()
			}
		} else {
			// Determine sum of final level.
			if level.sumCount > 1 {
				copy(checksum[:], level.Sum(nil))
			} else {
				// This is needed, otherwise there is no way to return
				// the non-position-embedded checksum.
				checksum = h.lastSumWritten
			}
		}
	}

	// Restore internal state.
	err = h.UnmarshalBinary(state)
	if err != nil {
		panic(fmt.Errorf("restoring the internal state should not have produced an error: %w", err))
	}

	return append(b, checksum[:]...)
}

// Reset resets the Hash to its initial state.
func (h *hidriveHash) Reset() {
	h.levels = nil
	h.lastSumWritten = zeroSum // clear the last written checksum
	h.bytesInBlock = 0
	h.onlyNullBytesInBlock = true
	h.blockHash = sha1.New()
}

// Size returns the number of bytes Sum will return.
func (h *hidriveHash) Size() int {
	return Size
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (h *hidriveHash) BlockSize() int {
	return BlockSize
}
// MarshalBinary encodes the hash into a binary form and returns the result.
func (h *hidriveHash) MarshalBinary() ([]byte, error) {
	b := make([]byte, Size+4+1+8)
	copy(b, h.lastSumWritten[:])
	binary.BigEndian.PutUint32(b[Size:], h.bytesInBlock)
	if h.onlyNullBytesInBlock {
		b[Size+4] = 1
	}

	binary.BigEndian.PutUint64(b[Size+4+1:], uint64(len(h.levels)))
	for _, level := range h.levels {
		encodedLevel, err := level.MarshalBinary()
		if err != nil {
			return nil, err
		}
		encodedLength := make([]byte, 8)
		binary.BigEndian.PutUint64(encodedLength, uint64(len(encodedLevel)))
		b = append(b, encodedLength...)
		b = append(b, encodedLevel...)
	}
	encodedBlockHash, err := h.blockHash.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		return nil, err
	}
	b = append(b, encodedBlockHash...)
	return b, nil
}

// UnmarshalBinary decodes the binary form generated by MarshalBinary.
// The hash will replace its internal state accordingly.
func (h *hidriveHash) UnmarshalBinary(b []byte) error {
	if len(b) < Size+4+1+8 {
		return ErrorInvalidEncoding
	}
	copy(h.lastSumWritten[:], b)
	h.bytesInBlock = binary.BigEndian.Uint32(b[Size:])
	switch b[Size+4] {
	case 0:
		h.onlyNullBytesInBlock = false
	case 1:
		h.onlyNullBytesInBlock = true
	default:
		return ErrorInvalidEncoding
	}

	amount := binary.BigEndian.Uint64(b[Size+4+1:])
	h.levels = make([]*level, int(amount))
	offset := Size + 4 + 1 + 8
	for i := range h.levels {
		length := int(binary.BigEndian.Uint64(b[offset:]))
		offset += 8
		h.levels[i] = NewLevel().(*level)
		err := h.levels[i].UnmarshalBinary(b[offset : offset+length])
		if err != nil {
			return err
		}
		offset += length
	}
	err := h.blockHash.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[offset:])
	return err
}

// Sum returns the HiDrive checksum of the data.
func Sum(data []byte) [Size]byte {
	h := New().(*hidriveHash)
	_, _ = h.Write(data)
	var result [Size]byte
	copy(result[:], h.Sum(nil))
	return result
}

// Check the interfaces are satisfied.
var (
	_ hash.Hash                  = (*level)(nil)
	_ encoding.BinaryMarshaler   = (*level)(nil)
	_ encoding.BinaryUnmarshaler = (*level)(nil)
	_ internal.LevelHash         = (*level)(nil)
	_ hash.Hash                  = (*hidriveHash)(nil)
	_ encoding.BinaryMarshaler   = (*hidriveHash)(nil)
	_ encoding.BinaryUnmarshaler = (*hidriveHash)(nil)
)
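Since the package exposes the standard hash.Hash interface plus the convenience Sum function, computing a HiDrive checksum outside rclone looks like any other Go hash. A minimal sketch, not part of the diff (the input string is arbitrary; both calls print the same checksum):

	package main

	import (
		"encoding/hex"
		"fmt"

		"github.com/rclone/rclone/backend/hidrive/hidrivehash"
	)

	func main() {
		// Streaming use via the hash.Hash interface.
		h := hidrivehash.New()
		_, _ = h.Write([]byte("hello rclone\n")) // hidriveHash.Write never returns an error
		fmt.Println(hex.EncodeToString(h.Sum(nil)))

		// One-shot use via the package-level Sum.
		sum := hidrivehash.Sum([]byte("hello rclone\n"))
		fmt.Println(hex.EncodeToString(sum[:]))
	}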
395	backend/hidrive/hidrivehash/hidrivehash_test.go	Normal file
@@ -0,0 +1,395 @@
package hidrivehash_test

import (
	"crypto/sha1"
	"encoding"
	"encoding/hex"
	"fmt"
	"io"
	"testing"

	"github.com/rclone/rclone/backend/hidrive/hidrivehash"
	"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
	"github.com/stretchr/testify/assert"
)

// helper functions to set up test-tables

func sha1ArrayAsSlice(sum [sha1.Size]byte) []byte {
	return sum[:]
}

func mustDecode(hexstring string) []byte {
	result, err := hex.DecodeString(hexstring)
	if err != nil {
		panic(err)
	}
	return result
}

// ------------------------------------------------------------
var testTableLevelPositionEmbedded = []struct {
	ins  [][]byte
	outs [][]byte
	name string
}{
	{
		[][]byte{
			sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
			sha1ArrayAsSlice([20]byte{78, 188, 156, 219, 173, 54, 81, 55, 47, 220, 222, 207, 201, 21, 57, 252, 255, 239, 251, 186}),
		},
		[][]byte{
			sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
			sha1ArrayAsSlice([20]byte{68, 135, 96, 187, 38, 253, 14, 167, 186, 167, 188, 210, 91, 177, 185, 13, 208, 217, 94, 18}),
		},
		"documentation-v3.2rev27-example L0 (position-embedded)",
	},
	{
		[][]byte{
			sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
			sha1ArrayAsSlice([20]byte{75, 211, 153, 190, 125, 179, 67, 49, 60, 149, 98, 246, 142, 20, 11, 254, 159, 162, 129, 237}),
			sha1ArrayAsSlice([20]byte{150, 2, 9, 153, 97, 153, 189, 104, 147, 14, 77, 203, 244, 243, 25, 212, 67, 48, 111, 107}),
		},
		[][]byte{
			sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
			sha1ArrayAsSlice([20]byte{144, 209, 246, 100, 177, 216, 171, 229, 83, 17, 92, 135, 68, 98, 76, 72, 217, 24, 99, 176}),
			sha1ArrayAsSlice([20]byte{38, 211, 255, 254, 19, 114, 105, 77, 230, 31, 170, 83, 57, 85, 102, 29, 28, 72, 211, 27}),
		},
		"documentation-example L0 (position-embedded)",
	},
	{
		[][]byte{
			sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
			sha1ArrayAsSlice([20]byte{40, 34, 8, 238, 37, 5, 237, 184, 79, 105, 10, 167, 171, 254, 13, 229, 132, 112, 254, 8}),
			sha1ArrayAsSlice([20]byte{39, 112, 26, 86, 190, 35, 100, 101, 28, 131, 122, 191, 254, 144, 239, 107, 253, 124, 104, 203}),
		},
		[][]byte{
			sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
			sha1ArrayAsSlice([20]byte{213, 157, 141, 227, 213, 178, 25, 111, 200, 145, 77, 164, 17, 247, 202, 167, 37, 46, 0, 124}),
			sha1ArrayAsSlice([20]byte{253, 13, 168, 58, 147, 213, 125, 212, 229, 20, 200, 100, 16, 136, 186, 19, 34, 170, 105, 71}),
		},
		"documentation-example L1 (position-embedded)",
	},
}
var testTableLevel = []struct {
	ins  [][]byte
	outs [][]byte
	name string
}{
	{
		[][]byte{
			mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
			mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
			mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
		},
		[][]byte{
			mustDecode("44fe5ca6342568b4167bf990b64e404a3975e1c3"),
			mustDecode("90d1f664b1d8abe553115c8744624c48d91863b0"),
			mustDecode("26d3fffe1372694de61faa533955661d1c48d31b"),
		},
		"documentation-example L0",
	},
	{
		[][]byte{
			mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
			mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
			mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
		},
		[][]byte{
			mustDecode("ad7b84f5b0ac2bb7792842fc65f9bcc1a0bd0274"),
			mustDecode("d59d8de3d5b2196fc8914da411f7caa7252e007c"),
			mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
		},
		"documentation-example L1",
	},
	{
		[][]byte{
			mustDecode("0000000000000000000000000000000000000000"),
			mustDecode("0000000000000000000000000000000000000000"),
			mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
			mustDecode("0000000000000000000000000000000000000000"),
			mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
			mustDecode("0000000000000000000000000000000000000000"),
			mustDecode("0000000000000000000000000000000000000000"),
			mustDecode("0000000000000000000000000000000000000000"),
			mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
			mustDecode("0000000000000000000000000000000000000000"),
		},
		[][]byte{
			mustDecode("0000000000000000000000000000000000000000"),
			mustDecode("0000000000000000000000000000000000000000"),
			mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
			mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
			mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
			mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
			mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
			mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
			mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
			mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
		},
		"mixed-with-empties",
	},
}
var testTable = []struct {
	data []byte
	// pattern describes how to use data to construct the hash-input.
	// For every entry n at even indices this repeats the data n times.
	// For every entry m at odd indices this repeats a null-byte m times.
	// The input-data is constructed by concatenating the results in order.
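	// For example, data = []byte("ab") with pattern = []int64{2, 3}
	// yields the input "abab" followed by three null-bytes.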
	pattern []int64
	out     []byte
	name    string
}{
	{
		[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
		[]int64{64},
		mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
		"documentation-example L0",
	},
	{
		[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
		[]int64{64 * 256},
		mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
		"documentation-example L1",
	},
	{
		[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
		[]int64{64 * 256, 0, 64 * 128, 4096 * 128, 64*2 + 32},
		mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
		"documentation-example L2",
	},
	{
		[]byte("hello rclone\n"),
		[]int64{316},
		mustDecode("72370f9c18a2c20b31d71f3f4cee7a3cd2703737"),
		"not-block-aligned",
	},
	{
		[]byte("hello rclone\n"),
		[]int64{13, 4096 * 3, 4},
		mustDecode("a6990b81791f0d2db750b38f046df321c975aa60"),
		"not-block-aligned-with-null-bytes",
	},
	{
		[]byte{},
		[]int64{},
		mustDecode("0000000000000000000000000000000000000000"),
		"empty",
	},
	{
		[]byte{},
		[]int64{0, 4096 * 256 * 256},
		mustDecode("0000000000000000000000000000000000000000"),
		"null-bytes",
	},
}

// ------------------------------------------------------------
func TestLevelAdd(t *testing.T) {
	for _, test := range testTableLevelPositionEmbedded {
		l := hidrivehash.NewLevel().(internal.LevelHash)
		t.Run(test.name, func(t *testing.T) {
			for i := range test.ins {
				l.Add(test.ins[i])
				assert.Equal(t, test.outs[i], l.Sum(nil))
			}
		})
	}
}

func TestLevelWrite(t *testing.T) {
	for _, test := range testTableLevel {
		l := hidrivehash.NewLevel()
		t.Run(test.name, func(t *testing.T) {
			for i := range test.ins {
				l.Write(test.ins[i])
				assert.Equal(t, test.outs[i], l.Sum(nil))
			}
		})
	}
}

func TestLevelIsFull(t *testing.T) {
	content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
	l := hidrivehash.NewLevel()
	for i := 0; i < 256; i++ {
		assert.False(t, l.(internal.LevelHash).IsFull())
		written, err := l.Write(content[:])
		assert.Equal(t, len(content), written)
		if !assert.NoError(t, err) {
			t.FailNow()
		}
	}
	assert.True(t, l.(internal.LevelHash).IsFull())
	written, err := l.Write(content[:])
	assert.True(t, l.(internal.LevelHash).IsFull())
	assert.Equal(t, 0, written)
	assert.ErrorIs(t, err, hidrivehash.ErrorHashFull)
}

func TestLevelReset(t *testing.T) {
	l := hidrivehash.NewLevel()
	zeroHash := l.Sum(nil)
	_, err := l.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19})
	if assert.NoError(t, err) {
		assert.NotEqual(t, zeroHash, l.Sum(nil))
		l.Reset()
		assert.Equal(t, zeroHash, l.Sum(nil))
	}
}

func TestLevelSize(t *testing.T) {
	l := hidrivehash.NewLevel()
	assert.Equal(t, 20, l.Size())
}

func TestLevelBlockSize(t *testing.T) {
	l := hidrivehash.NewLevel()
	assert.Equal(t, 20, l.BlockSize())
}

func TestLevelBinaryMarshaler(t *testing.T) {
	content := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
	l := hidrivehash.NewLevel().(internal.LevelHash)
	l.Write(content[:10])
	encoded, err := l.MarshalBinary()
	if assert.NoError(t, err) {
		d := hidrivehash.NewLevel().(internal.LevelHash)
		err = d.UnmarshalBinary(encoded)
		if assert.NoError(t, err) {
			assert.Equal(t, l.Sum(nil), d.Sum(nil))
			l.Write(content[10:])
			d.Write(content[10:])
			assert.Equal(t, l.Sum(nil), d.Sum(nil))
		}
	}
}

func TestLevelInvalidEncoding(t *testing.T) {
	l := hidrivehash.NewLevel().(internal.LevelHash)
	err := l.UnmarshalBinary([]byte{})
	assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
}

// ------------------------------------------------------------
type infiniteReader struct {
	source []byte
	offset int
}

func (m *infiniteReader) Read(b []byte) (int, error) {
	count := copy(b, m.source[m.offset:])
	m.offset += count
	m.offset %= len(m.source)
	return count, nil
}

func writeInChunks(writer io.Writer, chunkSize int64, data []byte, pattern []int64) error {
	readers := make([]io.Reader, len(pattern))
	nullBytes := [4096]byte{}
	for i, n := range pattern {
		if i%2 == 0 {
			readers[i] = io.LimitReader(&infiniteReader{data, 0}, n*int64(len(data)))
		} else {
			readers[i] = io.LimitReader(&infiniteReader{nullBytes[:], 0}, n)
		}
	}
	reader := io.MultiReader(readers...)
	for {
		_, err := io.CopyN(writer, reader, chunkSize)
		if err != nil {
			if err == io.EOF {
				err = nil
			}
			return err
		}
	}
}
func TestWrite(t *testing.T) {
	for _, test := range testTable {
		t.Run(test.name, func(t *testing.T) {
			h := hidrivehash.New()
			err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern)
			if assert.NoError(t, err) {
				normalSum := h.Sum(nil)
				assert.Equal(t, test.out, normalSum)
				// Test that different chunk-sizes produce the same result.
				for _, blockSize := range []int64{397, 512, 4091, 8192, 10000} {
					t.Run(fmt.Sprintf("block-size %v", blockSize), func(t *testing.T) {
						h := hidrivehash.New()
						err := writeInChunks(h, blockSize, test.data, test.pattern)
						if assert.NoError(t, err) {
							assert.Equal(t, normalSum, h.Sum(nil))
						}
					})
				}
			}
		})
	}
}

func TestReset(t *testing.T) {
	h := hidrivehash.New()
	zeroHash := h.Sum(nil)
	_, err := h.Write([]byte{1})
	if assert.NoError(t, err) {
		assert.NotEqual(t, zeroHash, h.Sum(nil))
		h.Reset()
		assert.Equal(t, zeroHash, h.Sum(nil))
	}
}

func TestSize(t *testing.T) {
	h := hidrivehash.New()
	assert.Equal(t, 20, h.Size())
}

func TestBlockSize(t *testing.T) {
	h := hidrivehash.New()
	assert.Equal(t, 4096, h.BlockSize())
}

func TestBinaryMarshaler(t *testing.T) {
	for _, test := range testTable {
		h := hidrivehash.New()
		d := hidrivehash.New()
		half := len(test.pattern) / 2
		t.Run(test.name, func(t *testing.T) {
			err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[:half])
			assert.NoError(t, err)
			encoded, err := h.(encoding.BinaryMarshaler).MarshalBinary()
			if assert.NoError(t, err) {
				err = d.(encoding.BinaryUnmarshaler).UnmarshalBinary(encoded)
				if assert.NoError(t, err) {
					assert.Equal(t, h.Sum(nil), d.Sum(nil))
					err = writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[half:])
					assert.NoError(t, err)
					err = writeInChunks(d, int64(d.BlockSize()), test.data, test.pattern[half:])
					assert.NoError(t, err)
					assert.Equal(t, h.Sum(nil), d.Sum(nil))
				}
			}
		})
	}
}

func TestInvalidEncoding(t *testing.T) {
	h := hidrivehash.New()
	err := h.(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte{})
	assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
}

func TestSum(t *testing.T) {
	assert.Equal(t, [hidrivehash.Size]byte{}, hidrivehash.Sum([]byte{}))
	content := []byte{1}
	h := hidrivehash.New()
	h.Write(content)
	sum := hidrivehash.Sum(content)
	assert.Equal(t, h.Sum(nil), sum[:])
}
18	backend/hidrive/hidrivehash/internal/internal.go	Normal file
@@ -0,0 +1,18 @@
// Package internal provides utilities for HiDrive.
package internal

import (
	"encoding"
	"hash"
)

// LevelHash is an internal interface for level-hashes.
type LevelHash interface {
	encoding.BinaryMarshaler
	encoding.BinaryUnmarshaler
	hash.Hash
	// Add takes a position-embedded checksum and adds it to the level.
	Add(sum []byte)
	// IsFull returns whether the number of checksums added to this level reached its capacity.
	IsFull() bool
}
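The LevelHash interface lives in this internal package so the hidrivehash tests, which sit in an external hidrivehash_test package, can type-assert the value returned by NewLevel and reach Add and IsFull on the unexported level type without those methods becoming part of the public API.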
@@ -13,7 +13,6 @@ import (
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"
@@ -35,11 +34,11 @@ var (
func init() {
	fsi := &fs.RegInfo{
		Name:        "http",
		Description: "http Connection",
		Description: "HTTP",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "url",
			Help:     "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
			Help:     "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
			Required: true,
		}, {
			Name: "headers",
@@ -52,8 +51,7 @@ The input format is comma separated list of key,value pairs. Standard

For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.`,
			Default:  fs.CommaSepList{},
			Advanced: true,
		}, {
@@ -74,8 +72,9 @@ directories.`,
			Advanced: true,
		}, {
			Name: "no_head",
			Help: `Don't use HEAD requests to find file sizes in dir listing.
			Help: `Don't use HEAD requests.

HEAD requests are mainly used to find file sizes in dir listing.
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
@@ -84,12 +83,9 @@ directory listing to:
- check it really exists
- check to see if it is a directory

If you set this option, rclone will not do the HEAD request. This will mean

- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
If you set this option, rclone will not do the HEAD request. This will mean
that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.`,
			Default:  false,
			Advanced: true,
		}},
@@ -133,11 +129,87 @@ func statusError(res *http.Response, err error) error {
	}
	if res.StatusCode < 200 || res.StatusCode > 299 {
		_ = res.Body.Close()
		return fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
		return fmt.Errorf("HTTP Error: %s", res.Status)
	}
	return nil
}

// getFsEndpoint decides if url is to be considered a file or directory,
// and returns a proper endpoint url to use for the fs.
func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Options) (string, bool) {
	// If url ends with '/' it is already a proper url always assumed to be a directory.
	if url[len(url)-1] == '/' {
		return url, false
	}

	// If url does not end with '/' we send a HEAD request to decide
	// if it is directory or file, and if directory appends the missing
	// '/', or if file returns the directory url to parent instead.
	createFileResult := func() (string, bool) {
		fs.Debugf(nil, "If path is a directory you must add a trailing '/'")
		parent, _ := path.Split(url)
		return parent, true
	}
	createDirResult := func() (string, bool) {
		fs.Debugf(nil, "To avoid the initial HEAD request add a trailing '/' to the path")
		return url + "/", false
	}

	// If HEAD requests are not allowed we just have to assume it is a file.
	if opt.NoHead {
		fs.Debugf(nil, "Assuming path is a file as --http-no-head is set")
		return createFileResult()
	}

	// Use a client which doesn't follow redirects so the server
	// doesn't redirect http://host/dir to http://host/dir/
	noRedir := *client
	noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse
	}
	req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
	if err != nil {
		fs.Debugf(nil, "Assuming path is a file as HEAD request could not be created: %v", err)
		return createFileResult()
	}
	addHeaders(req, opt)
	res, err := noRedir.Do(req)

	if err != nil {
		fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
		return createFileResult()
	}
	if res.StatusCode == http.StatusNotFound {
		fs.Debugf(nil, "Assuming path is a directory as HEAD response says it does not exist as a file (%s)", res.Status)
		return createDirResult()
	}
	if res.StatusCode == http.StatusMovedPermanently ||
		res.StatusCode == http.StatusFound ||
		res.StatusCode == http.StatusSeeOther ||
		res.StatusCode == http.StatusTemporaryRedirect ||
		res.StatusCode == http.StatusPermanentRedirect {
		redir := res.Header.Get("Location")
		if redir != "" {
			if redir[len(redir)-1] == '/' {
				fs.Debugf(nil, "Assuming path is a directory as HEAD response is redirect (%s) to a path that ends with '/': %s", res.Status, redir)
				return createDirResult()
			}
			fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) to a path that does not end with '/': %s", res.Status, redir)
			return createFileResult()
		}
		fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) but no location header", res.Status)
		return createFileResult()
	}
	if res.StatusCode < 200 || res.StatusCode > 299 {
		// Example is 403 (http.StatusForbidden) for servers not allowing HEAD requests.
		fs.Debugf(nil, "Assuming path is a file as HEAD response is an error (%s)", res.Status)
		return createFileResult()
	}

	fs.Debugf(nil, "Assuming path is a file as HEAD response is success (%s)", res.Status)
	return createFileResult()
}
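The key trick in getFsEndpoint is probing with a HEAD request through a client copy whose CheckRedirect returns http.ErrUseLastResponse, so a 3xx response is surfaced instead of being followed, and its Location header can be inspected for a trailing '/'. A self-contained sketch of the same pattern in isolation, not part of the diff (the URL is a placeholder):

	package main

	import (
		"fmt"
		"net/http"
	)

	func main() {
		// Copy a client and stop it from following redirects, so the raw
		// 3xx response (and its Location header) can be inspected directly.
		noRedir := *http.DefaultClient
		noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		}
		res, err := noRedir.Head("https://example.com/dir") // placeholder URL
		if err != nil {
			fmt.Println("HEAD failed:", err)
			return
		}
		defer res.Body.Close()
		fmt.Println(res.StatusCode, res.Header.Get("Location"))
	}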
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -168,37 +240,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

	client := fshttp.NewClient(ctx)

	var isFile = false
	if !strings.HasSuffix(u.String(), "/") {
		// Make a client which doesn't follow redirects so the server
		// doesn't redirect http://host/dir to http://host/dir/
		noRedir := *client
		noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		}
		// check to see if points to a file
		req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
		if err == nil {
			addHeaders(req, opt)
			res, err := noRedir.Do(req)
			err = statusError(res, err)
			if err == nil {
				isFile = true
			}
		}
	}

	newRoot := u.String()
	if isFile {
		// Point to the parent if this is a file
		newRoot, _ = path.Split(u.String())
	} else {
		if !strings.HasSuffix(newRoot, "/") {
			newRoot += "/"
		}
	}

	u, err = url.Parse(newRoot)
	endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
	fs.Debugf(nil, "Root: %s", endpoint)
	u, err = url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
@@ -216,12 +260,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	if isFile {
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}

	if !strings.HasSuffix(f.endpointURL, "/") {
		return nil, errors.New("internal error: url doesn't end with /")
	}

	return f, nil
}

@@ -256,7 +304,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
		fs:     f,
		remote: remote,
	}
	err := o.stat(ctx)
	err := o.head(ctx)
	if err != nil {
		return nil, err
	}
@@ -268,15 +316,6 @@ func (f *Fs) url(remote string) string {
	return f.endpointURL + rest.URLPathEscape(remote)
}

// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
	n, e := strconv.ParseInt(s, 10, 64)
	if e != nil {
		return def
	}
	return n
}

// Errors returned by parseName
var (
	errURLJoinFailed = errors.New("URLJoin failed")
@@ -297,7 +336,7 @@ func parseName(base *url.URL, name string) (string, error) {
	}
	// check it doesn't have URL parameters
	uStr := u.String()
	if strings.Index(uStr, "?") >= 0 {
	if strings.Contains(uStr, "?") {
		return "", errFoundQuestionMark
	}
	// check that this is going back to the same host and scheme
@@ -409,7 +448,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
		return nil, fmt.Errorf("readDir: %w", err)
	}
	default:
		return nil, fmt.Errorf("Can't parse content type %q", contentType)
		return nil, fmt.Errorf("can't parse content type %q", contentType)
	}
	return names, nil
}
@@ -451,12 +490,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
			fs:     f,
			remote: remote,
		}
		switch err := file.stat(ctx); err {
		switch err := file.head(ctx); err {
		case nil:
			add(file)
		case fs.ErrorNotAFile:
			// ...found a directory not a file
			add(fs.NewDir(remote, timeUnset))
			add(fs.NewDir(remote, time.Time{}))
		default:
			fs.Debugf(remote, "skipping because of error: %v", err)
		}
@@ -468,7 +507,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		name = strings.TrimRight(name, "/")
		remote := path.Join(dir, name)
		if isDir {
			add(fs.NewDir(remote, timeUnset))
			add(fs.NewDir(remote, time.Time{}))
		} else {
			in <- remote
		}
@@ -530,8 +569,8 @@ func (o *Object) url() string {
	return o.fs.url(o.remote)
}

// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
// head sends a HEAD request to update info fields in the Object
func (o *Object) head(ctx context.Context) error {
	if o.fs.opt.NoHead {
		o.size = -1
		o.modTime = timeUnset
@@ -552,13 +591,19 @@ func (o *Object) stat(ctx context.Context) error {
	if err != nil {
		return fmt.Errorf("failed to stat: %w", err)
	}
	return o.decodeMetadata(ctx, res)
}

// decodeMetadata updates info fields in the Object according to HTTP response headers
func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
	t, err := http.ParseTime(res.Header.Get("Last-Modified"))
	if err != nil {
		t = timeUnset
	}
	o.size = parseInt64(res.Header.Get("Content-Length"), -1)
	o.modTime = t
	o.contentType = res.Header.Get("Content-Type")
	o.size = rest.ParseSizeFromHeaders(res.Header)

	// If NoSlash is set then check ContentType to see if it is a directory
	if o.fs.opt.NoSlash {
		mediaType, _, err := mime.ParseMediaType(o.contentType)
@@ -604,6 +649,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	if err != nil {
		return nil, fmt.Errorf("Open failed: %w", err)
	}
	if err = o.decodeMetadata(ctx, res); err != nil {
		return nil, fmt.Errorf("decodeMetadata failed: %w", err)
	}
	return res.Body, nil
}

@@ -3,13 +3,15 @@ package http
import (
	"context"
	"fmt"
	"io/ioutil"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"
@@ -24,17 +26,35 @@ import (
)

var (
	remoteName = "TestHTTP"
	testPath   = "test"
	filesPath  = filepath.Join(testPath, "files")
	headers    = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
	remoteName  = "TestHTTP"
	testPath    = "test"
	filesPath   = filepath.Join(testPath, "files")
	headers     = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
	lineEndSize = 1
)

// prepareServer the test server and return a function to tidy it up afterwards
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// prepareServer prepares the test server and shuts it down automatically
// when the test completes.
func prepareServer(t *testing.T) configmap.Simple {
	// file server for test/files
	fileServer := http.FileServer(http.Dir(filesPath))

	// verify the file path is correct, and also check which line endings
	// are used to get sizes right ("\n" except on Windows, but even there
	// we may have "\n" or "\r\n" depending on git crlf setting)
	fileList, err := os.ReadDir(filesPath)
	require.NoError(t, err)
	require.Greater(t, len(fileList), 0)
	for _, file := range fileList {
		if !file.IsDir() {
			data, _ := os.ReadFile(filepath.Join(filesPath, file.Name()))
			if strings.HasSuffix(string(data), "\r\n") {
				lineEndSize = 2
			}
			break
		}
	}

	// test the headers are there then pass on to fileServer
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
@@ -59,20 +79,21 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
		"url":     ts.URL,
		"headers": strings.Join(headers, ","),
	}
	t.Cleanup(ts.Close)

	// return a function to tidy up
	return m, ts.Close
	return m
}

// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
	m, tidy := prepareServer(t)
// prepare prepares the test server and shuts it down automatically
// when the test completes.
func prepare(t *testing.T) fs.Fs {
	m := prepareServer(t)

	// Instantiate it
	f, err := NewFs(context.Background(), remoteName, "", m)
	require.NoError(t, err)

	return f, tidy
	return f
}

func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
@@ -91,7 +112,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {

	e = entries[1]
	assert.Equal(t, "one%.txt", e.Remote())
	assert.Equal(t, int64(6), e.Size())
	assert.Equal(t, int64(5+lineEndSize), e.Size())
	_, ok = e.(*Object)
	assert.True(t, ok)

@@ -108,29 +129,26 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
		_, ok = e.(fs.Directory)
		assert.True(t, ok)
	} else {
		assert.Equal(t, int64(41), e.Size())
		assert.Equal(t, int64(40+lineEndSize), e.Size())
		_, ok = e.(*Object)
		assert.True(t, ok)
	}
}

func TestListRoot(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()
	f := prepare(t)
	testListRoot(t, f, false)
}

func TestListRootNoSlash(t *testing.T) {
	f, tidy := prepare(t)
	f := prepare(t)
	f.(*Fs).opt.NoSlash = true
	defer tidy()

	testListRoot(t, f, true)
}

func TestListSubDir(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()
	f := prepare(t)

	entries, err := f.List(context.Background(), "three")
	require.NoError(t, err)
@@ -141,20 +159,19 @@ func TestListSubDir(t *testing.T) {

	e := entries[0]
	assert.Equal(t, "three/underthree.txt", e.Remote())
	assert.Equal(t, int64(9), e.Size())
	assert.Equal(t, int64(8+lineEndSize), e.Size())
	_, ok := e.(*Object)
	assert.True(t, ok)
}

func TestNewObject(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()
	f := prepare(t)

	o, err := f.NewObject(context.Background(), "four/under four.txt")
	require.NoError(t, err)

	assert.Equal(t, "four/under four.txt", o.Remote())
	assert.Equal(t, int64(9), o.Size())
	assert.Equal(t, int64(8+lineEndSize), o.Size())
	_, ok := o.(*Object)
	assert.True(t, ok)

@@ -175,32 +192,69 @@ func TestNewObject(t *testing.T) {
}

func TestOpen(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()
	m := prepareServer(t)

	o, err := f.NewObject(context.Background(), "four/under four.txt")
	require.NoError(t, err)
	for _, head := range []bool{false, true} {
		if !head {
			m.Set("no_head", "true")
		}
		f, err := NewFs(context.Background(), remoteName, "", m)
		require.NoError(t, err)

	// Test normal read
	fd, err := o.Open(context.Background())
	require.NoError(t, err)
	data, err := ioutil.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, "beetroot\n", string(data))
		for _, rangeRead := range []bool{false, true} {
			o, err := f.NewObject(context.Background(), "four/under four.txt")
			require.NoError(t, err)

	// Test with range request
	fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
	require.NoError(t, err)
	data, err = ioutil.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, "eetro", string(data))
			if !head {
				// Test mod time is still indeterminate
				tObj := o.ModTime(context.Background())
				assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj))

				// Test file size is still indeterminate
				assert.Equal(t, int64(-1), o.Size())
			}

			var data []byte
			if !rangeRead {
				// Test normal read
				fd, err := o.Open(context.Background())
				require.NoError(t, err)
				data, err = io.ReadAll(fd)
				require.NoError(t, err)
				require.NoError(t, fd.Close())
				if lineEndSize == 2 {
					assert.Equal(t, "beetroot\r\n", string(data))
				} else {
					assert.Equal(t, "beetroot\n", string(data))
				}
			} else {
				// Test with range request
				fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
				require.NoError(t, err)
				data, err = io.ReadAll(fd)
				require.NoError(t, err)
				require.NoError(t, fd.Close())
				assert.Equal(t, "eetro", string(data))
			}

			fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
			require.NoError(t, err)
			tFile := fi.ModTime()

			// Test the time is always correct on the object after file open
			tObj := o.ModTime(context.Background())
			fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)

			if !rangeRead {
				// Test the file size
				assert.Equal(t, int64(len(data)), o.Size())
			}
		}
	}
}

func TestMimeType(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()
	f := prepare(t)

	o, err := f.NewObject(context.Background(), "four/under four.txt")
	require.NoError(t, err)
@@ -211,8 +265,7 @@ func TestMimeType(t *testing.T) {
}

func TestIsAFileRoot(t *testing.T) {
	m, tidy := prepareServer(t)
	defer tidy()
	m := prepareServer(t)

	f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
	assert.Equal(t, err, fs.ErrorIsFile)
@@ -221,8 +274,7 @@ func TestIsAFileRoot(t *testing.T) {
}

func TestIsAFileSubDir(t *testing.T) {
	m, tidy := prepareServer(t)
	defer tidy()
	m := prepareServer(t)

	f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
	assert.Equal(t, err, fs.ErrorIsFile)
@@ -236,7 +288,7 @@ func TestIsAFileSubDir(t *testing.T) {

	e := entries[0]
	assert.Equal(t, "underthree.txt", e.Remote())
	assert.Equal(t, int64(9), e.Size())
	assert.Equal(t, int64(8+lineEndSize), e.Size())
	_, ok := e.(*Object)
	assert.True(t, ok)
}
@@ -353,3 +405,106 @@ func TestParseCaddy(t *testing.T) {
		"v1.36-22-g06ea13a-ssh-agentβ/",
	})
}

func TestFsNoSlashRoots(t *testing.T) {
	// Test Fs with roots that does not end with '/', the logic that
	// decides if url is to be considered a file or directory, based
	// on result from a HEAD request.

	// Handler for faking HEAD responses with different status codes
	headCount := 0
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "HEAD" {
			headCount++
			responseCode, err := strconv.Atoi(path.Base(r.URL.String()))
			require.NoError(t, err)
			if strings.HasPrefix(r.URL.String(), "/redirect/") {
				var redir string
				if strings.HasPrefix(r.URL.String(), "/redirect/file/") {
					redir = "/redirected"
				} else if strings.HasPrefix(r.URL.String(), "/redirect/dir/") {
					redir = "/redirected/"
				} else {
					require.Fail(t, "Redirect test requests must start with '/redirect/file/' or '/redirect/dir/'")
				}
				http.Redirect(w, r, redir, responseCode)
			} else {
				http.Error(w, http.StatusText(responseCode), responseCode)
			}
		}
	})

	// Make the test server
	ts := httptest.NewServer(handler)
	defer ts.Close()

	// Configure the remote
	configfile.Install()
	m := configmap.Simple{
		"type": "http",
		"url":  ts.URL,
	}

	// Test
	for i, test := range []struct {
		root   string
		isFile bool
	}{
		// 2xx success
		{"parent/200", true},
		{"parent/204", true},

		// 3xx redirection Redirect status 301, 302, 303, 307, 308
		{"redirect/file/301", true}, // Request is redirected to "/redirected"
		{"redirect/dir/301", false}, // Request is redirected to "/redirected/"
		{"redirect/file/302", true}, // Request is redirected to "/redirected"
		{"redirect/dir/302", false}, // Request is redirected to "/redirected/"
		{"redirect/file/303", true}, // Request is redirected to "/redirected"
		{"redirect/dir/303", false}, // Request is redirected to "/redirected/"

		{"redirect/file/304", true}, // Not really a redirect, handled like 4xx errors (below)
		{"redirect/file/305", true}, // Not really a redirect, handled like 4xx errors (below)
		{"redirect/file/306", true}, // Not really a redirect, handled like 4xx errors (below)

		{"redirect/file/307", true}, // Request is redirected to "/redirected"
		{"redirect/dir/307", false}, // Request is redirected to "/redirected/"
		{"redirect/file/308", true}, // Request is redirected to "/redirected"
		{"redirect/dir/308", false}, // Request is redirected to "/redirected/"

		// 4xx client errors
		{"parent/403", true},  // Forbidden status (head request blocked)
		{"parent/404", false}, // Not found status
	} {
		for _, noHead := range []bool{false, true} {
			var isFile bool
			if noHead {
				m.Set("no_head", "true")
				isFile = true
			} else {
				m.Set("no_head", "false")
				isFile = test.isFile
			}
			headCount = 0
			f, err := NewFs(context.Background(), remoteName, test.root, m)
			if noHead {
				assert.Equal(t, 0, headCount)
			} else {
				assert.Equal(t, 1, headCount)
			}
			if isFile {
				assert.ErrorIs(t, err, fs.ErrorIsFile)
			} else {
				assert.NoError(t, err)
			}
			var endpoint string
			if isFile {
				parent, _ := path.Split(test.root)
				endpoint = "/" + parent
			} else {
				endpoint = "/" + test.root + "/"
			}
			what := fmt.Sprintf("i=%d, root=%q, isFile=%v, noHead=%v", i, test.root, isFile, noHead)
			assert.Equal(t, ts.URL+endpoint, f.String(), what)
		}
	}
}

@@ -1,62 +0,0 @@
package hubic

import (
	"context"
	"net/http"
	"time"

	"github.com/ncw/swift/v2"
	"github.com/rclone/rclone/fs"
)

// auth is an authenticator for swift
type auth struct {
	f *Fs
}

// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
	return &auth{
		f: f,
	}
}

// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
	const retries = 10
	for try := 1; try <= retries; try++ {
		err = a.f.getCredentials(context.TODO())
		if err == nil {
			break
		}
		time.Sleep(100 * time.Millisecond)
		fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
	}
	return nil, err
}

// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
	return nil
}

// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
	return a.f.credentials.Endpoint
}

// The access token
func (a *auth) Token() string {
	return a.f.credentials.Token
}

// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
	return ""
}

// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)
@@ -1,200 +0,0 @@
// Package hubic provides an interface to the Hubic object storage
// system.
package hubic

// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
	"time"

	swiftLib "github.com/ncw/swift/v2"
	"github.com/rclone/rclone/backend/swift"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/oauthutil"
	"golang.org/x/oauth2"
)

const (
	rcloneClientID              = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
	rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
)

// Globals
var (
	// Description of how to auth for this app
	oauthConfig = &oauth2.Config{
		Scopes: []string{
			"credentials.r", // Read OpenStack credentials
		},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://api.hubic.com/oauth/auth/",
			TokenURL: "https://api.hubic.com/oauth/token/",
		},
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "hubic",
		Description: "Hubic",
		NewFs:       NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			return oauthutil.ConfigOut("", &oauthutil.Options{
				OAuth2Config: oauthConfig,
			})
		},
		Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
	})
}

// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
	Token    string `json:"token"`    // OpenStack token
	Endpoint string `json:"endpoint"` // OpenStack endpoint
	Expires  string `json:"expires"`  // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}

// Fs represents a remote hubic
type Fs struct {
	fs.Fs                    // wrapped Fs
	features    *fs.Features // optional features
	client      *http.Client // client for oauth api
	credentials credentials  // returned from the Hubic API
	expires     time.Time    // time credentials expire
}

// Object describes a swift object
type Object struct {
	*swift.Object
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Object.String()
}

// ------------------------------------------------------------

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.Fs == nil {
		return "Hubic"
	}
	return fmt.Sprintf("Hubic %s", f.Fs.String())
}

// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
	req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
	if err != nil {
		return err
	}
	resp, err := f.client.Do(req)
	if err != nil {
		return err
	}
	defer fs.CheckClose(resp.Body, &err)
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		body, _ := ioutil.ReadAll(resp.Body)
		bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
		return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
	}
	decoder := json.NewDecoder(resp.Body)
	var result credentials
	err = decoder.Decode(&result)
	if err != nil {
		return err
	}
	// fs.Debugf(f, "Got credentials %+v", result)
	if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
		return errors.New("couldn't read token, result and expired from credentials")
	}
	f.credentials = result
	expires, err := time.Parse(time.RFC3339, result.Expires)
	if err != nil {
		return err
	}
	f.expires = expires
	fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, f.expires.Sub(time.Now()))
	return nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to configure Hubic: %w", err)
	}

	f := &Fs{
		client: client,
	}

	// Make the swift Connection
	ci := fs.GetConfig(ctx)
	c := &swiftLib.Connection{
		Auth:           newAuth(f),
		ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
		Timeout:        10 * ci.Timeout,        // Use the timeouts in the transport
		Transport:      fshttp.NewTransport(ctx),
	}
	err = c.Authenticate(ctx)
	if err != nil {
		return nil, fmt.Errorf("error authenticating swift connection: %w", err)
	}

	// Parse config into swift.Options struct
	opt := new(swift.Options)
	err = configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Make inner swift Fs from the connection
	swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
	if err != nil && err != fs.ErrorIsFile {
		return nil, err
	}
	f.Fs = swiftFs
	f.features = f.Fs.Features().Wrap(f)
	return f, err
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.Fs
}

// Check the interfaces are satisfied
var (
	_ fs.Fs        = (*Fs)(nil)
	_ fs.UnWrapper = (*Fs)(nil)
)
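getCredentials above decodes a small JSON document into the credentials struct and parses its RFC3339 expiry. A minimal sketch of the same decode step in isolation; the payload values are illustrative, not real Hubic output:

	package main

	import (
		"encoding/json"
		"fmt"
		"strings"
		"time"
	)

	type credentials struct {
		Token    string `json:"token"`
		Endpoint string `json:"endpoint"`
		Expires  string `json:"expires"`
	}

	func main() {
		body := `{"token":"abc","endpoint":"https://swift.example/v1/AUTH_x","expires":"2015-11-09T14:24:56+01:00"}`
		var c credentials
		if err := json.NewDecoder(strings.NewReader(body)).Decode(&c); err != nil {
			panic(err)
		}
		// Same parse the backend used before storing f.expires.
		expires, err := time.Parse(time.RFC3339, c.Expires)
		if err != nil {
			panic(err)
		}
		fmt.Println(c.Endpoint, "token expires", expires)
	}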
@@ -1,19 +0,0 @@
// Test Hubic filesystem interface
package hubic_test

import (
	"testing"

	"github.com/rclone/rclone/backend/hubic"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:          "TestHubic:",
		NilObject:           (*hubic.Object)(nil),
		SkipFsCheckWrap:     true,
		SkipObjectCheckWrap: true,
	})
}
backend/internetarchive/internetarchive.go (new file, 1295 lines; diff suppressed because it is too large)
backend/internetarchive/internetarchive_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test internetarchive filesystem interface
package internetarchive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/internetarchive"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestIA:lesmi-rclone-test/",
		NilObject:  (*internetarchive.Object)(nil),
	})
}
@@ -1,3 +1,4 @@
// Package api provides types used by the Jottacloud API.
package api

import (
@@ -8,42 +9,69 @@ import (
)

const (
	// default time format for almost all request and responses
	timeFormat = "2006-01-02-T15:04:05Z0700"
	// the API server seems to use a different format
	apiTimeFormat = "2006-01-02T15:04:05Z07:00"
	// default time format historically used for all request and responses.
	// Similar to time.RFC3339, but with an extra '-' in front of 'T',
	// and no ':' separator in timezone offset. Some newer endpoints have
	// moved to proper time.RFC3339 conformant format instead.
	jottaTimeFormat = "2006-01-02-T15:04:05Z0700"
)

// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
type Time time.Time

// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// unmarshalXML turns XML into a Time
func unmarshalXMLTime(d *xml.Decoder, start xml.StartElement, timeFormat string) (time.Time, error) {
	var v string
	if err := d.DecodeElement(&v, &start); err != nil {
		return err
		return time.Time{}, err
	}
	if v == "" {
		*t = Time(time.Time{})
		return nil
		return time.Time{}, nil
	}
	newTime, err := time.Parse(timeFormat, v)
	if err == nil {
		*t = Time(newTime)
		return newTime, nil
	}
	return time.Time{}, err
}

// JottaTime represents time values in the classic API using a custom RFC3339 like format
type JottaTime time.Time

// String returns JottaTime string in Jottacloud classic format
func (t JottaTime) String() string { return time.Time(t).Format(jottaTimeFormat) }

// UnmarshalXML turns XML into a JottaTime
func (t *JottaTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	tm, err := unmarshalXMLTime(d, start, jottaTimeFormat)
	*t = JottaTime(tm)
	return err
}

// MarshalXML turns a Time into XML
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// MarshalXML turns a JottaTime into XML
func (t *JottaTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return e.EncodeElement(t.String(), start)
}

// Return Time string in Jottacloud format
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// Rfc3339Time represents time values in the newer APIs using standard RFC3339 format
type Rfc3339Time time.Time

// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// String returns Rfc3339Time string in Jottacloud RFC3339 format
func (t Rfc3339Time) String() string { return time.Time(t).Format(time.RFC3339) }

// UnmarshalXML turns XML into a Rfc3339Time
func (t *Rfc3339Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	tm, err := unmarshalXMLTime(d, start, time.RFC3339)
	*t = Rfc3339Time(tm)
	return err
}

// MarshalXML turns a Rfc3339Time into XML
func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return e.EncodeElement(t.String(), start)
}

// MarshalJSON turns a Rfc3339Time into JSON
func (t *Rfc3339Time) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf("\"%s\"", t.String())), nil
}
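The two layouts split here represent the same instant; the classic format only differs in the extra '-' before 'T' and the missing ':' in the offset. A short sketch showing both parses agree (the timestamps are made up for illustration):

	package main

	import (
		"fmt"
		"time"
	)

	const jottaTimeFormat = "2006-01-02-T15:04:05Z0700" // classic layout, as in the const block above

	func main() {
		// Classic JFS timestamp: extra '-' before 'T', no ':' in the offset.
		t1, err := time.Parse(jottaTimeFormat, "2022-05-25-T11:59:17+0200")
		if err != nil {
			panic(err)
		}
		// Newer endpoints use plain RFC3339.
		t2, err := time.Parse(time.RFC3339, "2022-05-25T11:59:17+02:00")
		if err != nil {
			panic(err)
		}
		fmt.Println(t1.Equal(t2)) // true: same instant, different layout
	}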
// LoginToken is struct representing the login token generated in the WebUI
type LoginToken struct {
@@ -122,16 +150,11 @@ type AllocateFileResponse struct {

// UploadResponse after an upload
type UploadResponse struct {
	Name      string      `json:"name"`
	Path      string      `json:"path"`
	Kind      string      `json:"kind"`
	ContentID string      `json:"content_id"`
	Bytes     int64       `json:"bytes"`
	Md5       string      `json:"md5"`
	Created   int64       `json:"created"`
	Modified  int64       `json:"modified"`
	Deleted   interface{} `json:"deleted"`
	Mime      string      `json:"mime"`
	Path      string `json:"path"`
	ContentID string `json:"content_id"`
	Bytes     int64  `json:"bytes"`
	Md5       string `json:"md5"`
	Modified  int64  `json:"modified"`
}

// DeviceRegistrationResponse is the response to registering a device
@@ -338,9 +361,9 @@ type JottaFolder struct {
	Name       string        `xml:"name,attr"`
	Deleted    Flag          `xml:"deleted,attr"`
	Path       string        `xml:"path"`
	CreatedAt  Time          `xml:"created"`
	ModifiedAt Time          `xml:"modified"`
	Updated    Time          `xml:"updated"`
	CreatedAt  JottaTime     `xml:"created"`
	ModifiedAt JottaTime     `xml:"modified"`
	Updated    JottaTime     `xml:"updated"`
	Folders    []JottaFolder `xml:"folders>folder"`
	Files      []JottaFile   `xml:"files>file"`
}
@@ -365,17 +388,17 @@ GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
// JottaFile represents a Jottacloud file
type JottaFile struct {
	XMLName         xml.Name
	Name            string `xml:"name,attr"`
	Deleted         Flag   `xml:"deleted,attr"`
	PublicURI       string `xml:"publicURI"`
	PublicSharePath string `xml:"publicSharePath"`
	State           string `xml:"currentRevision>state"`
	CreatedAt       Time   `xml:"currentRevision>created"`
	ModifiedAt      Time   `xml:"currentRevision>modified"`
	Updated         Time   `xml:"currentRevision>updated"`
	Size            int64  `xml:"currentRevision>size"`
	MimeType        string `xml:"currentRevision>mime"`
	MD5             string `xml:"currentRevision>md5"`
	Name            string    `xml:"name,attr"`
	Deleted         Flag      `xml:"deleted,attr"`
	PublicURI       string    `xml:"publicURI"`
	PublicSharePath string    `xml:"publicSharePath"`
	State           string    `xml:"currentRevision>state"`
	CreatedAt       JottaTime `xml:"currentRevision>created"`
	ModifiedAt      JottaTime `xml:"currentRevision>modified"`
	Updated         JottaTime `xml:"currentRevision>updated"`
	Size            int64     `xml:"currentRevision>size"`
	MimeType        string    `xml:"currentRevision>mime"`
	MD5             string    `xml:"currentRevision>md5"`
}

// Error is a custom Error for wrapping Jottacloud error responses
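JottaFile relies on encoding/xml's `a>b` tag syntax to pull fields out of the nested currentRevision element without defining an intermediate struct. A small sketch of that mechanism; the document and struct are trimmed stand-ins, not captured API output:

	package main

	import (
		"encoding/xml"
		"fmt"
	)

	// file mirrors the currentRevision>... tag paths used by JottaFile above.
	type file struct {
		Name string `xml:"name,attr"`
		Size int64  `xml:"currentRevision>size"`
		MD5  string `xml:"currentRevision>md5"`
	}

	func main() {
		doc := `<file name="a.txt"><currentRevision><size>5</size><md5>0cc175b9c0f1b6a831c399e269772661</md5></currentRevision></file>`
		var f file
		if err := xml.Unmarshal([]byte(doc), &f); err != nil {
			panic(err)
		}
		fmt.Println(f.Name, f.Size, f.MD5) // a.txt 5 0cc...
	}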
@@ -1,3 +1,4 @@
// Package jottacloud provides an interface to the Jottacloud storage system.
package jottacloud

import (
@@ -7,10 +8,10 @@ import (
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net/http"
	"net/url"
@@ -45,9 +46,9 @@ const (
	decayConstant     = 2 // bigger for slower decay, exponential
	defaultDevice     = "Jotta"
	defaultMountpoint = "Archive"
	rootURL           = "https://jfs.jottacloud.com/jfs/"
	jfsURL            = "https://jfs.jottacloud.com/jfs/"
	apiURL            = "https://api.jottacloud.com/"
	baseURL           = "https://www.jottacloud.com/"
	wwwURL            = "https://www.jottacloud.com/"
	cachePrefix       = "rclone-jcmd5-"
	configDevice      = "device"
	configMountpoint  = "mountpoint"
@@ -126,7 +127,7 @@ func init() {
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
	switch config.State {
	case "":
		return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type.`, []fs.OptionExample{{
		return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
			Value: "standard",
			Help:  "Standard authentication.\nUse this if you're a normal Jottacloud user.",
		}, {
@@ -144,7 +145,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
		return fs.ConfigGoto(config.Result)
	case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
		m.Set("configVersion", fmt.Sprint(configVersion))
		return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\n\nGenerate here: https://www.jottacloud.com/web/secure")
		return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
	case "standard_token":
		loginToken := config.Result
		m.Set(configClientID, defaultClientID)
@@ -190,7 +191,7 @@ machines.`)
		m.Set("auth_code", "")
		return fs.ConfigGoto("legacy_do_auth")
	case "legacy_auth_code":
		authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
		authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
		m.Set("auth_code", authCode)
		return fs.ConfigGoto("legacy_do_auth")
	case "legacy_do_auth":
@@ -261,7 +262,11 @@ machines.`)
		},
		})
	case "choose_device":
		return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
		return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
section of the official Jottacloud client. If you instead want to access the
sync or the backup section, for example, you must choose yes.`)

	case "choose_device_query":
		if config.Result != "true" {
			m.Set(configDevice, "")
@@ -272,43 +277,139 @@ machines.`)
		if err != nil {
			return nil, err
		}
		srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
		jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

		cust, err := getCustomerInfo(ctx, apiSrv)
		if err != nil {
			return nil, err
		}
		m.Set(configUsername, cust.Username)

		acc, err := getDriveInfo(ctx, srv, cust.Username)
		acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
		if err != nil {
			return nil, err
		}
		return fs.ConfigChoose("choose_device_result", "config_device", `Please select the device to use. Normally this will be Jotta`, len(acc.Devices), func(i int) (string, string) {
			return acc.Devices[i].Name, ""

		deviceNames := make([]string, len(acc.Devices))
		for i, dev := range acc.Devices {
			if i > 0 && dev.Name == defaultDevice {
				// Insert the special Jotta device as first entry, making it the default choice.
				copy(deviceNames[1:i+1], deviceNames[0:i])
				deviceNames[0] = dev.Name
			} else {
				deviceNames[i] = dev.Name
			}
		}

		help := fmt.Sprintf(`The device to use. In standard setup the built-in %s device is used,
which contains predefined mountpoints for archive, sync etc. All other devices
are treated as backup devices by the official Jottacloud client. You may create
a new one by entering a unique name.`, defaultDevice)
		return fs.ConfigChoose("choose_device_result", "config_device", help, len(deviceNames), func(i int) (string, string) {
			return deviceNames[i], ""
		})
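The copy-then-overwrite trick above moves the built-in device to the front of the list while keeping the relative order of everything already seen. The same idiom in isolation, with invented device names:

	package main

	import "fmt"

	func main() {
		devices := []string{"Backup", "Laptop", "Jotta", "Phone"}
		names := make([]string, len(devices))
		for i, name := range devices {
			if i > 0 && name == "Jotta" {
				copy(names[1:i+1], names[0:i]) // shift everything seen so far one slot right
				names[0] = name                // put the special device first
			} else {
				names[i] = name
			}
		}
		fmt.Println(names) // [Jotta Backup Laptop Phone]
	}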
case "choose_device_result":
|
||||
device := config.Result
|
||||
m.Set(configDevice, device)
|
||||
|
||||
oAuthClient, _, err := getOAuthClient(ctx, name, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
|
||||
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
||||
|
||||
username, _ := m.Get(configUsername)
|
||||
dev, err := getDeviceInfo(ctx, srv, path.Join(username, device))
|
||||
cust, err := getCustomerInfo(ctx, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", `Please select the mountpoint to use. Normally this will be Archive.`, len(dev.MountPoints), func(i int) (string, string) {
|
||||
|
||||
acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
isNew := true
|
||||
for _, dev := range acc.Devices {
|
||||
if strings.EqualFold(dev.Name, device) { // If device name exists with different casing we prefer the existing (not sure if and how the api handles the opposite)
|
||||
device = dev.Name // Prefer same casing as existing, e.g. if user entered "jotta" we use the standard casing "Jotta" instead
|
||||
isNew = false
|
||||
break
|
||||
}
|
||||
}
|
||||
var dev *api.JottaDevice
|
||||
if isNew {
|
||||
fs.Debugf(nil, "Creating new device: %s", device)
|
||||
dev, err = createDevice(ctx, jfsSrv, path.Join(cust.Username, device))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
m.Set(configDevice, device)
|
||||
|
||||
if !isNew {
|
||||
dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var help string
|
||||
if device == defaultDevice {
|
||||
// With built-in Jotta device the mountpoint choice is exclusive,
|
||||
// we do not want to risk any problems by creating new mountpoints on it.
|
||||
help = fmt.Sprintf(`The mountpoint to use on the built-in device %s.
|
||||
The standard setup is to use the %s mountpoint. Most other mountpoints
|
||||
have very limited support in rclone and should generally be avoided.`, defaultDevice, defaultMountpoint)
|
||||
return fs.ConfigChooseExclusive("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) {
|
||||
return dev.MountPoints[i].Name, ""
|
||||
})
|
||||
}
|
||||
help = fmt.Sprintf(`The mountpoint to use on the non-standard device %s.
|
||||
You may create a new by entering a unique name.`, device)
|
||||
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) {
|
||||
return dev.MountPoints[i].Name, ""
|
||||
})
|
||||
case "choose_device_mountpoint":
|
||||
mountpoint := config.Result
|
||||
|
||||
oAuthClient, _, err := getOAuthClient(ctx, name, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
|
||||
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
||||
|
||||
cust, err := getCustomerInfo(ctx, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
device, _ := m.Get(configDevice)
|
||||
|
||||
dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
isNew := true
|
||||
for _, mnt := range dev.MountPoints {
|
||||
if strings.EqualFold(mnt.Name, mountpoint) {
|
||||
mountpoint = mnt.Name
|
||||
isNew = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if isNew {
|
||||
if device == defaultDevice {
|
||||
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
|
||||
}
|
||||
fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
|
||||
_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
m.Set(configMountpoint, mountpoint)
|
||||
|
||||
return fs.ConfigGoto("end")
|
||||
case "end":
|
||||
// All the config flows end up here in case we need to carry on with something
|
||||
@@ -331,16 +432,17 @@ type Options struct {
|
||||
|
||||
// Fs represents a remote jottacloud
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
user string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
endpointURL string
|
||||
srv *rest.Client
|
||||
apiSrv *rest.Client
|
||||
pacer *fs.Pacer
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
name string
|
||||
root string
|
||||
user string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
fileEndpoint string
|
||||
allocateEndpoint string
|
||||
jfsSrv *rest.Client
|
||||
apiSrv *rest.Client
|
||||
pacer *fs.Pacer
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
}
|
||||
|
||||
// Object describes a jottacloud object
|
||||
@@ -518,7 +620,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
|
||||
values.Set("client_id", defaultClientID)
|
||||
values.Set("grant_type", "password")
|
||||
values.Set("password", loginToken.AuthToken)
|
||||
values.Set("scope", "offline_access+openid")
|
||||
values.Set("scope", "openid offline_access")
|
||||
values.Set("username", loginToken.Username)
|
||||
values.Encode()
|
||||
opts = rest.Opts{
|
||||
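The scope change here matters because of form encoding: OAuth scopes are space separated, and in an encoded form body a space becomes '+', while a literal '+' is escaped to %2B. A tiny demonstration with net/url:

	package main

	import (
		"fmt"
		"net/url"
	)

	func main() {
		v := url.Values{}
		v.Set("scope", "offline_access+openid") // old value: the literal '+' gets escaped
		fmt.Println(v.Encode())                 // scope=offline_access%2Bopenid

		v.Set("scope", "openid offline_access") // new value: the space encodes as '+'
		fmt.Println(v.Encode())                 // scope=openid+offline_access
	}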
@@ -587,15 +689,47 @@ func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *ap
	return info, nil
}

// setEndpointURL generates the API endpoint URL
func (f *Fs) setEndpointURL() {
// createDevice makes a device
func createDevice(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
	opts := rest.Opts{
		Method:     "POST",
		Path:       urlPathEscape(path),
		Parameters: url.Values{},
	}

	opts.Parameters.Set("type", "WORKSTATION")

	_, err = srv.CallXML(ctx, &opts, nil, &info)
	if err != nil {
		return nil, fmt.Errorf("couldn't create device: %w", err)
	}
	return info, nil
}

// createMountPoint makes a mount point
func createMountPoint(ctx context.Context, srv *rest.Client, path string) (info *api.JottaMountPoint, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path:   urlPathEscape(path),
	}

	_, err = srv.CallXML(ctx, &opts, nil, &info)
	if err != nil {
		return nil, fmt.Errorf("couldn't create mountpoint: %w", err)
	}
	return info, nil
}

// setEndpoints generates the API endpoints
func (f *Fs) setEndpoints() {
	if f.opt.Device == "" {
		f.opt.Device = defaultDevice
	}
	if f.opt.Mountpoint == "" {
		f.opt.Mountpoint = defaultMountpoint
	}
	f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
	f.fileEndpoint = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
	f.allocateEndpoint = path.Join("/jfs", f.opt.Device, f.opt.Mountpoint)
}

// readMetaDataForPath reads the metadata from the path
@@ -607,7 +741,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
	var result api.JottaFile
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(ctx, resp, err)
	})

@@ -648,17 +782,34 @@ func errorHandler(resp *http.Response) error {

// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
func urlPathEscape(in string) string {
	return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
	return strings.ReplaceAll(rest.URLPathEscape(in), "+", "%2B")
}
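As the comment notes, '+' is legal in a URL path, so a standards-conforming escaper leaves it alone and the extra ReplaceAll is needed for Jottacloud. A sketch using net/url instead of rclone's rest.URLPathEscape (which escapes per segment; net/url is used here only to illustrate the '+' behavior):

	package main

	import (
		"fmt"
		"net/url"
		"strings"
	)

	func main() {
		p := "dir/a+b.txt"
		std := url.PathEscape(p)                     // '+' is legal in a path, so Go leaves it alone
		jotta := strings.ReplaceAll(std, "+", "%2B") // Jottacloud wants it escaped anyway
		fmt.Println(std)   // dir%2Fa+b.txt
		fmt.Println(jotta) // dir%2Fa%2Bb.txt
	}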
// filePathRaw returns an unescaped file path (f.root, file)
func (f *Fs) filePathRaw(file string) string {
	return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
// Optionally made absolute by prefixing with "/", typically required when used
// as request parameter instead of the path (which is relative to some root url).
func (f *Fs) filePathRaw(file string, absolute bool) string {
	prefix := ""
	if absolute {
		prefix = "/"
	}
	return path.Join(prefix, f.fileEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
}

// filePath returns an escaped file path (f.root, file)
func (f *Fs) filePath(file string) string {
	return urlPathEscape(f.filePathRaw(file))
	return urlPathEscape(f.filePathRaw(file, false))
}

// allocatePathRaw returns an unescaped allocate file path (f.root, file)
// Optionally made absolute by prefixing with "/", typically required when used
// as request parameter instead of the path (which is relative to some root url).
func (f *Fs) allocatePathRaw(file string, absolute bool) string {
	prefix := ""
	if absolute {
		prefix = "/"
	}
	return path.Join(prefix, f.allocateEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
}

// Jottacloud requires the grant_type 'refresh_token' string
@@ -670,7 +821,7 @@ func (f *Fs) filePath(file string) string {
func grantTypeFilter(req *http.Request) {
	if legacyTokenURL == req.URL.String() {
		// read the entire body
		refreshBody, err := ioutil.ReadAll(req.Body)
		refreshBody, err := io.ReadAll(req.Body)
		if err != nil {
			return
		}
@@ -680,7 +831,7 @@ func grantTypeFilter(req *http.Request) {
		refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))

		// set the new ReadCloser (with a dummy Close())
		req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
		req.Body = io.NopCloser(bytes.NewReader(refreshBody))
	}
}
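grantTypeFilter rewrites a request body in flight: drain it, edit the bytes, and install a fresh ReadCloser. A self-contained sketch of the same technique; the URL is a placeholder, and the ContentLength update is an addition not present in the filter above:

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"net/http"
		"strings"
	)

	// rewriteBody drains the request body, edits it, and installs a new ReadCloser.
	func rewriteBody(req *http.Request) error {
		body, err := io.ReadAll(req.Body)
		if err != nil {
			return err
		}
		edited := strings.Replace(string(body), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1)
		req.Body = io.NopCloser(bytes.NewReader([]byte(edited)))
		req.ContentLength = int64(len(edited)) // keep the length consistent after editing
		return nil
	}

	func main() {
		req, _ := http.NewRequest("POST", "https://example.invalid/token", strings.NewReader("grant_type=refresh_token&x=1"))
		if err := rewriteBody(req); err != nil {
			panic(err)
		}
		out, _ := io.ReadAll(req.Body)
		fmt.Println(string(out)) // grant_type=REFRESH_TOKEN&x=1
	}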
@@ -691,12 +842,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
	if ok {
		ver, err = strconv.Atoi(version)
		if err != nil {
			return nil, nil, errors.New("Failed to parse config version")
			return nil, nil, errors.New("failed to parse config version")
		}
		ok = (ver == configVersion) || (ver == legacyConfigVersion)
	}
	if !ok {
		return nil, nil, errors.New("Outdated config - please reconfigure this backend")
		return nil, nil, errors.New("outdated config - please reconfigure this backend")
	}

	baseClient := fshttp.NewClient(ctx)
@@ -742,7 +893,7 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
	// Create OAuth Client
	oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to configure Jottacloud oauth client: %w", err)
		return nil, nil, fmt.Errorf("failed to configure Jottacloud oauth client: %w", err)
	}
	return oAuthClient, ts, nil
}
@@ -768,7 +919,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		name:   name,
		root:   root,
		opt:    *opt,
		srv:    rest.NewClient(oAuthClient).SetRoot(rootURL),
		jfsSrv: rest.NewClient(oAuthClient).SetRoot(jfsURL),
		apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
		pacer:  fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
@@ -778,7 +929,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		ReadMimeType:  true,
		WriteMimeType: false,
	}).Fill(ctx, f)
	f.srv.SetErrorHandler(errorHandler)
	f.jfsSrv.SetErrorHandler(errorHandler)
	if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
		f.features.ListR = nil
	}
@@ -797,7 +948,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		return nil, err
	}
	f.user = cust.Username
	f.setEndpointURL()
	f.setEndpoints()

	if root != "" && !rootIsDir {
		// Check to see if the root is actually an existing file
@@ -863,7 +1014,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
	opts.Parameters.Set("mkDir", "true")

	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &jf)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
@@ -892,7 +1043,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	var resp *http.Response
	var result api.JottaFolder
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(ctx, resp, err)
	})

@@ -931,49 +1082,106 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	return entries, nil
}

// listFileDirFn is called from listFileDir to handle an object.
type listFileDirFn func(fs.DirEntry) error
func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback func(fs.DirEntry) error) error {

// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirList
func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
	pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
	pathPrefixLength := len(pathPrefix)
	startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
	startPathLength := len(startPath)
	for i := range startFolder.Folders {
		folder := &startFolder.Folders[i]
		if !f.validFolder(folder) {
			return nil
	type stats struct {
		Folders int `xml:"folders"`
		Files   int `xml:"files"`
	}
	var expected, actual stats

	type xmlFile struct {
		Path     string          `xml:"path"`
		Name     string          `xml:"filename"`
		Checksum string          `xml:"md5"`
		Size     int64           `xml:"size"`
		Modified api.Rfc3339Time `xml:"modified"` // Note: Liststream response includes 3 decimal milliseconds, but we ignore them since there is second precision everywhere else
		Created  api.Rfc3339Time `xml:"created"`
	}

	type xmlFolder struct {
		Path string `xml:"path"`
	}

	addFolder := func(path string) error {
		return callback(fs.NewDir(filesystem.opt.Enc.ToStandardPath(path), time.Time{}))
	}

	addFile := func(f *xmlFile) error {
		return callback(&Object{
			hasMetaData: true,
			fs:          filesystem,
			remote:      filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)),
			size:        f.Size,
			md5:         f.Checksum,
			modTime:     time.Time(f.Modified),
		})
	}

	// liststream paths are /mountpoint/root/path
	// so the returned paths should have /mountpoint/root/ trimmed
	// as the caller is expecting path.
	pathPrefix := filesystem.opt.Enc.FromStandardPath(path.Join("/", filesystem.opt.Mountpoint, filesystem.root))
	trimPathPrefix := func(p string) string {
		p = strings.TrimPrefix(p, pathPrefix)
		p = strings.TrimPrefix(p, "/")
		return p
	}

	uniqueFolders := map[string]bool{}
	decoder := xml.NewDecoder(r)

	for {
		t, err := decoder.Token()
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
		folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
		folderPathLength := len(folderPath)
		var remoteDir string
		if folderPathLength > pathPrefixLength {
			remoteDir = folderPath[pathPrefixLength+1:]
			if folderPathLength > startPathLength {
				d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
				err := fn(d)
				if err != nil {
					return err
				}
			}
		}
		for i := range folder.Files {
			file := &folder.Files[i]
			if f.validFile(file) {
				remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
				o, err := f.newObjectWithInfo(ctx, remoteFile, file)
				if err != nil {
					return err
				}
				err = fn(o)
				if err != nil {
		switch se := t.(type) {
		case xml.StartElement:
			switch se.Name.Local {
			case "file":
				var f xmlFile
				if err := decoder.DecodeElement(&f, &se); err != nil {
					return err
				}
				f.Path = trimPathPrefix(f.Path)
				actual.Files++
				if !uniqueFolders[f.Path] {
					uniqueFolders[f.Path] = true
					actual.Folders++
					if err := addFolder(f.Path); err != nil {
						return err
					}
				}
				if err := addFile(&f); err != nil {
					return err
				}
			case "folder":
				var f xmlFolder
				if err := decoder.DecodeElement(&f, &se); err != nil {
					return err
				}
				f.Path = trimPathPrefix(f.Path)
				uniqueFolders[f.Path] = true
				actual.Folders++
				if err := addFolder(f.Path); err != nil {
					return err
				}
			case "stats":
				if err := decoder.DecodeElement(&expected, &se); err != nil {
					return err
				}
			}
		}
	}

	if expected.Folders != actual.Folders ||
		expected.Files != actual.Files {
		return fmt.Errorf("invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
	}
	return nil
}
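parseListRStream never loads the whole listing into memory: it walks the XML token stream and decodes each file or folder element as it arrives. The same token-loop pattern in a self-contained sketch, with an invented document rather than captured liststream output:

	package main

	import (
		"encoding/xml"
		"fmt"
		"io"
		"strings"
	)

	type file struct {
		Name string `xml:"filename"`
		Size int64  `xml:"size"`
	}

	func main() {
		r := strings.NewReader(`<list><file><filename>a</filename><size>1</size></file><file><filename>b</filename><size>2</size></file></list>`)
		dec := xml.NewDecoder(r)
		for {
			tok, err := dec.Token()
			if err == io.EOF {
				break
			} else if err != nil {
				panic(err)
			}
			if se, ok := tok.(xml.StartElement); ok && se.Name.Local == "file" {
				var f file
				if err := dec.DecodeElement(&f, &se); err != nil {
					panic(err)
				}
				fmt.Println(f.Name, f.Size) // each entry is handled as soon as it is decoded
			}
		}
	}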
@@ -988,12 +1196,23 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
		Path:       f.filePath(dir),
		Parameters: url.Values{},
	}
	opts.Parameters.Set("mode", "list")
	opts.Parameters.Set("mode", "liststream")
	list := walk.NewListRHelper(callback)

	var resp *http.Response
	var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		resp, err = f.jfsSrv.Call(ctx, &opts)
		if err != nil {
			return shouldRetry(ctx, resp, err)
		}

		err = parseListRStream(ctx, resp.Body, f, func(d fs.DirEntry) error {
			if d.Remote() == dir {
				return nil
			}
			return list.Add(d)
		})
		_ = resp.Body.Close()
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
@@ -1005,10 +1224,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
		}
		return fmt.Errorf("couldn't list files: %w", err)
	}
	list := walk.NewListRHelper(callback)
	err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
		return list.Add(entry)
	})
	if err != nil {
		return err
	}
@@ -1032,13 +1247,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	if f.opt.Device != "Jotta" {
		return nil, errors.New("upload not supported for devices other than Jotta")
	}
	o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
	return o, o.Update(ctx, in, src, options...)
}
@@ -1048,10 +1260,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
	// defer log.Trace(dirPath, "")("")
	// chop off trailing / if it exists
	if strings.HasSuffix(dirPath, "/") {
		dirPath = dirPath[:len(dirPath)-1]
	}
	parent := path.Dir(dirPath)
	parent := path.Dir(strings.TrimSuffix(dirPath, "/"))
	if parent == "." {
		parent = ""
	}
@@ -1099,7 +1308,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)

	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		resp, err = f.jfsSrv.Call(ctx, &opts)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
@@ -1126,6 +1335,45 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
}

// createOrUpdate tries to make remote file match without uploading.
// If the remote file exists, and has matching size and md5, only
// timestamps are updated. If the file does not exist or does
// not match size and md5, but matching content can be constructed
// from deduplication, the file will be updated/created. If the file
// is currently in trash, but can be made to match, it will be
// restored. Returns ErrorObjectNotFound if upload will be necessary
// to get a matching remote file.
func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
	opts := rest.Opts{
		Method:       "POST",
		Path:         f.filePath(file),
		Parameters:   url.Values{},
		ExtraHeaders: make(map[string]string),
	}

	opts.Parameters.Set("cphash", "true")

	fileDate := api.JottaTime(modTime).String()
	opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
	opts.ExtraHeaders["JMd5"] = md5
	opts.ExtraHeaders["JCreated"] = fileDate
	opts.ExtraHeaders["JModified"] = fileDate

	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &info)
		return shouldRetry(ctx, resp, err)
	})

	if apiErr, ok := err.(*api.Error); ok {
		// does not exist, i.e. not matching size and md5, and not possible to make it by deduplication
		if apiErr.StatusCode == http.StatusNotFound {
			return nil, fs.ErrorObjectNotFound
		}
	}
	return info, nil
}
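Read literally from the function above, the request createOrUpdate sends is a POST to the escaped file path with cphash=true and the metadata carried in J* headers. A rough sketch of that request shape using plain net/http; the URL is a placeholder and the exact composition rclone's rest client performs is an assumption:

	package main

	import (
		"fmt"
		"net/http"
		"strconv"
		"time"
	)

	const jottaTimeFormat = "2006-01-02-T15:04:05Z0700" // classic layout from the api package

	// buildCphashRequest is a hypothetical helper, not an rclone function.
	func buildCphashRequest(fileURL, md5 string, size int64, modTime time.Time) (*http.Request, error) {
		req, err := http.NewRequest("POST", fileURL+"?cphash=true", nil)
		if err != nil {
			return nil, err
		}
		date := modTime.Format(jottaTimeFormat)
		req.Header.Set("JSize", strconv.FormatInt(size, 10))
		req.Header.Set("JMd5", md5)
		req.Header.Set("JCreated", date)
		req.Header.Set("JModified", date)
		return req, nil
	}

	func main() {
		req, err := buildCphashRequest("https://jfs.example.invalid/jfs/user/Jotta/Archive/a.txt", "0cc175b9c0f1b6a831c399e269772661", 1, time.Now())
		if err != nil {
			panic(err)
		}
		fmt.Println(req.Method, req.URL, req.Header)
	}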
// copyOrMove copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *api.JottaFile, err error) {
	opts := rest.Opts{
@@ -1134,11 +1382,11 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
		Parameters: url.Values{},
	}

	opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, dest))))
	opts.Parameters.Set(method, f.filePathRaw(dest, true))

	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &info)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
@@ -1149,9 +1397,9 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1169,6 +1417,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}
	info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)

	// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
	if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
		fs.Debugf(src, "Server-side copied to trashed destination, restoring")
		info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
	}

	if err != nil {
		return nil, fmt.Errorf("couldn't copy file: %w", err)
	}
@@ -1179,9 +1433,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1241,7 +1495,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
		return fs.ErrorDirExists
	}

	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.fileEndpoint, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)

	if err != nil {
		return fmt.Errorf("couldn't move directory: %w", err)
@@ -1266,7 +1520,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
	var resp *http.Response
	var result api.JottaFile
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(ctx, resp, err)
	})

@@ -1292,19 +1546,19 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
		return "", errors.New("couldn't create public link - no uri received")
	}
	if result.PublicSharePath != "" {
		webLink := joinPath(baseURL, result.PublicSharePath)
		webLink := joinPath(wwwURL, result.PublicSharePath)
		fs.Debugf(nil, "Web link: %s", webLink)
	} else {
		fs.Debugf(nil, "No web link received")
	}
	directLink := joinPath(baseURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI))
	directLink := joinPath(wwwURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI))
	fs.Debugf(nil, "Direct link: %s", directLink)
	return directLink, nil
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	info, err := getDriveInfo(ctx, f.srv, f.user)
	info, err := getDriveInfo(ctx, f.jfsSrv, f.user)
	if err != nil {
		return nil, err
	}
@@ -1470,40 +1724,19 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
		return err
	}

	// prepare allocate request with existing metadata but changed timestamps
	var resp *http.Response
	var options []fs.OpenOption
	opts := rest.Opts{
		Method:       "POST",
		Path:         "files/v1/allocate",
		Options:      options,
		ExtraHeaders: make(map[string]string),
	}
	fileDate := api.Time(modTime).APIString()
	var request = api.AllocateFileRequest{
		Bytes:    o.size,
		Created:  fileDate,
		Modified: fileDate,
		Md5:      o.md5,
		Path:     path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
	}

	// send it
	var response api.AllocateFileResponse
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
		return shouldRetry(ctx, resp, err)
	})
	// request check/update with existing metadata and new modtime
	// (note that if size/md5 does not match, the file content will
	// also be modified if deduplication is possible, i.e. it is
	// important to use correct/latest values)
	_, err = o.fs.createOrUpdate(ctx, o.remote, modTime, o.size, o.md5)
	if err != nil {
		if err == fs.ErrorObjectNotFound {
			// file was modified (size/md5 changed) between readMetaData and createOrUpdate?
			return errors.New("metadata did not match")
		}
		return err
	}

	// check response
	if response.State != "COMPLETED" {
		// could be the file was modified (size/md5 changed) between readMetaData and the allocate request
		return errors.New("metadata did not match")
	}

	// update local metadata
	o.modTime = modTime
	return nil
@@ -1528,7 +1761,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	opts.Parameters.Set("mode", "bin")

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		resp, err = o.fs.jfsSrv.Call(ctx, &opts)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
@@ -1555,7 +1788,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
	var tempFile *os.File

	// create the cache file
	tempFile, err = ioutil.TempFile("", cachePrefix)
	tempFile, err = os.CreateTemp("", cachePrefix)
	if err != nil {
		return
	}
@@ -1583,7 +1816,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
	} else {
		// that's a small file, just read it into memory
		var inData []byte
		inData, err = ioutil.ReadAll(teeReader)
		inData, err = io.ReadAll(teeReader)
		if err != nil {
			return
		}
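readMD5 hashes the input while keeping a copy to replay: large inputs are spooled to a temp file, small ones are buffered in memory, and in both cases the bytes flow through an io.TeeReader into the hasher. A minimal sketch of the in-memory branch, with hashSmall as an invented name:

	package main

	import (
		"bytes"
		"crypto/md5"
		"encoding/hex"
		"fmt"
		"io"
		"strings"
	)

	// hashSmall tees the input through an MD5 hasher while buffering it,
	// then replays the buffered bytes as the output reader.
	func hashSmall(in io.Reader) (md5sum string, out io.Reader, err error) {
		hash := md5.New()
		tee := io.TeeReader(in, hash)
		data, err := io.ReadAll(tee)
		if err != nil {
			return "", nil, err
		}
		return hex.EncodeToString(hash.Sum(nil)), bytes.NewReader(data), nil
	}

	func main() {
		sum, out, err := hashSmall(strings.NewReader("hello"))
		if err != nil {
			panic(err)
		}
		rest, _ := io.ReadAll(out)
		fmt.Println(sum, string(rest)) // 5d41402abc4b2a76b9719d911017c592 hello
	}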
@@ -1596,7 +1829,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
@@ -1605,12 +1838,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	if err == nil {
		// if the object exists delete it
		err = o.remove(ctx, true)
		if err != nil {
		if err != nil && err != fs.ErrorObjectNotFound {
			// if delete failed then report that, unless it was because the file did not exist after all
			return fmt.Errorf("failed to remove old object: %w", err)
		}
	}
	// if the object does not exist we can just continue but if the error is something different we should report that
	if err != fs.ErrorObjectNotFound {
	} else if err != fs.ErrorObjectNotFound {
		// if the object does not exist we can just continue but if the error is something different we should report that
		return err
	}
}
@@ -1641,7 +1874,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	Options:      options,
	ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(src.ModTime(ctx)).APIString()
fileDate := api.Rfc3339Time(src.ModTime(ctx)).String()

// the allocate request
var request = api.AllocateFileRequest{
@@ -1649,7 +1882,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	Created:  fileDate,
	Modified: fileDate,
	Md5:      md5String,
	Path:     path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
	Path:     o.fs.allocatePathRaw(o.remote, true),
}

// send it
@@ -1680,7 +1913,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// copy the already uploaded bytes into the trash :)
var result api.UploadResponse
_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
_, err = io.CopyN(io.Discard, in, response.ResumePos)
if err != nil {
	return err
}
@@ -1697,7 +1930,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	o.md5 = result.Md5
	o.modTime = time.Unix(result.Modified/1000, 0)
} else {
	// If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
	// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
	return o.readMetaData(ctx, true)
}

@@ -1718,10 +1951,17 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
	opts.Parameters.Set("dl", "true")
}

return o.fs.pacer.Call(func() (bool, error) {
	resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
err := o.fs.pacer.Call(func() (bool, error) {
	resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
	return shouldRetry(ctx, resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
	// attempting to hard delete will fail if path does not exist, but standard delete will succeed
	if apiErr.StatusCode == http.StatusNotFound {
		return fs.ErrorObjectNotFound
	}
}
return err
}

// Remove an object
@@ -1,3 +1,4 @@
// Package koofr provides an interface to the Koofr storage system.
package koofr

import (
@@ -28,33 +29,57 @@ import (
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "koofr",
		Description: "Koofr",
		Description: "Koofr, Digi Storage and other Koofr-compatible storage providers",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: fs.ConfigProvider,
			Help: "Choose your storage provider.",
			// NOTE if you add a new provider here, then add it in the
			// setProviderDefaults() function and update options accordingly
			Examples: []fs.OptionExample{{
				Value: "koofr",
				Help:  "Koofr, https://app.koofr.net/",
			}, {
				Value: "digistorage",
				Help:  "Digi Storage, https://storage.rcs-rds.ro/",
			}, {
				Value: "other",
				Help:  "Any other Koofr API compatible storage service",
			}},
		}, {
			Name:     "endpoint",
			Help:     "The Koofr API endpoint to use.",
			Default:  "https://app.koofr.net",
			Provider: "other",
			Required: true,
			Advanced: true,
		}, {
			Name:     "mountid",
			Help:     "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
			Required: false,
			Default:  "",
			Advanced: true,
		}, {
			Name:     "setmtime",
			Help:     "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
			Default:  true,
			Required: true,
			Advanced: true,
		}, {
			Name:     "user",
			Help:     "Your Koofr user name.",
			Help:     "Your user name.",
			Required: true,
		}, {
			Name:       "password",
			Help:       "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
			Help:       "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
			Provider:   "koofr",
			IsPassword: true,
			Required:   true,
		}, {
			Name:       "password",
			Help:       "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
			Provider:   "digistorage",
			IsPassword: true,
			Required:   true,
		}, {
			Name:       "password",
			Help:       "Your password for rclone (generate one at your service's settings page).",
			Provider:   "other",
			IsPassword: true,
			Required:   true,
		}, {
@@ -71,6 +96,7 @@ func init() {

// Options represent the configuration of the Koofr backend
type Options struct {
	Provider string `config:"provider"`
	Endpoint string `config:"endpoint"`
	MountID  string `config:"mountid"`
	User     string `config:"user"`
@@ -255,13 +281,38 @@ func (f *Fs) fullPath(part string) string {
	return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
}

// NewFs constructs a new filesystem given a root path and configuration options
func setProviderDefaults(opt *Options) {
	// handle old, provider-less configs
	if opt.Provider == "" {
		if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
			opt.Provider = "koofr"
		} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
			opt.Provider = "digistorage"
		} else {
			opt.Provider = "other"
		}
	}
	// now assign an endpoint
	if opt.Provider == "koofr" {
		opt.Endpoint = "https://app.koofr.net"
	} else if opt.Provider == "digistorage" {
		opt.Endpoint = "https://storage.rcs-rds.ro"
	}
}
// NewFs constructs a new filesystem given a root path and rclone configuration options
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setProviderDefaults(opt)
|
||||
return NewFsFromOptions(ctx, name, root, opt)
|
||||
}
|
||||
|
||||
// NewFsFromOptions constructs a new filesystem given a root path and internal configuration options
|
||||
func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff fs.Fs, err error) {
|
||||
pass, err := obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
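The migration logic is worth tracing: a config written before the provider option existed has no provider key, so setProviderDefaults infers one from the stored endpoint and then pins the endpoint for the two known providers. A minimal sketch of how the three legacy cases resolve (the function body is copied from the hunk above so the sketch is self-contained; the Options struct is trimmed to the two fields it touches):

    package main

    import (
        "fmt"
        "strings"
    )

    type Options struct {
        Provider string
        Endpoint string
    }

    // Copied from the diff above.
    func setProviderDefaults(opt *Options) {
        if opt.Provider == "" {
            if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
                opt.Provider = "koofr"
            } else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
                opt.Provider = "digistorage"
            } else {
                opt.Provider = "other"
            }
        }
        if opt.Provider == "koofr" {
            opt.Endpoint = "https://app.koofr.net"
        } else if opt.Provider == "digistorage" {
            opt.Endpoint = "https://storage.rcs-rds.ro"
        }
    }

    func main() {
        for _, opt := range []Options{
            {},                                          // old default config -> koofr
            {Endpoint: "https://storage.rcs-rds.ro"},    // old Digi Storage config -> digistorage
            {Endpoint: "https://example.com/koofr-api"}, // anything else -> other, endpoint kept
        } {
            setProviderDefaults(&opt)
            fmt.Printf("%+v\n", opt)
        }
    }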
@@ -301,9 +352,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
     }
     if f.mountID == "" {
         if opt.MountID == "" {
-            return nil, errors.New("Failed to find primary mount")
+            return nil, errors.New("failed to find primary mount")
         }
-        return nil, errors.New("Failed to find mount " + opt.MountID)
+        return nil, errors.New("failed to find mount " + opt.MountID)
     }
     rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
     if err == nil && rootFile.Type != "dir" {
@@ -325,7 +376,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
     for i, file := range files {
         remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
         if file.Type == "dir" {
-            entries[i] = fs.NewDir(remote, time.Unix(0, 0))
+            entries[i] = fs.NewDir(remote, time.Time{})
         } else {
             entries[i] = &Object{
                 fs: f,
@@ -617,7 +668,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 //
 // https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
 //
-// I am not sure about meaning of "path" parameter; in my expriments
+// I am not sure about meaning of "path" parameter; in my experiments
 // it is always "%2F", and omitting it or putting any other value
 // results in 404.
 //
@@ -17,8 +17,12 @@ var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSp
 // About gets quota information
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
     var available, total, free int64
+    root, e := syscall.UTF16PtrFromString(f.root)
+    if e != nil {
+        return nil, fmt.Errorf("failed to read disk usage: %w", e)
+    }
     _, _, e1 := getFreeDiskSpace.Call(
-        uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
+        uintptr(unsafe.Pointer(root)),
         uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
         uintptr(unsafe.Pointer(&total)),     // lpTotalNumberOfBytes
         uintptr(unsafe.Pointer(&free)),      // lpTotalNumberOfFreeBytes
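This hunk (apparently the local backend's Windows About implementation) is a hardening fix: syscall.StringToUTF16Ptr panics if the string contains an interior NUL byte, while syscall.UTF16PtrFromString returns an error the caller can wrap and report. A sketch of the difference, Windows-only and with an invented path value:

    //go:build windows

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        // A path with an embedded NUL -- not something a normal config
        // produces, but exactly the input the old call would panic on.
        bad := "C:\\data\x00evil"

        // Old style: syscall.StringToUTF16Ptr(bad) would panic here.

        // New style: returns an error that can be handled.
        ptr, err := syscall.UTF16PtrFromString(bad)
        if err != nil {
            fmt.Println("refused:", err) // refused: invalid argument
            return
        }
        _ = ptr
    }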
@@ -7,7 +7,6 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "os"
     "path"
     "path/filepath"
@@ -22,6 +21,7 @@ import (
     "github.com/rclone/rclone/fs/config"
     "github.com/rclone/rclone/fs/config/configmap"
     "github.com/rclone/rclone/fs/config/configstruct"
+    "github.com/rclone/rclone/fs/filter"
     "github.com/rclone/rclone/fs/fserrors"
     "github.com/rclone/rclone/fs/hash"
     "github.com/rclone/rclone/lib/encoder"
@@ -42,9 +42,22 @@ func init() {
         Description: "Local Disk",
         NewFs:       NewFs,
         CommandHelp: commandHelp,
+        MetadataInfo: &fs.MetadataInfo{
+            System: systemMetadataInfo,
+            Help: `Depending on which OS is in use the local backend may return only some
+of the system metadata. Setting system metadata is supported on all
+OSes but setting user metadata is only supported on linux, freebsd,
+netbsd, macOS and Solaris. It is **not** supported on Windows yet
+([see pkg/attrs#47](https://github.com/pkg/xattr/issues/47)).
+
+User metadata is stored as extended attributes (which may not be
+supported by all file systems) under the "user.*" prefix.
+`,
+        },
         Options: []fs.Option{{
             Name:     "nounc",
             Help:     "Disable UNC (long path names) conversion on Windows.",
             Default:  false,
             Advanced: runtime.GOOS != "windows",
             Examples: []fs.OptionExample{{
                 Value: "true",
@@ -110,8 +123,8 @@ routine so this flag shouldn't normally be used.`,
     Help: `Don't check to see if the files change during upload.
 
 Normally rclone checks the size and modification time of files as they
-are being uploaded and aborts with a message which starts "can't copy
-- source file is being updated" if the file changes during upload.
+are being uploaded and aborts with a message which starts "can't copy -
+source file is being updated" if the file changes during upload.
 
 However on some file systems this modification time check may fail (e.g.
 [Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
@@ -221,15 +234,16 @@ type Options struct {
 
 // Fs represents a local filesystem rooted at root
 type Fs struct {
-    name        string              // the name of the remote
-    root        string              // The root directory (OS path)
-    opt         Options             // parsed config options
-    features    *fs.Features        // optional features
-    dev         uint64              // device number of root node
-    precisionOk sync.Once           // Whether we need to read the precision
-    precision   time.Duration       // precision of local filesystem
-    warnedMu    sync.Mutex          // used for locking access to 'warned'.
-    warned      map[string]struct{} // whether we have warned about this string
+    name           string              // the name of the remote
+    root           string              // The root directory (OS path)
+    opt            Options             // parsed config options
+    features       *fs.Features        // optional features
+    dev            uint64              // device number of root node
+    precisionOk    sync.Once           // Whether we need to read the precision
+    precision      time.Duration       // precision of local filesystem
+    warnedMu       sync.Mutex          // used for locking access to 'warned'.
+    warned         map[string]struct{} // whether we have warned about this string
+    xattrSupported int32               // whether xattrs are supported (atomic access)
 
     // do os.Lstat or os.Stat
     lstat func(name string) (os.FileInfo, error)
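xattrSupported is an int32 rather than a bool so it can be read and written with the sync/atomic primitives, since the backend may discover that extended attributes are unsupported from several concurrent workers. A minimal sketch of that pattern (names here are hypothetical, not the backend's actual helpers):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type store struct {
        xattrSupported int32 // 1 = supported, 0 = not; atomic access only
    }

    // disableXattrs is safe to call from concurrent goroutines.
    func (s *store) disableXattrs() {
        atomic.StoreInt32(&s.xattrSupported, 0)
    }

    func (s *store) xattrsOK() bool {
        return atomic.LoadInt32(&s.xattrSupported) != 0
    }

    func main() {
        s := &store{xattrSupported: 1}
        fmt.Println(s.xattrsOK()) // true
        s.disableXattrs()
        fmt.Println(s.xattrsOK()) // false
    }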
@@ -252,7 +266,10 @@ type Object struct {
 
 // ------------------------------------------------------------
 
-var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
+var (
+    errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
+    errLinksNeedsSuffix  = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
+)
 
 // NewFs constructs an Fs from the path
 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -273,12 +290,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         dev:   devUnset,
         lstat: os.Lstat,
     }
+    if xattrSupported {
+        f.xattrSupported = 1
+    }
     f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
     f.features = (&fs.Features{
         CaseInsensitive:         f.caseInsensitive(),
         CanHaveEmptyDirectories: true,
         IsLocal:                 true,
         SlowHash:                true,
+        ReadMetadata:            true,
+        WriteMetadata:           true,
+        UserMetadata:            xattrSupported, // can only R/W general purpose metadata if xattrs are supported
+        FilterAware:             true,
+        PartialUploads:          true,
     }).Fill(ctx, f)
     if opt.FollowSymlinks {
         f.lstat = os.Stat
@@ -289,7 +314,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     if err == nil {
         f.dev = readDevice(fi, f.opt.OneFileSystem)
     }
+    // Check to see if this is a .rclonelink if not found
+    hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
+    if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
+        fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
+    }
     if err == nil && f.isRegular(fi.Mode()) {
+        // Handle the odd case, that a symlink was specified by name without the link suffix
+        if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
+            return nil, errLinksNeedsSuffix
+        }
         // It is a file, so use the parent as the root
         f.root = filepath.Dir(f.root)
         // return an error with an fs which points to the parent
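With this change a root like dir/link.rclonelink is looked up under its real name dir/link when --links is active, and conversely a root that is a symlink but was named without the suffix is rejected with errLinksNeedsSuffix; the tests added at the bottom of this diff exercise both paths. The suffix juggling in isolation (the constant matches the one the backend and tests use):

    package main

    import (
        "fmt"
        "strings"
    )

    const linkSuffix = ".rclonelink" // same constant the backend uses

    func main() {
        root := "/vol/data/current.rclonelink"

        hasLinkSuffix := strings.HasSuffix(root, linkSuffix)
        fmt.Println(hasLinkSuffix) // true

        // When the suffixed name does not exist, stat the real symlink instead.
        fmt.Println(strings.TrimSuffix(root, linkSuffix)) // /vol/data/current
    }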
@@ -423,6 +457,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+    filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)
+
     fsDirPath := f.localPath(dir)
     _, err = os.Stat(fsDirPath)
     if err != nil {
@@ -473,7 +509,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
             continue
         }
         if fierr != nil {
-            err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
+            // Don't report errors on any file names that are excluded
+            if useFilter {
+                newRemote := f.cleanRemote(dir, name)
+                if !filter.IncludeRemote(newRemote) {
+                    continue
+                }
+            }
+            fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr)
             fs.Errorf(dir, "%v", fierr)
             _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
             continue
@@ -494,6 +537,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
         if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
             localPath := filepath.Join(fsDirPath, name)
             fi, err = os.Stat(localPath)
+            // Quietly skip errors on excluded files and directories
+            if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
+                continue
+            }
             if os.IsNotExist(err) || isCircularSymlinkError(err) {
                 // Skip bad symlinks and circular symlinks
                 err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
@@ -518,6 +565,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
             if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
                 newRemote += linkSuffix
             }
+            // Don't include non directory if not included
+            // we leave directory filtering to the layer above
+            if useFilter && !filter.IncludeRemote(newRemote) {
+                continue
+            }
             fso, err := f.newObjectWithInfo(newRemote, fi)
             if err != nil {
                 return nil, err
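Taken together, the List hunks make the local backend filter-aware: when the caller sets the use-filter flag on the context, excluded names are skipped inside the backend (and errors on excluded entries are suppressed) instead of being filtered later by the sync layer. The shape of the contract reduced to a sketch -- the filter package calls are the real rclone ones shown in this diff, the listing loop is simplified, and it runs inside the rclone module:

    package main

    import (
        "context"
        "fmt"

        "github.com/rclone/rclone/fs/filter"
    )

    func main() {
        ctx := context.Background()

        // Install a filter on the context, as the tests later in this diff do.
        ctx, fi := filter.AddConfig(ctx)
        _ = fi.AddRule("+ included")
        _ = fi.AddRule("- *")

        // Mark the context so filter-aware backends apply the rules themselves.
        ctx = filter.SetUseFilter(ctx, true)

        // What the new List code does per entry.
        f, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)
        for _, remote := range []string{"included", "excluded"} {
            if useFilter && !f.IncludeRemote(remote) {
                continue // skipped inside the backend, never stat'ed
            }
            fmt.Println("listing:", remote)
        }
    }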
@@ -610,7 +662,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
     precision = time.Second
 
     // Create temporary file and test it
-    fd, err := ioutil.TempFile("", "rclone")
+    fd, err := os.CreateTemp("", "rclone")
     if err != nil {
         // If failed return 1s
         // fmt.Println("Failed to create temp file", err)
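ioutil.TempFile is one of several io/ioutil helpers that moved into os and io in Go 1.16, after which the io/ioutil package was deprecated; this diff swaps each use for its new home. The correspondence used throughout these hunks, as a self-contained demo:

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"
    )

    func main() {
        // ioutil.TempFile -> os.CreateTemp
        fd, err := os.CreateTemp("", "rclone")
        if err != nil {
            panic(err)
        }
        defer os.Remove(fd.Name())

        // ioutil.NopCloser -> io.NopCloser
        rc := io.NopCloser(strings.NewReader("payload"))

        // ioutil.ReadAll -> io.ReadAll
        data, err := io.ReadAll(rc)
        if err != nil {
            panic(err)
        }
        fmt.Println(fd.Name(), string(data))
    }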
@@ -679,9 +731,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given
+// This is stored with the remote path given.
 //
-// It returns the destination Object and a possible error
+// It returns the destination Object and a possible error.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -903,7 +955,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
         return "", fmt.Errorf("hash: failed to open: %w", err)
     }
     var hashes map[hash.Type]string
-    hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
+    hashes, err = hash.StreamTypes(readers.NewContextReader(ctx, in), hash.NewHashSet(r))
     closeErr := in.Close()
     if err != nil {
         return "", fmt.Errorf("hash: failed to read: %w", err)
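Wrapping the file in readers.NewContextReader means a long hash computation now stops when the operation's context is cancelled instead of running to EOF. The idea is small enough to sketch without the rclone helper -- this is the generic pattern, not rclone's exact implementation:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "io"
        "strings"
    )

    // ctxReader aborts Read once the context is done.
    type ctxReader struct {
        ctx context.Context
        r   io.Reader
    }

    func (c ctxReader) Read(p []byte) (int, error) {
        if err := c.ctx.Err(); err != nil {
            return 0, err // context.Canceled or context.DeadlineExceeded
        }
        return c.r.Read(p)
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        r := ctxReader{ctx: ctx, r: strings.NewReader("some data to hash")}

        buf := make([]byte, 4)
        n, _ := r.Read(buf) // first read succeeds
        fmt.Println(string(buf[:n]))

        cancel()
        _, err := r.Read(buf)
        fmt.Println(errors.Is(err, context.Canceled)) // true
    }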
@@ -937,17 +989,22 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
     return o.modTime
 }
 
+// Set the atime and mtime of the object
+func (o *Object) setTimes(atime, mtime time.Time) (err error) {
+    if o.translatedLink {
+        err = lChtimes(o.path, atime, mtime)
+    } else {
+        err = os.Chtimes(o.path, atime, mtime)
+    }
+    return err
+}
+
 // SetModTime sets the modification time of the local fs object
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
     if o.fs.opt.NoSetModTime {
         return nil
     }
-    var err error
-    if o.translatedLink {
-        err = lChtimes(o.path, modTime, modTime)
-    } else {
-        err = os.Chtimes(o.path, modTime, modTime)
-    }
+    err := o.setTimes(modTime, modTime)
     if err != nil {
         return err
     }
@@ -1032,7 +1089,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
     if err != nil {
         return nil, err
     }
-    return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
+    return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
 }
 
 // Open an object for read
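openTranslatedLink serves the symlink's target path as if it were the file body, so range reads operate on that string. The test later in this diff opens RangeOption{Start: 2, End: 5} (an inclusive range, hence 4 bytes) against a link whose target is "file.txt"; the same arithmetic in miniature, using io.LimitReader as a stand-in for rclone's readers.NewLimitedReadCloser:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    func main() {
        // A range request Start=2, End=5 becomes offset=2, limit=4.
        linkdst := "file.txt"
        offset, limit := int64(2), int64(4)

        rc := io.NopCloser(strings.NewReader(linkdst[offset:]))
        data, _ := io.ReadAll(io.LimitReader(rc, limit))
        fmt.Printf("%q\n", data) // "le.t" == "file.txt"[2:6]
        _ = rc.Close()
    }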
@@ -1133,6 +1190,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         return err
     }
 
+    // Wipe hashes before update
+    o.clearHashCache()
+
     var symlinkData bytes.Buffer
     // If the object is a regular file, create it.
     // If it is a translated link, just read in the contents, and
@@ -1219,6 +1279,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         return err
     }
 
+    // Fetch and set metadata if --metadata is in use
+    meta, err := fs.GetMetadataOptions(ctx, src, options)
+    if err != nil {
+        return fmt.Errorf("failed to read metadata from source object: %w", err)
+    }
+    err = o.writeMetadata(meta)
+    if err != nil {
+        return fmt.Errorf("failed to set metadata: %w", err)
+    }
+
     // ReRead info now that we have finished
     return o.lstat()
 }
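With this hunk, Update honours --metadata: it asks the source object for its metadata and applies it to the freshly written local file. fs.Metadata is a string-to-string map, so a consumer parses the values the way the tests at the bottom of this diff do -- mode as octal, times as RFC 3339 with nanoseconds. A sketch with invented values:

    package main

    import (
        "fmt"
        "strconv"
        "time"
    )

    func main() {
        // Mirrors what the local backend's Metadata method returns
        // (system keys per the MetadataInfo help added earlier in this diff).
        m := map[string]string{
            "mode":   "0644",                           // octal file mode
            "mtime":  "2011-12-12T14:15:16.999999999Z", // RFC 3339 with nanoseconds
            "potato": "wedges",                         // user metadata (user.* xattr)
        }

        mode, err := strconv.ParseInt(m["mode"], 8, 64)
        if err != nil {
            panic(err)
        }
        mtime, err := time.Parse(time.RFC3339Nano, m["mtime"])
        if err != nil {
            panic(err)
        }
        fmt.Printf("mode=%o mtime=%s potato=%q\n", mode, mtime, m["potato"])
    }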
@@ -1295,6 +1365,13 @@ func (o *Object) setMetadata(info os.FileInfo) {
     }
 }
 
+// clearHashCache wipes any cached hashes for the object
+func (o *Object) clearHashCache() {
+    o.fs.objectMetaMu.Lock()
+    o.hashes = nil
+    o.fs.objectMetaMu.Unlock()
+}
+
 // Stat an Object into info
 func (o *Object) lstat() error {
     info, err := o.fs.lstat(o.path)
@@ -1306,34 +1383,60 @@ func (o *Object) lstat() error {
 
 // Remove an object
 func (o *Object) Remove(ctx context.Context) error {
+    o.clearHashCache()
     return remove(o.path)
 }
 
+// Metadata returns metadata for an object
+//
+// It should return nil if there is no Metadata
+func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
+    metadata, err = o.getXattr()
+    if err != nil {
+        return nil, err
+    }
+    err = o.readMetadataFromFile(&metadata)
+    if err != nil {
+        return nil, err
+    }
+    return metadata, nil
+}
+
+// Write the metadata on the object
+func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
+    err = o.setXattr(metadata)
+    if err != nil {
+        return err
+    }
+    err = o.writeMetadataToFile(metadata)
+    if err != nil {
+        return err
+    }
+    return err
+}
+
 func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
-    if runtime.GOOS == "windows" {
-        if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
+    if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
+        if !filepath.IsAbs(s) {
             s2, err := filepath.Abs(s)
             if err == nil {
                 s = s2
             }
+        } else {
+            s = filepath.Clean(s)
         }
+    }
+    if runtime.GOOS == "windows" {
         s = filepath.ToSlash(s)
         vol := filepath.VolumeName(s)
         s = vol + enc.FromStandardPath(s[len(vol):])
         s = filepath.FromSlash(s)
-
         if !noUNC {
             // Convert to UNC
             s = file.UNCPath(s)
         }
         return s
     }
-    if !filepath.IsAbs(s) {
-        s2, err := filepath.Abs(s)
-        if err == nil {
-            s = s2
-        }
-    }
     s = enc.FromStandardPath(s)
     return s
 }
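The rewritten cleanRootPath unifies the pre-processing across OSes: relative roots are absolutized everywhere, absolute roots are now filepath.Clean'ed, and backslash-prefixed Windows roots (UNC or rooted paths) are left untouched before the usual long-path conversion via file.UNCPath, which the nounc option can disable. A rough illustration of the new control flow -- the helpers here are simplified stand-ins for file.UNCPath and the encoder (identity), so treat the outputs as illustrative, not authoritative:

    //go:build windows

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // Stand-in for file.UNCPath; the real helper also rewrites
    // \\server\share paths into \\?\UNC\ form.
    func uncPath(s string) string {
        if strings.HasPrefix(s, `\\`) {
            return s
        }
        return `\\?\` + s
    }

    func cleanRootPath(s string) string {
        // New logic: leave \-prefixed roots alone on Windows.
        if !strings.HasPrefix(s, `\`) {
            if !filepath.IsAbs(s) {
                if s2, err := filepath.Abs(s); err == nil {
                    s = s2
                }
            } else {
                s = filepath.Clean(s) // e.g. C:\work\..\data -> C:\data
            }
        }
        return uncPath(s)
    }

    func main() {
        for _, s := range []string{`C:\work\..\data`, `\\server\share\dir`} {
            fmt.Printf("%-22s -> %s\n", s, cleanRootPath(s))
        }
    }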
@@ -1348,4 +1451,5 @@ var (
     _ fs.Commander      = &Fs{}
     _ fs.OpenWriterAter = &Fs{}
     _ fs.Object         = &Object{}
+    _ fs.Metadataer     = &Object{}
 )
@@ -1,17 +1,25 @@
 package local
 
 import (
     "bytes"
     "context"
-    "io/ioutil"
+    "fmt"
+    "io"
+    "os"
     "path"
     "path/filepath"
     "runtime"
+    "sort"
     "testing"
     "time"
 
     "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/accounting"
+    "github.com/rclone/rclone/fs/config/configmap"
+    "github.com/rclone/rclone/fs/filter"
     "github.com/rclone/rclone/fs/hash"
+    "github.com/rclone/rclone/fs/object"
+    "github.com/rclone/rclone/fs/operations"
     "github.com/rclone/rclone/fstest"
     "github.com/rclone/rclone/lib/file"
     "github.com/rclone/rclone/lib/readers"
@@ -27,7 +35,6 @@ func TestMain(m *testing.M) {
 // Test copy with source file that's updating
 func TestUpdatingCheck(t *testing.T) {
     r := fstest.NewRun(t)
-    defer r.Finalise()
     filePath := "sub dir/local test"
     r.WriteFile(filePath, "content", time.Now())
 
@@ -72,7 +79,6 @@ func TestUpdatingCheck(t *testing.T) {
 func TestSymlink(t *testing.T) {
     ctx := context.Background()
     r := fstest.NewRun(t)
-    defer r.Finalise()
     f := r.Flocal.(*Fs)
     dir := f.root
 
@@ -141,10 +147,24 @@ func TestSymlink(t *testing.T) {
     _, err = r.Flocal.NewObject(ctx, "symlink2.txt")
     require.Equal(t, fs.ErrorObjectNotFound, err)
 
+    // Check that NewFs works with the suffixed version and --links
+    f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
+        "links": "true",
+    })
+    require.Equal(t, fs.ErrorIsFile, err)
+    require.Equal(t, dir, f2.(*Fs).root)
+
+    // Check that NewFs doesn't see the non suffixed version with --links
+    f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
+        "links": "true",
+    })
+    require.Equal(t, errLinksNeedsSuffix, err)
+    require.Nil(t, f2)
+
     // Check reading the object
     in, err := o.Open(ctx)
     require.NoError(t, err)
-    contents, err := ioutil.ReadAll(in)
+    contents, err := io.ReadAll(in)
     require.NoError(t, err)
     require.Equal(t, "file.txt", string(contents))
     require.NoError(t, in.Close())
@@ -152,7 +172,7 @@ func TestSymlink(t *testing.T) {
     // Check reading the object with range
     in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
     require.NoError(t, err)
-    contents, err = ioutil.ReadAll(in)
+    contents, err = io.ReadAll(in)
     require.NoError(t, err)
     require.Equal(t, "file.txt"[2:5+1], string(contents))
     require.NoError(t, in.Close())
@@ -166,3 +186,372 @@ func TestSymlinkError(t *testing.T) {
     _, err := NewFs(context.Background(), "local", "/", m)
     assert.Equal(t, errLinksAndCopyLinks, err)
 }
+
+// Test hashes on updating an object
+func TestHashOnUpdate(t *testing.T) {
+    ctx := context.Background()
+    r := fstest.NewRun(t)
+    const filePath = "file.txt"
+    when := time.Now()
+    r.WriteFile(filePath, "content", when)
+    f := r.Flocal.(*Fs)
+
+    // Get the object
+    o, err := f.NewObject(ctx, filePath)
+    require.NoError(t, err)
+
+    // Test the hash is as we expect
+    md5, err := o.Hash(ctx, hash.MD5)
+    require.NoError(t, err)
+    assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
+
+    // Reupload it with different contents but same size and timestamp
+    var b = bytes.NewBufferString("CONTENT")
+    src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
+    err = o.Update(ctx, b, src)
+    require.NoError(t, err)
+
+    // Check the hash is as expected
+    md5, err = o.Hash(ctx, hash.MD5)
+    require.NoError(t, err)
+    assert.Equal(t, "45685e95985e20822fb2538a522a5ccf", md5)
+}
+
+// Test hashes on deleting an object
+func TestHashOnDelete(t *testing.T) {
+    ctx := context.Background()
+    r := fstest.NewRun(t)
+    const filePath = "file.txt"
+    when := time.Now()
+    r.WriteFile(filePath, "content", when)
+    f := r.Flocal.(*Fs)
+
+    // Get the object
+    o, err := f.NewObject(ctx, filePath)
+    require.NoError(t, err)
+
+    // Test the hash is as we expect
+    md5, err := o.Hash(ctx, hash.MD5)
+    require.NoError(t, err)
+    assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
+
+    // Delete the object
+    require.NoError(t, o.Remove(ctx))
+
+    // Test the hash cache is empty
+    require.Nil(t, o.(*Object).hashes)
+
+    // Test the hash returns an error
+    _, err = o.Hash(ctx, hash.MD5)
+    require.Error(t, err)
+}
+
+func TestMetadata(t *testing.T) {
+    ctx := context.Background()
+    r := fstest.NewRun(t)
+    const filePath = "metafile.txt"
+    when := time.Now()
+    const dayLength = len("2001-01-01")
+    whenRFC := when.Format(time.RFC3339Nano)
+    r.WriteFile(filePath, "metadata file contents", when)
+    f := r.Flocal.(*Fs)
+
+    // Get the object
+    obj, err := f.NewObject(ctx, filePath)
+    require.NoError(t, err)
+    o := obj.(*Object)
+
+    features := f.Features()
+
+    var hasXID, hasAtime, hasBtime bool
+    switch runtime.GOOS {
+    case "darwin", "freebsd", "netbsd", "linux":
+        hasXID, hasAtime, hasBtime = true, true, true
+    case "openbsd", "solaris":
+        hasXID, hasAtime = true, true
+    case "windows":
+        hasAtime, hasBtime = true, true
+    case "plan9", "js":
+        // nada
+    default:
+        t.Errorf("No test cases for OS %q", runtime.GOOS)
+    }
+
+    assert.True(t, features.ReadMetadata)
+    assert.True(t, features.WriteMetadata)
+    assert.Equal(t, xattrSupported, features.UserMetadata)
+
+    t.Run("Xattr", func(t *testing.T) {
+        if !xattrSupported {
+            t.Skip()
+        }
+        m, err := o.getXattr()
+        require.NoError(t, err)
+        assert.Nil(t, m)
+
+        inM := fs.Metadata{
+            "potato":  "chips",
+            "cabbage": "soup",
+        }
+        err = o.setXattr(inM)
+        require.NoError(t, err)
+
+        m, err = o.getXattr()
+        require.NoError(t, err)
+        assert.NotNil(t, m)
+        assert.Equal(t, inM, m)
+    })
+
+    checkTime := func(m fs.Metadata, key string, when time.Time) {
+        mt, ok := o.parseMetadataTime(m, key)
+        assert.True(t, ok)
+        dt := mt.Sub(when)
+        precision := time.Second
+        assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
+    }
+
+    checkInt := func(m fs.Metadata, key string, base int) int {
+        value, ok := o.parseMetadataInt(m, key, base)
+        assert.True(t, ok)
+        return value
+    }
+    t.Run("Read", func(t *testing.T) {
+        m, err := o.Metadata(ctx)
+        require.NoError(t, err)
+        assert.NotNil(t, m)
+
+        // All OSes have these
+        checkInt(m, "mode", 8)
+        checkTime(m, "mtime", when)
+
+        assert.Equal(t, len(whenRFC), len(m["mtime"]))
+        assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])
+
+        if hasAtime {
+            checkTime(m, "atime", when)
+        }
+        if hasBtime {
+            checkTime(m, "btime", when)
+        }
+        if hasXID {
+            checkInt(m, "uid", 10)
+            checkInt(m, "gid", 10)
+        }
+    })
+
+    t.Run("Write", func(t *testing.T) {
+        newAtimeString := "2011-12-13T14:15:16.999999999Z"
+        newAtime := fstest.Time(newAtimeString)
+        newMtimeString := "2011-12-12T14:15:16.999999999Z"
+        newMtime := fstest.Time(newMtimeString)
+        newBtimeString := "2011-12-11T14:15:16.999999999Z"
+        newBtime := fstest.Time(newBtimeString)
+        newM := fs.Metadata{
+            "mtime": newMtimeString,
+            "atime": newAtimeString,
+            "btime": newBtimeString,
+            // Can't test uid, gid without being root
+            "mode":   "0767",
+            "potato": "wedges",
+        }
+        err := o.writeMetadata(newM)
+        require.NoError(t, err)
+
+        m, err := o.Metadata(ctx)
+        require.NoError(t, err)
+        assert.NotNil(t, m)
+
+        mode := checkInt(m, "mode", 8)
+        if runtime.GOOS != "windows" {
+            assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
+        }
+
+        checkTime(m, "mtime", newMtime)
+        if hasAtime {
+            checkTime(m, "atime", newAtime)
+        }
+        if haveSetBTime {
+            checkTime(m, "btime", newBtime)
+        }
+        if xattrSupported {
+            assert.Equal(t, "wedges", m["potato"])
+        }
+    })
+
+}
+
+func TestFilter(t *testing.T) {
+    ctx := context.Background()
+    r := fstest.NewRun(t)
+    when := time.Now()
+    r.WriteFile("included", "included file", when)
+    r.WriteFile("excluded", "excluded file", when)
+    f := r.Flocal.(*Fs)
+
+    // Check set up for filtering
+    assert.True(t, f.Features().FilterAware)
+
+    // Add a filter
+    ctx, fi := filter.AddConfig(ctx)
+    require.NoError(t, fi.AddRule("+ included"))
+    require.NoError(t, fi.AddRule("- *"))
+
+    // Check listing without use filter flag
+    entries, err := f.List(ctx, "")
+    require.NoError(t, err)
+    sort.Sort(entries)
+    require.Equal(t, "[excluded included]", fmt.Sprint(entries))
+
+    // Add user filter flag
+    ctx = filter.SetUseFilter(ctx, true)
+
+    // Check listing with use filter flag
+    entries, err = f.List(ctx, "")
+    require.NoError(t, err)
+    sort.Sort(entries)
+    require.Equal(t, "[included]", fmt.Sprint(entries))
+}
+
+func testFilterSymlink(t *testing.T, copyLinks bool) {
+    ctx := context.Background()
+    r := fstest.NewRun(t)
+    defer r.Finalise()
+    when := time.Now()
+    f := r.Flocal.(*Fs)
+
+    // Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
+    r.WriteFile("included.file", "included file", when)
+    r.WriteFile("included.dir/included.sub.file", "included sub file", when)
+    require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
+    require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
+    require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
+
+    defer func() {
+        // Reset -L/-l mode
+        f.opt.FollowSymlinks = false
+        f.opt.TranslateSymlinks = false
+        f.lstat = os.Lstat
+    }()
+    if copyLinks {
+        // Set fs into "-L" mode
+        f.opt.FollowSymlinks = true
+        f.opt.TranslateSymlinks = false
+        f.lstat = os.Stat
+    } else {
+        // Set fs into "-l" mode
+        f.opt.FollowSymlinks = false
+        f.opt.TranslateSymlinks = true
+        f.lstat = os.Lstat
+    }
+
+    // Check set up for filtering
+    assert.True(t, f.Features().FilterAware)
+
+    // Reset global error count
+    accounting.Stats(ctx).ResetErrors()
+    assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
+
+    // Add a filter
+    ctx, fi := filter.AddConfig(ctx)
+    require.NoError(t, fi.AddRule("+ included.file"))
+    require.NoError(t, fi.AddRule("+ included.dir/**"))
+    if copyLinks {
+        require.NoError(t, fi.AddRule("+ included.file.link"))
+        require.NoError(t, fi.AddRule("+ included.dir.link/**"))
+    } else {
+        require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
+        require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
+    }
+    require.NoError(t, fi.AddRule("- *"))
+
+    // Check listing without use filter flag
+    entries, err := f.List(ctx, "")
+    require.NoError(t, err)
+
+    if copyLinks {
+        // Check 1 global errors one for each dangling symlink
+        assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
+    } else {
+        // Check 0 global errors as dangling symlink copied properly
+        assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
+    }
+    accounting.Stats(ctx).ResetErrors()
+
+    sort.Sort(entries)
+    if copyLinks {
+        require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
+    } else {
+        require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
+    }
+
+    // Add user filter flag
+    ctx = filter.SetUseFilter(ctx, true)
+
+    // Check listing with use filter flag
+    entries, err = f.List(ctx, "")
+    require.NoError(t, err)
+    assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
+
+    sort.Sort(entries)
+    if copyLinks {
+        require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
+    } else {
+        require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
+    }
+
+    // Check listing through a symlink still works
+    entries, err = f.List(ctx, "included.dir")
+    require.NoError(t, err)
+    assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
+
+    sort.Sort(entries)
+    require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
+}
+
+func TestFilterSymlinkCopyLinks(t *testing.T) {
+    testFilterSymlink(t, true)
+}
+
+func TestFilterSymlinkLinks(t *testing.T) {
+    testFilterSymlink(t, false)
+}
+
+func TestCopySymlink(t *testing.T) {
+    ctx := context.Background()
+    r := fstest.NewRun(t)
+    defer r.Finalise()
+    when := time.Now()
+    f := r.Flocal.(*Fs)
+
+    // Create a file and a symlink to it
+    r.WriteFile("src/file.txt", "hello world", when)
+    require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
+    defer func() {
+        // Reset -L/-l mode
+        f.opt.FollowSymlinks = false
+        f.opt.TranslateSymlinks = false
+        f.lstat = os.Lstat
+    }()
+
+    // Set fs into "-l/--links" mode
+    f.opt.FollowSymlinks = false
+    f.opt.TranslateSymlinks = true
+    f.lstat = os.Lstat
+
+    // Create dst
+    require.NoError(t, f.Mkdir(ctx, "dst"))
+
+    // Do copy from src into dst
+    src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
+    require.NoError(t, err)
+    require.NotNil(t, src)
+    dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
+    require.NoError(t, err)
+    require.NotNil(t, dst)
+
+    // Test that we made a symlink and it has the right contents
+    dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
+    linkContents, err := os.Readlink(dstPath)
+    require.NoError(t, err)
+    assert.Equal(t, "file.txt", linkContents)
+}
Some files were not shown because too many files have changed in this diff.