Mirror of https://github.com/rclone/rclone.git, synced 2025-12-06 00:03:32 +00:00

Compare commits: fix-dropbo... fix-b2-acl (1126 commits)
.github/ISSUE_TEMPLATE/Bug.md (vendored): 46 lines changed

@@ -5,19 +5,31 @@ about: Report a problem with rclone
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
We understand you are having a problem with rclone; we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
**STOP and READ**
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
Please show the effort you've put into solving the problem and please be specific.
People are volunteering their time to help! Low effort posts are not likely to get good answers!

If you think you might have found a bug, try to replicate it with the latest beta (or stable).
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/

If you can still replicate it or just got a question then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue for a quick response.
for a quick response instead of filing an issue on this repo.

If you think you might have found a bug, please can you try to replicate it with the latest beta?
If nothing else helps, then please fill in the info below which helps us help you.

https://beta.rclone.org/

If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
**DO NOT REDACT** any information except passwords/keys/personal info.

You should use 3 backticks to begin and end your paste to make it readable.

Make sure to include a log obtained with '-vv'.

You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/

Thank you

@@ -25,6 +37,10 @@ The Rclone Developers

-->

#### The associated forum post URL from `https://forum.rclone.org`

#### What is the problem you are having with rclone?

@@ -33,18 +49,26 @@ The Rclone Developers

#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which OS you are using and how many bits (e.g. Windows 7, 64 bit)

#### Which cloud storage system are you using? (eg Google Drive)
#### Which cloud storage system are you using? (e.g. Google Drive)

#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)

#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)

<!--- Please keep the note below for others who read your bug report. -->

#### How to use GitHub

* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.
.github/ISSUE_TEMPLATE/Feature.md (vendored): 23 lines changed

@@ -7,12 +7,16 @@ about: Suggest a new feature or enhancement for rclone

Welcome :-)

So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
So you've got an idea to improve rclone? We love that!
You'll be glad to hear we've incorporated hundreds of ideas from contributors already.

Here is a checklist of things to do:
Probably the latest beta (or stable) release has your feature, so try to update your rclone.
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/

1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
If it still isn't there, here is a checklist of things to do:

1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)

@@ -22,6 +26,9 @@ The Rclone Developers

-->

#### The associated forum post URL from `https://forum.rclone.org`

#### What is your current rclone version (output from `rclone version`)?

@@ -34,3 +41,11 @@ The Rclone Developers

#### How do you think rclone should be changed to solve that?

<!--- Please keep the note below for others who read your feature request. -->

#### How to use GitHub

* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.
.github/PULL_REQUEST_TEMPLATE.md (vendored): 2 lines changed

@@ -22,7 +22,7 @@ Link issues and relevant forum posts here.

#### Checklist

- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-new-feature-or-bug-fix).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
.github/workflows/build.yml (vendored): 207 lines changed

@@ -12,79 +12,86 @@ on:
    tags:
      - '*'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        required: true
        default: true

jobs:
  build:
    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.11', 'go1.12', 'go1.13', 'go1.14']
        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.15', 'go1.16']

        include:
          - job_name: linux
            os: ubuntu-latest
            go: '1.15.x'
            go: '1.17.x'
            gotags: cmount
            build_flags: '-include "^linux/"'
            check: true
            quicktest: true
            racequicktest: true
            librclonetest: true
            deploy: true

          - job_name: mac
          - job_name: mac_amd64
            os: macOS-latest
            go: '1.15.x'
            go: '1.17.x'
            gotags: 'cmount'
            build_flags: '-include "^darwin/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: mac_arm64
            os: macOS-latest
            go: '1.17.x'
            gotags: 'cmount'
            build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
            deploy: true

          - job_name: windows_amd64
            os: windows-latest
            go: '1.15.x'
            go: '1.17.x'
            gotags: cmount
            build_flags: '-include "^windows/amd64" -cgo'
            build_args: '-buildmode exe'
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: windows_386
            os: windows-latest
            go: '1.15.x'
            go: '1.17.x'
            gotags: cmount
            goarch: '386'
            cgo: '1'
            build_flags: '-include "^windows/386" -cgo'
            build_args: '-buildmode exe'
            quicktest: true
            deploy: true

          - job_name: other_os
            os: ubuntu-latest
            go: '1.15.x'
            build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
            go: '1.17.x'
            build_flags: '-exclude "^(windows/|darwin/|linux/)"'
            compile_all: true
            deploy: true

          - job_name: go1.11
          - job_name: go1.15
            os: ubuntu-latest
            go: '1.11.x'
            go: '1.15.x'
            quicktest: true
            racequicktest: true

          - job_name: go1.12
          - job_name: go1.16
            os: ubuntu-latest
            go: '1.12.x'
            quicktest: true

          - job_name: go1.13
            os: ubuntu-latest
            go: '1.13.x'
            quicktest: true

          - job_name: go1.14
            os: ubuntu-latest
            go: '1.14.x'
            go: '1.16.x'
            quicktest: true
            racequicktest: true

@@ -107,10 +114,11 @@ jobs:
      - name: Set environment variables
        shell: bash
        run: |
          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
          echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
          echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
          echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi

      - name: Install Libraries on Linux
        shell: bash

@@ -125,7 +133,7 @@ jobs:
        shell: bash
        run: |
          brew update
          brew cask install osxfuse
          brew install --cask macfuse
        if: matrix.os == 'macOS-latest'

      - name: Install Libraries on Windows

@@ -133,10 +141,10 @@ jobs:
        run: |
          $ProgressPreference = 'SilentlyContinue'
          choco install -y winfsp zip
          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
          echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
          if ($env:GOARCH -eq "386") {
            choco install -y mingw --forcex86 --force
            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
            echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
          }
          # Copy mingw32-make.exe to make.exe so the same command line
          # can be used on Windows as on macOS and Linux

@@ -181,12 +189,13 @@ jobs:
          make racequicktest
        if: matrix.racequicktest

      - name: Code quality test
      - name: Run librclone tests
        shell: bash
        run: |
          make build_dep
          make check
        if: matrix.check
          make -C librclone/ctest test
          make -C librclone/ctest clean
          librclone/python/test_rclone.py
        if: matrix.librclonetest

      - name: Compile all architectures test
        shell: bash

@@ -207,46 +216,122 @@ jobs:
        # Deploy binaries if enabled in config && not a PR && not a fork
        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

  xgo:
    timeout-minutes: 60
    name: "xgo cross compile"
  lint:
    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 30
    name: "lint"
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v1
        uses: actions/checkout@v2

      - name: Code quality test
        uses: golangci/golangci-lint-action@v2
        with:
          # Checkout into a fixed path to avoid import path problems on go < 1.11
          path: ./src/github.com/rclone/rclone
          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
          version: latest

      - name: Set environment variables
  android:
    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 30
    name: "android-all"
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      # Upgrade together with NDK version
      - name: Set up Go 1.16
        uses: actions/setup-go@v1
        with:
          go-version: 1.16

      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
      - name: Force NDK version
        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true

      - name: Go module cache
        uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Set global environment variables
        shell: bash
        run: |
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'
          echo "VERSION=$(make version)" >> $GITHUB_ENV

      - name: Cross-compile rclone
        run: |
          docker pull billziss/xgo-cgofuse
          GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
          # xgo \
          #     -image=billziss/xgo-cgofuse \
          #     -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
          #     -tags cmount \
          #     -dest build \
          #     .
          xgo \
              -image=billziss/xgo-cgofuse \
              -targets=android/*,ios/* \
              -dest build \
              .

      - name: Build rclone
        shell: bash
      - name: build native rclone
        run: |
          make

      - name: install gomobile
        run: |
          go get golang.org/x/mobile/cmd/gobind
          go get golang.org/x/mobile/cmd/gomobile
          env PATH=$PATH:~/go/bin gomobile init

      - name: arm-v7a gomobile build
        run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

      - name: arm-v7a Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm' >> $GITHUB_ENV
          echo 'GOARM=7' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm-v7a build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .

      - name: arm64-v8a Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm64-v8a build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .

      - name: x86 Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=386' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x86 build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .

      - name: x64 Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=amd64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x64 build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .

      - name: Upload artifacts
        run: |
          make ci_upload
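All of the android build steps above inject the release version with `-ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION}`. As a minimal sketch of what Go's `-X` linker flag does (the package and variable below are invented for illustration, not taken from rclone):

```go
package main

import "fmt"

// Version defaults to a development placeholder; the linker can
// overwrite it at build time with, for example:
//
//	go build -ldflags "-s -X main.Version=v1.57.0" .
//
// rclone applies the same trick to github.com/rclone/rclone/fs.Version,
// so the binary reports the right version without editing any source.
var Version = "dev"

func main() {
	fmt.Println("version:", Version)
}
```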
@@ -7,6 +7,7 @@ on:

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:

@@ -15,7 +16,7 @@ jobs:
        with:
          fetch-depth: 0
      - name: Build and publish image
        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
        uses: ilteoood/docker_buildx@1.1.0
        with:
          tag: beta
          imageName: rclone/rclone
@@ -6,6 +6,7 @@ on:

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:

@@ -23,7 +24,7 @@ jobs:
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Build and publish image
        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
        uses: ilteoood/docker_buildx@1.1.0
        with:
          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
          imageName: rclone/rclone

@@ -31,3 +32,28 @@ jobs:
          publish: true
          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Checkout master
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
.gitignore (vendored): 5 lines changed

@@ -1,6 +1,7 @@
*~
_junk/
rclone
rclone.exe
build
docs/public
rclone.iml

@@ -10,3 +11,7 @@ rclone.iml
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__
@@ -5,7 +5,7 @@ linters:
    - deadcode
    - errcheck
    - goimports
    - golint
    - revive
    - ineffassign
    - structcheck
    - varcheck

@@ -24,3 +24,7 @@ issues:

  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0

run:
  # timeout for analysis, e.g. 30s, 5m, default is 1m
  timeout: 10m
CONTRIBUTING.md: 235 lines changed

@@ -12,94 +12,164 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):

* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
* Rclone version (e.g. output from `rclone version`)
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them

## Submitting a pull request ##
## Submitting a new feature or bug fix ##

If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.

If it is a big feature then make an issue first so it can be discussed.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.

You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.

First in your web browser press the fork button on [rclone's GitHub
To prepare your pull request first press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).

Now in your terminal
Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).

    go get -u github.com/rclone/rclone
    cd $GOPATH/src/github.com/rclone/rclone

Next open your terminal, change directory to your preferred folder and initialise your local rclone project:

    git clone https://github.com/rclone/rclone.git
    cd rclone
    git remote rename origin upstream
    # if you have SSH keys setup in your GitHub account:
    git remote add origin git@github.com:YOURUSER/rclone.git
    # otherwise:
    git remote add origin https://github.com/YOURUSER/rclone.git

Make a branch to add your new feature
Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.

Now [install Go](https://golang.org/doc/install) and verify your installation:

    go version

Great, you can now compile and execute your own version of rclone:

    go build
    ./rclone version

(Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature

    git checkout -b my-new-feature

And get hacking.

When ready - run the unit tests for the code you changed
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).

When ready - test the affected functionality and run the unit tests for the code you changed

    cd folder/with/changed/files
    go test -v

Note that you may need to make a test remote, eg `TestSwift` for some
Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.
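For illustration, here is a minimal, made-up sketch of the kind of test that `go test -v` picks up (rclone's unit tests are ordinary Go tests, and they commonly assert with the testify require package; the package, function and helper names below are hypothetical):

```go
package mypackage_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doubler stands in for whatever function your change touched.
func doubler(n int) int { return 2 * n }

// TestDoubler shows the shape of a unit test run by `go test -v`
// in the folder containing your changed files.
func TestDoubler(t *testing.T) {
	require.Equal(t, 42, doubler(21))
}
```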
Note the top level Makefile targets
|
||||
|
||||
* make check
|
||||
* make test
|
||||
|
||||
Both of these will be run by Travis when you make a pull request but
|
||||
you can do this yourself locally too. These require some extra go
|
||||
packages which you can install with
|
||||
|
||||
* make build_dep
|
||||
This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
|
||||
|
||||
Make sure you
|
||||
|
||||
* Add [unit tests](#testing) for a new feature.
|
||||
* Add [documentation](#writing-documentation) for a new feature.
|
||||
* Follow the [commit message guidelines](#commit-messages).
|
||||
* Add [unit tests](#testing) for a new feature
|
||||
* squash commits down to one per feature
|
||||
* rebase to master with `git rebase master`
|
||||
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
|
||||
|
||||
When you are done with that
|
||||
When you are done with that push your changes to Github:
|
||||
|
||||
git push origin my-new-feature
|
||||
git push -u origin my-new-feature
|
||||
|
||||
Go to the GitHub website and click [Create pull
|
||||
and open the GitHub website to [create your pull
|
||||
request](https://help.github.com/articles/creating-a-pull-request/).
|
||||
|
||||
You patch will get reviewed and you might get asked to fix some stuff.
|
||||
Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
|
||||
|
||||
If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
|
||||
```
|
||||
git log # See how many commits you want to squash
|
||||
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
|
||||
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
|
||||
git commit # Add a new commit message.
|
||||
git push --force # Push the squashed commit to your GitHub repo.
|
||||
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
|
||||
```
|
||||
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
|
||||
|
||||
## CI for your fork ##
|
||||
## Using Git and Github ##
|
||||
|
||||
### Committing your changes ###
|
||||
|
||||
Follow the guideline for [commit messages](#commit-messages) and then:
|
||||
|
||||
git checkout my-new-feature # To switch to your branch
|
||||
git status # To see the new and changed files
|
||||
git add FILENAME # To select FILENAME for the commit
|
||||
git status # To verify the changes to be committed
|
||||
git commit # To do the commit
|
||||
git log # To verify the commit. Use q to quit the log
|
||||
|
||||
You can modify the message or changes in the latest commit using:
|
||||
|
||||
git commit --amend
|
||||
|
||||
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||
|
||||
### Replacing your previously pushed commits ###
|
||||
|
||||
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
|
||||
|
||||
Your previously pushed commits are replaced by:
|
||||
|
||||
git push --force origin my-new-feature
|
||||
|
||||
### Basing your changes on the latest master ###
|
||||
|
||||
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
|
||||
|
||||
git checkout master
|
||||
git fetch upstream
|
||||
git merge --ff-only
|
||||
git push origin --follow-tags # optional update of your fork in GitHub
|
||||
git checkout my-new-feature
|
||||
git rebase master
|
||||
|
||||
If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||
|
||||
### Squashing your commits ###
|
||||
|
||||
To combine your commits into one commit:
|
||||
|
||||
git log # To count the commits to squash, e.g. the last 2
|
||||
git reset --soft HEAD~2 # To undo the 2 latest commits
|
||||
git status # To check everything is as expected
|
||||
|
||||
If everything is fine, then make the new combined commit:
|
||||
|
||||
git commit # To commit the undone commits as one
|
||||
|
||||
otherwise, you may roll back using:
|
||||
|
||||
git reflog # To check that HEAD{1} is your previous state
|
||||
git reset --soft 'HEAD@{1}' # To roll back to your previous state
|
||||
|
||||
If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||
|
||||
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
|
||||
|
||||
### GitHub Continuous Integration ###
|
||||
|
||||
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
|
||||
|
||||
## Testing ##
|
||||
|
||||
### Quick testing ###
|
||||
|
||||
rclone's tests are run from the go testing framework, so at the top
|
||||
level you can run this to run all the tests.
|
||||
|
||||
go test -v ./...
|
||||
|
||||
|
||||
You can also use `make`, if supported by your platform
|
||||
|
||||
make quicktest
|
||||
|
||||
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
|
||||
|
||||
### Backend testing ###
|
||||
|
||||
rclone contains a mixture of unit tests and integration tests.
|
||||
Because it is difficult (and in some respects pointless) to test cloud
|
||||
storage systems by mocking all their interfaces, rclone unit tests can
|
||||
@@ -115,8 +185,8 @@ are skipped if `TestDrive:` isn't defined.
|
||||
cd backend/drive
|
||||
go test -v
|
||||
|
||||
You can then run the integration tests which tests all of rclone's
|
||||
operations. Normally these get run against the local filing system,
|
||||
You can then run the integration tests which test all of rclone's
|
||||
operations. Normally these get run against the local file system,
|
||||
but they can be run against any of the remotes.
|
||||
|
||||
cd fs/sync
|
||||
@@ -127,18 +197,25 @@ but they can be run against any of the remotes.
|
||||
go test -v -remote TestDrive:
|
||||
|
||||
If you want to use the integration test framework to run these tests
|
||||
all together with an HTML report and test retries then from the
|
||||
altogether with an HTML report and test retries then from the
|
||||
project root:
|
||||
|
||||
go install github.com/rclone/rclone/fstest/test_all
|
||||
test_all -backend drive
|
||||
|
||||
### Full integration testing ###
|
||||
|
||||
If you want to run all the integration tests against all the remotes,
|
||||
then change into the project root and run
|
||||
|
||||
make check
|
||||
make test
|
||||
|
||||
This command is run daily on the integration test server. You can
|
||||
The commands may require some extra go packages which you can install with
|
||||
|
||||
make build_dep
|
||||
|
||||
The full integration tests are run daily on the integration test server. You can
|
||||
find the results at https://pub.rclone.org/integration-tests/
|
||||
|
||||
## Code Organisation ##
@@ -146,16 +223,17 @@ find the results at https://pub.rclone.org/integration-tests/

Rclone code is organised into a small number of top level directories
with modules beneath.

* backend - the rclone backends for interfacing to cloud providers -
  * all - import this to load all the cloud providers
  * ...providers
* bin - scripts for use while building or maintaining rclone
* cmd - the rclone commands
  * all - import this to load all the commands
  * ...commands
* cmdtest - end-to-end tests of commands, flags, environment variables,...
* docs - the documentation and website
  * content - adjust these docs only - everything else is autogenerated
  * command - these are auto-generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
  * accounting - bandwidth limiting and statistics
  * asyncreader - an io.Reader which reads ahead
@@ -170,7 +248,7 @@ with modules beneath.
  * log - logging facilities
  * march - iterates directories in lock step
  * object - in memory Fs objects
  * operations - primitives for sync, e.g. Copy, Move
  * sync - sync directories
  * walk - walk a directory
* fstest - provides integration test framework
@@ -178,7 +256,7 @@ with modules beneath.
  * mockdir - mocks an fs.Directory
  * mockobject - mocks an fs.Object
  * test_all - Runs integration tests for everything
* graphics - the images used in the website, etc.
* lib - libraries used by the backend
  * atexit - register functions to run when rclone exits
  * dircache - directory ID to name caching
@@ -197,18 +275,39 @@ If you add a new general flag (not for a backend), then document it in
alphabetical order.

If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field.

* Start with the most important information about the option,
  as a single sentence on a single line.
  * This text will be used for the command-line flag help.
  * It will be combined with other information, such as any default value,
    and the result will look odd if not written as a single sentence.
  * It should end with a period/full stop character, which will be shown
    in docs but automatically removed when producing the flag help.
  * Try to keep it below 80 characters, to reduce text wrapping in the terminal.
* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
  * Like with docs generated from Markdown, a single line break is ignored
    and two line breaks create a new paragraph.
  * This text will be shown to the user in `rclone config`
    and in the docs (where it will be added by `make backenddocs`,
    normally run some time before the next release).
* To create options of enumeration type use the `Examples:` field.
  * Each example value has its own `Help:` field, but it is treated
    a bit differently from the main option help text: it will be shown
    as an unordered list, therefore a single line break is enough to
    create a new list item. Also, for enumeration texts like names of
    countries, it looks better without an ending period/full stop character.
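To make these rules concrete, here is a hedged sketch of a backend option written to follow them - the option name, default and wording are invented for the example, not taken from a real backend:

```
fs.Option{
	Name: "list_chunk",
	// The first line is one sentence ending in a full stop - it becomes
	// the command-line flag help. Everything after the blank line
	// ("\n\n") is only shown in `rclone config` and in the generated docs.
	Help: `Size of listing chunk, i.e. number of objects to fetch per request.

Larger chunks use fewer transactions at the cost of more memory.
Most users should not need to change this.`,
	Default:  1000,
	Advanced: true,
}
```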
The only documentation you need to edit are the `docs/content/*.md`
files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.

Documentation for rclone sub commands is with their code, e.g.
`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
line, without a period/full stop character at the end, as it will be
combined unmodified with other information (such as any default value).

Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
@@ -251,7 +350,7 @@ And here is an example of a longer one:

```
mount: fix hang on errored upload

In certain circumstances, if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error, fixing the hang.
```
@@ -265,7 +364,7 @@ rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support in go1.11 and later to manage its dependencies.

rclone can be built with modules outside of the `GOPATH`.

To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
@@ -283,7 +382,7 @@ and `go.sum` in the same commit as your other changes.

If you need to update a dependency then run

    GO111MODULE=on go get -u golang.org/x/crypto

Check in a single commit as above.
@@ -326,15 +425,15 @@ Research

Getting going

* Create `backend/remote/remote.go` (copy this from a similar remote; a minimal sketch of the registration boilerplate is shown after this list)
  * box is a good one to start from if you have a directory-based remote
  * b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP-based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
  * `rclone purge -v TestRemote:rclone-info`
  * `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
  * `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
  * open `remote.csv` in a spreadsheet and examine
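As promised above, here is a hedged sketch of the heart of a new `backend/remote/remote.go`: an `init()` that registers the backend with the `fs` layer. The names and the empty option list are placeholders - copying box or b2 as suggested gives the real shape:

```
package remote

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",
		Description: "My Cloud Provider",
		NewFs:       NewFs,
		Options: []fs.Option{
			// backend options/flags go here, documented via their Help: fields
		},
	})
}

// NewFs constructs an Fs from the path, remote:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// parse the config with configstruct.Set(m, opt), then build the Fs
	return nil, errors.New("not implemented yet") // placeholder
}
```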
Unit tests
@@ -364,7 +463,7 @@ See the [testing](#testing) section for more information on integration tests.

Add your fs to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.

* `README.md` - main GitHub page
@@ -400,7 +499,7 @@ Usage

- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
  (The rclone used during building the plugin must be the same as the source of rclone)

Building

To turn your existing additions into a Go plugin, move them to an external repository
@@ -16,6 +16,8 @@ RUN apk --no-cache add ca-certificates fuse tzdata && \

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

RUN addgroup -g 1009 rclone && adduser -u 1009 -Ds /bin/sh -G rclone rclone

ENTRYPOINT [ "rclone" ]

WORKDIR /data
@@ -11,7 +11,7 @@ Current active maintainers of rclone are:

| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud, yandex & compress backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
@@ -19,7 +19,7 @@ Current active maintainers of rclone are:

**This is a work-in-progress draft**

This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.

## Triaging Tickets ##

@@ -27,17 +27,17 @@ When a ticket comes in it should be triaged. This means it should be classified

Rclone uses labels like this:

* `bug` - a definite, verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with `rclone mount` command
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation, etc.
* `Needs Go 1.XX` - waiting for that version of Go to be released
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
* `Remote: XXX` - which rclone backend this affects
@@ -45,13 +45,13 @@ Rclone uses the labels like this:

If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.

When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).

The milestones have these meanings:

* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled for a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment

@@ -65,7 +65,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.

@@ -81,15 +81,15 @@ Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer

High impact regressions should be fixed before the next release.

Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big, to let things settle down.

Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming, often needing several rounds of test and fix depending on exactly how many new features rclone has gained.

## Mailing list ##

There is now an invite-only mailing list for rclone developers `rclone-dev` on Google Groups.

## TODO ##
25729 MANUAL.html (generated) - File diff suppressed because one or more lines are too long

28628 MANUAL.txt (generated) - File diff suppressed because it is too large

61 Makefile
@@ -46,13 +46,13 @@ endif
.PHONY: rclone test_all vars version

rclone:
	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
	mkdir -p `go env GOPATH`/bin/
	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

test_all:
	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all

vars:
	@echo SHELL="'$(SHELL)'"
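The new `$(BUILD_ARGS)` variable is empty unless set, so these targets behave as before by default; it appears intended as a hook for passing extra arguments through to `go build`/`go install`. A hypothetical invocation (assuming your platform supports the race detector) would be

    make rclone BUILD_ARGS=-race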
@@ -93,8 +93,7 @@ build_dep:

# Get the release dependencies we only install on linux
release_dep_linux:
	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'

# Get the release dependencies we only install on Windows
release_dep_windows:
@@ -105,10 +104,14 @@ showupdates:
	@echo "*** Direct dependencies that could be updated ***"
	@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null

# Update direct dependencies only
updatedirect:
	GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
	GO111MODULE=on go mod tidy

# Update direct and indirect dependencies and test dependencies
update:
	GO111MODULE=on go get -d -u -t ./...
	GO111MODULE=on go mod tidy

# Tidy the module dependencies
@@ -120,7 +123,7 @@ doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs

rclone.1: MANUAL.md
	pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1

MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs rcdocs
	./bin/make_manual.py

MANUAL.html: MANUAL.md
@@ -188,10 +191,10 @@ upload_github:
	./bin/upload-github $(TAG)

cross: doc
	go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

beta:
	go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
	@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -199,23 +202,23 @@ log_since_last_release:
	git log $(LAST_TAG)..

compile_all:
	go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

ci_upload:
	sudo chown -R $$USER build
	find build -type l -delete
	gzip -r9v build
	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
	@echo Beta release ready at $(BETA_URL)/testbuilds

ci_beta:
	git log $(LAST_TAG).. > /tmp/git-log.txt
	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
	@echo Beta release ready at $(BETA_URL)
@@ -233,7 +236,7 @@ tag: retag doc
	@echo "Edit the new changelog in docs/content/changelog.md"
	@echo "Then commit all the changes"
	@echo git commit -m \"Version $(VERSION)\" -a -v
	@echo "And finally run make retag before make cross, etc."

retag:
	@echo "Version is $(VERSION)"
@@ -257,3 +260,33 @@ startstable:

winzip:
	zip -9 rclone-$(TAG).zip rclone.exe

# docker volume plugin
PLUGIN_USER ?= rclone
PLUGIN_TAG ?= latest
PLUGIN_BASE_TAG ?= latest
PLUGIN_ARCH ?= amd64
PLUGIN_IMAGE := $(PLUGIN_USER)/docker-volume-rclone:$(PLUGIN_TAG)
PLUGIN_BASE := $(PLUGIN_USER)/rclone:$(PLUGIN_BASE_TAG)
PLUGIN_BUILD_DIR := ./build/docker-plugin
PLUGIN_CONTRIB_DIR := ./contrib/docker-plugin/managed

docker-plugin-create:
	docker buildx inspect |grep -q /${PLUGIN_ARCH} || \
		docker run --rm --privileged tonistiigi/binfmt --install all
	rm -rf ${PLUGIN_BUILD_DIR}
	docker buildx build \
		--no-cache --pull \
		--build-arg BASE_IMAGE=${PLUGIN_BASE} \
		--platform linux/${PLUGIN_ARCH} \
		--output ${PLUGIN_BUILD_DIR}/rootfs \
		${PLUGIN_CONTRIB_DIR}
	cp ${PLUGIN_CONTRIB_DIR}/config.json ${PLUGIN_BUILD_DIR}
	docker plugin rm --force ${PLUGIN_IMAGE} 2>/dev/null || true
	docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}

docker-plugin-push:
	docker plugin push ${PLUGIN_IMAGE}
	docker plugin rm ${PLUGIN_IMAGE}

docker-plugin: docker-plugin-create docker-plugin-push
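Since the plugin variables above all use `?=` defaults (or are derived from them), they can be overridden on the command line. A hypothetical invocation building and pushing an arm64 plugin under your own Docker Hub user would look like

    make docker-plugin PLUGIN_USER=youruser PLUGIN_ARCH=arm64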
17 README.md
@@ -2,7 +2,7 @@

[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
@@ -10,12 +10,12 @@

[](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[](https://goreportcard.com/report/github.com/rclone/rclone)
[](https://godoc.org/github.com/rclone/rclone)
[](https://hub.docker.com/r/rclone/rclone)

# Rclone

Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.

## Storage providers

@@ -30,11 +30,12 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -58,17 +59,21 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)

Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

## Features

@@ -81,8 +86,8 @@ Please see [the full list of all storage providers and their features](https://r
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
46 RELEASE.md
@@ -4,7 +4,7 @@ This file describes how to make the various kinds of releases

## Extra required software for making a release

* [gh the github cli](https://github.com/cli/cli) for uploading packages
* pandoc for making the html and man pages

## Making a release

@@ -21,7 +21,7 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
@@ -34,13 +34,24 @@ This file describes how to make the various kinds of releases
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post

## Update dependencies

Early in the next release cycle, update the dependencies

* Review any pinned packages in go.mod and remove if possible
* make updatedirect
* make
* git commit -a -v
* make update
* make
* roll back any updates which didn't compile
* git commit -a -v --amend

Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
doing that, so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.

## Making a point release

@@ -48,8 +59,8 @@ If rclone needs a point release due to some horrendous bug:

Set vars

* BASE_TAG=v1.XX # e.g. v1.52
* NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1

First make the release branch. If this is a second point release then
@@ -65,9 +76,8 @@ Now

* git cherry-pick any fixes
* Do the steps as above
* make startstable
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push

@@ -77,6 +87,24 @@ Now

The rclone docker image should autobuild via GitHub Actions. If it doesn't
or needs to be updated then rebuild like this.

See: https://github.com/ilteoood/docker_buildx/issues/19
See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh

```
git co v1.54.1
docker pull golang
export DOCKER_CLI_EXPERIMENTAL=enabled
docker buildx create --name actions_builder --use
docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
echo "Supported platforms: $SUPPORTED_PLATFORMS"
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx stop actions_builder
```

### Old build for linux/amd64 only

```
docker pull golang
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
```
@@ -1,6 +1,7 @@
package alias

import (
	"context"
	"errors"
	"strings"

@@ -19,7 +20,7 @@ func init() {
		NewFs: NewFs,
		Options: []fs.Option{{
			Name:     "remote",
			Help:     "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
			Required: true,
		}},
	}

@@ -34,7 +35,7 @@ type Options struct {

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
@@ -47,5 +48,5 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if strings.HasPrefix(opt.Remote, name+":") {
		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
	}
	return cache.Get(ctx, fspath.JoinRootPath(opt.Remote, root))
}
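The knock-on effect of this signature change shows up in the test diff below: every caller must now supply a `context.Context`. A minimal sketch of the calling side (the remote name is illustrative):

```
ctx := context.Background()
f, err := fs.NewFs(ctx, "myalias:path/to/dir")
if err != nil {
	return err
}
entries, err := f.List(ctx, "")
```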
@@ -11,6 +11,7 @@ import (
	_ "github.com/rclone/rclone/backend/local" // pull in test backend
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configfile"
	"github.com/stretchr/testify/require"
)

@@ -19,7 +20,7 @@ var (
)

func prepare(t *testing.T, root string) {
	configfile.Install()

	// Configure the remote
	config.FileSet(remoteName, "type", "alias")
@@ -54,21 +55,22 @@ func TestNewFS(t *testing.T) {
			{"four/under four.txt", 9, false},
		}},
		{"four", "..", "", true, []testEntry{
			{"four", -1, true},
			{"one%.txt", 6, false},
			{"three", -1, true},
			{"two.html", 7, false},
			{"five", -1, true},
			{"under four.txt", 9, false},
		}},
		{"", "../../three", "", true, []testEntry{
			{"underthree.txt", 9, false},
		}},
		{"four", "../../five", "", true, []testEntry{
			{"underfive.txt", 6, false},
		}},
	} {
		what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)

		remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
		require.NoError(t, err, what)
		prepare(t, remoteRoot)
		f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
		require.NoError(t, err, what)
		gotEntries, err := f.List(context.Background(), test.fsList)
		require.NoError(t, err, what)
@@ -90,7 +92,7 @@ func TestNewFS(t *testing.T) {

func TestNewFSNoRemote(t *testing.T) {
	prepare(t, "")
	f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))

	require.Error(t, err)
	require.Nil(t, f)
@@ -98,7 +100,7 @@ func TestNewFSNoRemote(t *testing.T) {

func TestNewFSInvalidRemote(t *testing.T) {
	prepare(t, "not_existing_test_remote:")
	f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))

	require.Error(t, err)
	require.Nil(t, f)
@@ -9,13 +9,17 @@ import (
	_ "github.com/rclone/rclone/backend/box"
	_ "github.com/rclone/rclone/backend/cache"
	_ "github.com/rclone/rclone/backend/chunker"
	_ "github.com/rclone/rclone/backend/compress"
	_ "github.com/rclone/rclone/backend/crypt"
	_ "github.com/rclone/rclone/backend/drive"
	_ "github.com/rclone/rclone/backend/dropbox"
	_ "github.com/rclone/rclone/backend/fichier"
	_ "github.com/rclone/rclone/backend/filefabric"
	_ "github.com/rclone/rclone/backend/ftp"
	_ "github.com/rclone/rclone/backend/googlecloudstorage"
	_ "github.com/rclone/rclone/backend/googlephotos"
	_ "github.com/rclone/rclone/backend/hasher"
	_ "github.com/rclone/rclone/backend/hdfs"
	_ "github.com/rclone/rclone/backend/http"
	_ "github.com/rclone/rclone/backend/hubic"
	_ "github.com/rclone/rclone/backend/jottacloud"
@@ -34,10 +38,13 @@ import (
	_ "github.com/rclone/rclone/backend/seafile"
	_ "github.com/rclone/rclone/backend/sftp"
	_ "github.com/rclone/rclone/backend/sharefile"
	_ "github.com/rclone/rclone/backend/sia"
	_ "github.com/rclone/rclone/backend/sugarsync"
	_ "github.com/rclone/rclone/backend/swift"
	_ "github.com/rclone/rclone/backend/tardigrade"
	_ "github.com/rclone/rclone/backend/union"
	_ "github.com/rclone/rclone/backend/uptobox"
	_ "github.com/rclone/rclone/backend/webdav"
	_ "github.com/rclone/rclone/backend/yandex"
	_ "github.com/rclone/rclone/backend/zoho"
)
@@ -14,16 +14,15 @@ we ignore assets completely!
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"strings"
	"time"

	acd "github.com/ncw/go-acd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
@@ -70,11 +69,10 @@ func init() {
		Prefix:      "acd",
		Description: "Amazon Drive",
		NewFs:       NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			return oauthutil.ConfigOut("", &oauthutil.Options{
				OAuth2Config: acdConfig,
			})
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name: "checkpoint",
@@ -83,16 +81,16 @@ func init() {
			Advanced: true,
		}, {
			Name: "upload_wait_per_gb",
			Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.

Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1 GiB in size and nearly every time for
files bigger than 10 GiB. This parameter controls the time rclone waits
for the file to appear.

The default value for this parameter is 3 minutes per GiB, so by
default it will wait 3 minutes for every GiB uploaded to see if the
file appears.

You can disable this feature by setting it to 0. This may cause
@@ -112,7 +110,7 @@ in this situation.`,

Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10 GiB. The default for this is 9 GiB which
shouldn't need to be changed.

To download files above this threshold, rclone requests a "tempLink"
@@ -144,6 +142,7 @@ type Fs struct {
	name         string         // name of this remote
	features     *fs.Features   // optional features
	opt          Options        // options for this Fs
	ci           *fs.ConfigInfo // global config
	c            *acd.Client    // the connection to the acd server
	noAuthClient *http.Client   // unauthenticated http client
	root         string         // the path we are working on
@@ -204,7 +203,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if resp != nil {
		if resp.StatusCode == 401 {
			f.tokenRenewer.Invalidate()
@@ -239,8 +241,7 @@ func filterRequest(req *http.Request) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
@@ -248,7 +249,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		return nil, err
	}
	root = parsePath(root)
	baseClient := fshttp.NewClient(ctx)
	if do, ok := baseClient.Transport.(interface {
		SetRequestFilter(f func(req *http.Request))
	}); ok {
@@ -256,29 +257,31 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	} else {
		fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
	}
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
	if err != nil {
		return nil, fmt.Errorf("failed to configure Amazon Drive: %w", err)
	}

	c := acd.NewClient(oAuthClient)
	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:         name,
		root:         root,
		opt:          *opt,
		ci:           ci,
		c:            c,
		pacer:        fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
		noAuthClient: fshttp.NewClient(ctx),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, err := f.getRootInfo(ctx)
		return err
	})

@@ -286,16 +289,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		_, resp, err = f.c.Account.GetEndpoints()
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed to get endpoints: %w", err)
	}

	// Get rootID
	rootInfo, err := f.getRootInfo(ctx)
	if err != nil || rootInfo.Id == nil {
		return nil, fmt.Errorf("failed to get root: %w", err)
	}
	f.trueRootID = *rootInfo.Id

@@ -335,11 +338,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// getRootInfo gets the root folder info
func (f *Fs) getRootInfo(ctx context.Context) (rootInfo *acd.Folder, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		rootInfo, resp, err = f.c.Nodes.GetRoot()
		return f.shouldRetry(ctx, resp, err)
	})
	return rootInfo, err
}
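Two migrations recur throughout this backend: `errors.Wrap` from `github.com/pkg/errors` is replaced by the standard library's `%w` verb, which keeps `errors.Is`/`errors.As` working, and reads of the global `fs.Config` are replaced by a config fetched from the context. In isolation the new patterns look like this (a sketch, not a complete function):

```
// wrap with fmt.Errorf and %w instead of errors.Wrap
if err != nil {
	return nil, fmt.Errorf("failed to get endpoints: %w", err)
}

// per-context config instead of the fs.Config global
ci := fs.GetConfig(ctx)
maxTries := ci.LowLevelRetries
```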
@@ -378,7 +381,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
	var subFolder *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if err == acd.ErrorNodeNotFound {
@@ -405,7 +408,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
	var info *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)
@@ -426,7 +429,7 @@ type listAllFn func(*acd.Node) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	query := "parents:" + dirID
	if directoriesOnly {
		query += " AND kind:" + folderKind
@@ -447,7 +450,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
		var resp *http.Response
		err = f.pacer.CallNoRetry(func() (bool, error) {
			nodes, resp, err = f.c.Nodes.GetNodes(&opts)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return false, err
@@ -502,11 +505,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	if err != nil {
		return nil, err
	}
	maxTries := f.ci.LowLevelRetries
	var iErr error
	for tries := 1; tries <= maxTries; tries++ {
		entries = nil
		_, err = f.listAll(ctx, directoryID, "", false, false, func(node *acd.Node) bool {
			remote := path.Join(dir, *node.Name)
			switch *node.Kind {
			case folderKind:
@@ -523,7 +526,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
				}
				entries = append(entries, o)
			default:
				// ignore ASSET, etc.
			}
			return false
		})
@@ -665,7 +668,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
		if ok {
			return false, nil
		}
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
@@ -680,7 +683,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return err
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -706,7 +709,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	if err != nil {
		return nil, err
	}
	err = f.moveNode(ctx, srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
	if err != nil {
		return nil, err
	}
@@ -717,7 +720,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
		dstObj         fs.Object
		srcErr, dstErr error
	)
	for i := 1; i <= f.ci.LowLevelRetries; i++ {
		_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
		if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
			// exit if error on source
@@ -732,7 +735,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
			// finished if src not found and dst found
			break
		}
		fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
		time.Sleep(1 * time.Second)
	}
	return dstObj, dstErr
@@ -745,7 +748,7 @@ func (f *Fs) DirCacheFlush() {
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -801,7 +804,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	var jsonStr string
	err = srcFs.pacer.Call(func() (bool, error) {
		jsonStr, err = srcInfo.GetMetadata()
		return srcFs.shouldRetry(ctx, nil, err)
	})
	if err != nil {
		fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
@@ -813,7 +816,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
		return err
	}

	err = f.moveNode(ctx, srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
	if err != nil {
		return err
	}
@@ -838,7 +841,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	if check {
		// check directory is empty
		empty := true
		_, err = f.listAll(ctx, rootID, "", false, false, func(node *acd.Node) bool {
			switch *node.Kind {
			case folderKind:
				empty = false
@@ -863,7 +866,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = node.Trash()
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
@@ -893,7 +896,7 @@ func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -985,7 +988,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
	var info *acd.File
	err = o.fs.pacer.Call(func() (bool, error) {
		info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if err == acd.ErrorNodeNotFound {
@@ -1042,7 +1045,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
		} else {
			in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
		}
		return o.fs.shouldRetry(ctx, resp, err)
	})
	return in, err
}
@@ -1065,7 +1068,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		if ok {
			return false, nil
		}
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
@@ -1075,70 +1078,70 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// Remove a node
func (f *Fs) removeNode(ctx context.Context, info *acd.Node) error {
	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = info.Trash()
		return f.shouldRetry(ctx, resp, err)
	})
	return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.removeNode(ctx, o.info)
}

// Restore a node
func (f *Fs) restoreNode(ctx context.Context, info *acd.Node) (newInfo *acd.Node, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		newInfo, resp, err = info.Restore()
		return f.shouldRetry(ctx, resp, err)
	})
	return newInfo, err
}

// Changes name of given node
func (f *Fs) renameNode(ctx context.Context, info *acd.Node, newName string) (newInfo *acd.Node, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
		return f.shouldRetry(ctx, resp, err)
	})
	return newInfo, err
}

// Replaces one parent with another, effectively moving the file. Leaves other
// parents untouched. ReplaceParent cannot be used when the file is trashed.
func (f *Fs) replaceParent(ctx context.Context, info *acd.Node, oldParentID string, newParentID string) error {
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.ReplaceParent(oldParentID, newParentID)
		return f.shouldRetry(ctx, resp, err)
	})
}

// Adds one additional parent to object.
func (f *Fs) addParent(ctx context.Context, info *acd.Node, newParentID string) error {
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.AddParent(newParentID)
		return f.shouldRetry(ctx, resp, err)
	})
}

// Remove given parent from object, leaving the other possible
// parents untouched. Object can end up having no parents.
func (f *Fs) removeParent(ctx context.Context, info *acd.Node, parentID string) error {
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.RemoveParent(parentID)
		return f.shouldRetry(ctx, resp, err)
	})
}

// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
	// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
	cantMove := fs.ErrorCantMove
	if useDirErrorMsgs {
@@ -1152,7 +1155,7 @@ func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, s

	if srcLeaf != dstLeaf {
		// fs.Debugf(name, "renaming")
		_, err = f.renameNode(ctx, srcInfo, dstLeaf)
		if err != nil {
			fs.Debugf(name, "Move: quick path rename failed: %v", err)
			goto OnConflict
@@ -1160,7 +1163,7 @@ func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, s
	}
	if srcDirectoryID != dstDirectoryID {
		// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
		err = f.replaceParent(ctx, srcInfo, srcDirectoryID, dstDirectoryID)
		if err != nil {
			fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
			return err
@@ -1173,13 +1176,13 @@ OnConflict:
	fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")

	// fs.Debugf(name, "Trashing file")
	err = f.removeNode(ctx, srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: remove node failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Renaming file")
	_, err = f.renameNode(ctx, srcInfo, dstLeaf)
	if err != nil {
		fs.Debugf(name, "Move: rename node failed: %v", err)
		return err
@@ -1187,19 +1190,19 @@ OnConflict:
	// note: replacing parent is forbidden by API, modifying them individually is
	// okay though
	// fs.Debugf(name, "Adding target parent")
	err = f.addParent(ctx, srcInfo, dstDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: addParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "removing original parent")
	err = f.removeParent(ctx, srcInfo, srcDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: removeParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Restoring")
	_, err = f.restoreNode(ctx, srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: restoreNode node failed: %v", err)
		return err
@@ -1,5 +1,6 @@
// Test AmazonCloudDrive filesystem interface

//go:build acd
// +build acd

package amazonclouddrive_test

File diff suppressed because it is too large
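The build-constraint hunks here and below are part of the migration to the `//go:build` syntax introduced in Go 1.17; gofmt keeps the old `// +build` line alongside the new one so that older toolchains still honour the constraint (the obsolete `go1.13` term is dropped along the way). The resulting pair at the top of a file looks like:

```
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob
```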
@@ -1,4 +1,5 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

@@ -1,14 +1,17 @@
// Test AzureBlob filesystem interface

//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

import (
	"context"
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/stretchr/testify/assert"
)

// TestIntegration runs integration tests against the remote
@@ -27,11 +30,36 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||
)
|
||||
|
||||
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
|
||||
func TestServicePrincipalFileSuccess(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
credentials := `
|
||||
{
|
||||
"appId": "my application (client) ID",
|
||||
"password": "my secret",
|
||||
"tenant": "my active directory tenant ID"
|
||||
}
|
||||
`
|
||||
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
|
||||
if assert.NoError(t, err) {
|
||||
assert.NotNil(t, tokenRefresher)
|
||||
}
|
||||
}
|
||||
|
||||
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
|
||||
func TestServicePrincipalFileFailure(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
credentials := `
|
||||
{
|
||||
"appId": "my application (client) ID",
|
||||
"tenant": "my active directory tenant ID"
|
||||
}
|
||||
`
|
||||
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
// Build for azureblob for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build plan9 solaris js !go1.13
|
||||
//go:build plan9 || solaris || js
|
||||
// +build plan9 solaris js
|
||||
|
||||
package azureblob
|
||||
|
||||
backend/azureblob/imds.go (new file, 137 lines)
@@ -0,0 +1,137 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"

"github.com/Azure/go-autorest/autorest/adal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)

const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)

// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string

type msiIdentifierType int

const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)

type userMSI struct {
Type msiIdentifierType
Value string
}

type httpError struct {
Response *http.Response
}

func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}

// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)

// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()

// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")

// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}

// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}

b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))

// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
}

return result, nil
}
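A minimal usage sketch for the new GetMSIToken helper, not part of the diff itself. The GUID is a placeholder, and the function would have to live inside the azureblob package since userMSI is unexported:

// Sketch: request a token for a user-assigned identity selected by client ID.
func exampleMSIToken(ctx context.Context) {
	// Placeholder GUID — substitute a real user-assigned identity client ID.
	id := &userMSI{Type: msiClientID, Value: "00000000-0000-0000-0000-000000000000"}
	token, err := GetMSIToken(ctx, id)
	if err != nil {
		fs.Debugf(nil, "MSI token unavailable (likely not running on an Azure VM): %v", err)
		return
	}
	// token.AccessToken is a bearer token for the https://storage.azure.com resource.
	_ = token.AccessToken
}

Passing identity == nil requests the system-assigned identity, which is what the TestIMDSErrors test below exercises.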
backend/azureblob/imds_test.go (new file, 118 lines)
@@ -0,0 +1,118 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"

"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}

func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)

// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}

for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}

// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
}
}
}

func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}

func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}
@@ -2,12 +2,11 @@ package api

import (
"fmt"
"path"
"strconv"
"strings"
"time"

"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/version"
)

// Error describes a B2 error response
@@ -63,16 +62,17 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
return nil
}

const versionFormat = "-v2006-01-02-150405.000"
// HasVersion returns true if it looks like the passed filename has a timestamp on it.
//
// Note that the passed filename's timestamp may still be invalid even if this
// function returns true.
func HasVersion(remote string) bool {
return version.Match(remote)
}

// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := time.Time(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
return version.Add(remote, time.Time(t))
}

// RemoveVersion removes the timestamp from a filename as a version string.
@@ -80,24 +80,9 @@ func (t Timestamp) AddVersion(remote string) string {
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
newRemote = remote
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
if len(base) < len(versionFormat) {
return
}
versionStart := len(base) - len(versionFormat)
// Check it ends in -xxx
if base[len(base)-4] != '-' {
return
}
// Replace with .xxx for parsing
base = base[:len(base)-4] + "." + base[len(base)-3:]
newT, err := time.Parse(versionFormat, base[versionStart:])
if err != nil {
return
}
return Timestamp(newT), base[:versionStart] + ext
time, newRemote := version.Remove(remote)
t = Timestamp(time)
return
}
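For reference, a self-contained sketch of the lib/version round trip that AddVersion and RemoveVersion now delegate to; the timestamp is illustrative and the behaviour matches what the tests below exercise:

package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/version"
)

func main() {
	t := time.Date(2001, 2, 3, 4, 5, 6, 123000000, time.UTC)
	name := version.Add("potato.txt", t) // inserts the version before the extension
	fmt.Println(name)                    // potato-v2001-02-03-040506-123.txt
	when, base := version.Remove(name)   // recovers the time (ms precision) and base name
	fmt.Println(when.Equal(t), base)     // true potato.txt
}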
// IsZero returns true if the timestamp is uninitialized

@@ -13,7 +13,6 @@ import (
var (
emptyT api.Timestamp
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)

@@ -36,40 +35,6 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}

func TestTimestampAddVersion(t *testing.T) {
for _, test := range []struct {
t api.Timestamp
in string
expected string
}{
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
{t1, "potato", "potato-v2001-02-03-040506-123"},
{t1, "", "-v2001-02-03-040506-123"},
} {
actual := test.t.AddVersion(test.in)
assert.Equal(t, test.expected, actual, test.in)
}
}

func TestTimestampRemoveVersion(t *testing.T) {
for _, test := range []struct {
in string
expectedT api.Timestamp
expectedRemote string
}{
{"potato.txt", emptyT, "potato.txt"},
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
{"potato-v2001-02-03-040506-123", t1, "potato"},
{"-v2001-02-03-040506-123", t1, ""},
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
} {
actualT, actualRemote := api.RemoveVersion(test.in)
assert.Equal(t, test.expectedT, actualT, test.in)
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
}
}

func TestTimestampIsZero(t *testing.T) {
assert.True(t, emptyT.IsZero())
assert.False(t, t0.IsZero())
backend/b2/b2.go (244 lines changed)
@@ -9,6 +9,7 @@ import (
"bytes"
"context"
"crypto/sha1"
"errors"
"fmt"
gohash "hash"
"io"
@@ -19,7 +20,6 @@ import (
"sync"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -44,18 +44,20 @@ const (
timeHeader = headerPrefix + timeKey
sha1Key = "large_file_sha1"
sha1Header = "X-Bz-Content-Sha1"
sha1InfoHeader = headerPrefix + sha1Key
testModeHeader = "X-Bz-Test-Mode"
idHeader = "X-Bz-File-Id"
nameHeader = "X-Bz-File-Name"
timestampHeader = "X-Bz-Upload-Timestamp"
retryAfterHeader = "Retry-After"
minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
@@ -73,15 +75,15 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID",
Help: "Account ID or Application Key ID.",
Required: true,
}, {
Name: "key",
Help: "Application Key",
Help: "Application Key.",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\nLeave blank normally.",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: "test_mode",
@@ -101,7 +103,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Advanced: true,
}, {
Name: "versions",
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {
@@ -114,32 +116,34 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration

Files above this size will be uploaded in chunks of "--b2-chunk-size".

This value should be set no larger than 4.657GiB (== 5GB).`,
This value should be set no larger than 4.657 GiB (== 5 GB).`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Help: `Cutoff for switching to multipart copy.

Any files larger than this that need to be server side copied will be
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.

The minimum is 0 and the maximum is 4.6GB.`,
The minimum is 0 and the maximum is 4.6 GiB.`,
Default: largeFileCopyCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size. Must fit in memory.
Help: `Upload chunk size.

When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimum size.`,
When uploading large files, chunk the file into this size.

Must fit in memory. These chunks are buffered in memory and there
might a maximum of "--transfers" chunks in progress at once.

5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files
Help: `Disable checksums for large (> upload cutoff) files.

Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
@@ -153,7 +157,9 @@ to start uploading.`,

This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network.
This is probably only useful for a public bucket.
Rclone works with private buckets by sending an "Authorization" header.
If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
Leave blank if you want to use the endpoint provided by Backblaze.`,
Advanced: true,
}, {
@@ -214,6 +220,7 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *rest.Client // the connection to the b2 server
rootBucket string // bucket part of root (if any)
@@ -290,7 +297,7 @@ func (o *Object) split() (bucket, bucketPath string) {

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
401, // Unauthorized (e.g. "Token has expired")
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
@@ -300,7 +307,10 @@ var retryErrorCodes = []int{

// shouldRetryNoAuth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// For 429 or 503 errors look at the Retry-After: header and
// set the retry appropriately, starting with a minimum of 1
// second if it isn't set.
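The fserrors.ContextError guard added above makes the pacer stop retrying as soon as the request context is cancelled. In plain Go the check is roughly equivalent to the following sketch; per its use here, fserrors.ContextError additionally rewrites the error in place to the context error:

// Hypothetical inline equivalent of the added guard:
if ctxErr := ctx.Err(); ctxErr != nil {
	return false, ctxErr // context cancelled or deadline exceeded: do not retry
}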
@@ -331,7 +341,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
}
return true, err
}
return f.shouldRetryNoReauth(resp, err)
return f.shouldRetryNoReauth(ctx, resp, err)
}

// errorHandler parses a non 2xx error response into an error
@@ -356,7 +366,7 @@ func errorHandler(resp *http.Response) error {

func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -371,7 +381,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)

func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
if cs < opt.ChunkSize {
return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
return nil
}
@@ -391,21 +401,24 @@ func (f *Fs) setRoot(root string) {
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.UploadCutoff < opt.ChunkSize {
opt.UploadCutoff = opt.ChunkSize
fs.Infof(nil, "b2: raising upload cutoff to chunk size: %v", opt.UploadCutoff)
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "b2: upload cutoff")
return nil, fmt.Errorf("b2: upload cutoff: %w", err)
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "b2: chunk size")
return nil, fmt.Errorf("b2: chunk size: %w", err)
}
if opt.Account == "" {
return nil, errors.New("account not found")
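The recurring change in this file swaps github.com/pkg/errors wrapping for stdlib wrapping with %w. A minimal, self-contained equivalence sketch (error text illustrative):

package main

import (
	"errors"
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

func main() {
	cause := errors.New("boom")
	withPkg := pkgerrors.Wrap(cause, "b2: chunk size") // old style: "b2: chunk size: boom"
	withStd := fmt.Errorf("b2: chunk size: %w", cause) // new style: same message
	fmt.Println(withPkg.Error() == withStd.Error())    // true
	fmt.Println(errors.Is(withStd, cause))             // true: %w keeps the cause reachable
}

The message format is identical, so the change is behaviour-preserving while dropping the third-party dependency.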
@@ -416,20 +429,22 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.Endpoint == "" {
opt.Endpoint = defaultEndpoint
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
ci: ci,
srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
cache: bucket.NewCache(),
_bucketID: make(map[string]string, 1),
_bucketType: make(map[string]string, 1),
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
fs.Config.Transfers,
ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
@@ -439,7 +454,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
}).Fill(ctx, f)
// Set the test flag if required
if opt.TestMode != "" {
testMode := strings.TrimSpace(opt.TestMode)
@@ -448,7 +463,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
err = f.authorizeAccount(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to authorize account")
return nil, fmt.Errorf("failed to authorize account: %w", err)
}
// If this is a key limited to a single bucket, it must exist already
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
@@ -457,7 +472,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.rootBucket {
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.cache.MarkOK(f.rootBucket)
f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
@@ -469,12 +484,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
f.setRoot(oldRoot)
return f, nil
}
return nil, err
// File doesn't exist so return old f
f.setRoot(oldRoot)
return f, nil
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
@@ -497,10 +509,10 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
return f.shouldRetryNoReauth(resp, err)
return f.shouldRetryNoReauth(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to authenticate")
return fmt.Errorf("failed to authenticate: %w", err)
}
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil
@@ -546,7 +558,7 @@ func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUp
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
return upload, nil
}
@@ -702,7 +714,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
remote := file.Name[len(prefix):]
// Check for directory
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if isDirectory {
if isDirectory && len(remote) > 1 {
remote = remote[:len(remote)-1]
}
if addBucket {
@@ -1036,7 +1048,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
}
}
}
return errors.Wrap(err, "failed to create bucket")
return fmt.Errorf("failed to create bucket: %w", err)
}
f.setBucketID(bucket, response.ID)
f.setBucketType(bucket, response.Type)
@@ -1071,7 +1083,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to delete bucket")
return fmt.Errorf("failed to delete bucket: %w", err)
}
f.clearBucketID(bucket)
f.clearBucketType(bucket)
@@ -1112,7 +1124,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
return nil
}
}
return errors.Wrapf(err, "failed to hide %q", bucketPath)
return fmt.Errorf("failed to hide %q: %w", bucketPath, err)
}
return nil
}
@@ -1133,7 +1145,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrapf(err, "failed to delete %q", Name)
return fmt.Errorf("failed to delete %q: %w", Name, err)
}
return nil
}
@@ -1168,10 +1180,10 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
}

// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
toBeDeleted := make(chan *api.File, f.ci.Transfers)
var wg sync.WaitGroup
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
wg.Add(f.ci.Transfers)
for i := 0; i < f.ci.Transfers; i++ {
go func() {
defer wg.Done()
for object := range toBeDeleted {
@@ -1183,7 +1195,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(err)
tr.Done(ctx, err)
}
}()
}
@@ -1211,7 +1223,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
toBeDeleted <- object
}
last = remote
tr.Done(nil)
tr.Done(ctx, nil)
}
return nil
}))
@@ -1234,7 +1246,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, "", true)
}

// copy does a server side copy from dstObj <- srcObj
// copy does a server-side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
@@ -1291,7 +1303,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
return dstObj.decodeMetaDataFileInfo(&response)
}

// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -1343,7 +1355,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
}
var request = api.GetDownloadAuthorizationRequest{
BucketID: bucketID,
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.rootDirectory, remote)),
ValidDurationInSeconds: validDurationInSeconds,
}
var response api.GetDownloadAuthorizationResponse
@@ -1352,7 +1364,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return "", errors.Wrap(err, "failed to get download authorization")
return "", fmt.Errorf("failed to get download authorization: %w", err)
}
return response.AuthorizationToken, nil
}
@@ -1440,7 +1452,7 @@ func (o *Object) Size() int64 {
// Make sure it is lower case
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
const unverified = "unverified:"
@@ -1494,8 +1506,11 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}

// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// getMetaDataListing gets the metadata from the object unconditionally from the listing
//
// Note that listing is a class C transaction which costs more than
// the B transaction used in getMetaData
func (o *Object) getMetaDataListing(ctx context.Context) (info *api.File, err error) {
bucket, bucketPath := o.split()
maxSearched := 1
var timestamp api.Timestamp
@@ -1528,6 +1543,19 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
return info, nil
}

// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// If using versions and have a version suffix, need to list the directory to find the correct versions
if o.fs.opt.Versions {
timestamp, _ := api.RemoveVersion(o.remote)
if !timestamp.IsZero() {
return o.getMetaDataListing(ctx)
}
}
_, info, err = o.getOrHead(ctx, "HEAD", nil)
return info, err
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
@@ -1641,14 +1669,14 @@ func (file *openFile) Close() (err error) {

// Check to see we read the correct number of bytes
if file.o.Size() != file.bytes {
return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
}

// Check the SHA1
receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
}

return nil
@@ -1657,12 +1685,11 @@ func (file *openFile) Close() (err error) {
// Check it satisfies the interfaces
var _ io.ReadCloser = &openFile{}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
opts := rest.Opts{
Method: "GET",
Options: options,
Method: method,
Options: options,
NoResponse: method == "HEAD",
}

// Use downloadUrl from backblaze if downloadUrl is not set
@@ -1680,37 +1707,74 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
bucket, bucketPath := o.split()
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
// 404 for files, 400 for directories
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
return nil, nil, fs.ErrorObjectNotFound
}
return nil, nil, fmt.Errorf("failed to %s for download: %w", method, err)
}

// Parse the time out of the headers if possible
err = o.parseTimeString(resp.Header.Get(timeHeader))
// NB resp may be Open here - don't return err != nil without closing

// Convert the Headers into an api.File
var uploadTimestamp api.Timestamp
err = uploadTimestamp.UnmarshalJSON([]byte(resp.Header.Get(timestampHeader)))
if err != nil {
fs.Debugf(o, "Bad "+timestampHeader+" header: %v", err)
}
var Info = make(map[string]string)
for k, vs := range resp.Header {
k = strings.ToLower(k)
for _, v := range vs {
if strings.HasPrefix(k, headerPrefix) {
Info[k[len(headerPrefix):]] = v
}
}
}
info = &api.File{
ID: resp.Header.Get(idHeader),
Name: resp.Header.Get(nameHeader),
Action: "upload",
Size: resp.ContentLength,
UploadTimestamp: uploadTimestamp,
SHA1: resp.Header.Get(sha1Header),
ContentType: resp.Header.Get("Content-Type"),
Info: Info,
}
// When reading files from B2 via cloudflare using
// --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old
// length read from the listing.
if info.Size < 0 {
info.Size = o.size
}
return resp, info, nil
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)

resp, info, err := o.getOrHead(ctx, "GET", options)
if err != nil {
return nil, err
}

// Don't check length or hash or metadata on partial content
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil
}

err = o.decodeMetaData(info)
if err != nil {
_ = resp.Body.Close()
return nil, err
}
// Read sha1 from header if it isn't set
if o.sha1 == "" {
o.sha1 = resp.Header.Get(sha1Header)
fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
// if sha1 header is "none" (in big files), then need
// to read it from the metadata
if o.sha1 == "none" {
o.sha1 = resp.Header.Get(sha1InfoHeader)
fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
}
o.sha1 = cleanSHA1(o.sha1)
}
// Don't check length or hash on partial content
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil
}
return newOpenFile(o, resp), nil
}
@@ -15,7 +15,6 @@ import (
"strings"
"sync"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -102,7 +101,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
parts++
}
if parts > maxParts {
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
@@ -185,7 +184,7 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
@@ -230,14 +229,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
// use chunked encoding. The minimum size of every part but
// the last one is 100 MB (100,000,000 bytes)
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of the this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// data arrived correctly.  The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",
@@ -406,7 +405,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}

part := part // for the closure

@@ -36,13 +36,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {

// Error is returned from box when things go wrong
type Error struct {
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage `json:"context_info"`
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
}

// Error returns a string for the error and satisfies the error interface
@@ -61,7 +61,7 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)

// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"

// Types of things in Item
const (
@@ -90,6 +90,12 @@ type Item struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
OwnedBy struct {
Type string `json:"type"`
ID string `json:"id"`
Name string `json:"name"`
Login string `json:"login"`
} `json:"owned_by"`
}

// ModTime returns the modification time of the item
@@ -103,10 +109,11 @@ func (i *Item) ModTime() (t time.Time) {

// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
@@ -132,6 +139,38 @@ type UploadFile struct {
ContentModifiedAt Time `json:"content_modified_at"`
}

// PreUploadCheck is the request for upload preflight check
type PreUploadCheck struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
Size *int64 `json:"size,omitempty"`
}

// PreUploadCheckResponse is the response from upload preflight check
// if successful
type PreUploadCheckResponse struct {
UploadToken string `json:"upload_token"`
UploadURL string `json:"upload_url"`
}

// PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct {
Conflicts struct {
Type string `json:"type"`
ID string `json:"id"`
FileVersion struct {
Type string `json:"type"`
ID string `json:"id"`
Sha1 string `json:"sha1"`
} `json:"file_version"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
Sha1 string `json:"sha1"`
Name string `json:"name"`
} `json:"conflicts"`
}
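For orientation, a sketch of the JSON body the new PreUploadCheck type marshals to. The field values are illustrative, and Parent is assumed to be the existing box api struct with an id field ("0" is Box's root folder ID):

size := int64(1048576)
check := api.PreUploadCheck{
	Name:   "report.pdf",
	Parent: api.Parent{ID: "0"},
	Size:   &size, // pointer so a nil size is omitted from the JSON
}
// marshals to: {"name":"report.pdf","parent":{"id":"0"},"size":1048576}

If the name is already taken, Box returns an "item_name_in_use" error whose context_info decodes into PreUploadCheckConflict above.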
// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
ContentModifiedAt Time `json:"content_modified_at"`

@@ -14,24 +14,19 @@ import (
"crypto/rsa"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"

"github.com/youmark/pkcs8"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -42,9 +37,13 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
@@ -57,7 +56,6 @@ const (
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
tokenURL = "https://api.box.com/oauth2/token"
@@ -84,24 +82,24 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
var err error
// If using box config.json, use JWT auth
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(jsonFile, boxSubType, name, m)
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
err = oauthutil.Config("box", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
}
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
@@ -110,23 +108,23 @@ func init() {
Advanced: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\nLeave blank normally.",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
Examples: []fs.OptionExample{{
Value: "user",
Help: "Rclone should act on behalf of a user",
Help: "Rclone should act on behalf of a user.",
}, {
Value: "enterprise",
Help: "Rclone should act on behalf of a service account",
Help: "Rclone should act on behalf of a service account.",
}},
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
Help: "Cutoff for switching to multipart upload (>= 50 MiB).",
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
@@ -134,6 +132,16 @@ func init() {
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}, {
Name: "list_chunk",
Default: 1000,
Help: "Size of listing chunk 1-1000.",
Advanced: true,
}, {
Name: "owned_by",
Default: "",
Help: "Only show items owned by the login (email address) passed in.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -153,23 +161,23 @@ func init() {
})
}

func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return fmt.Errorf("get box config: %w", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return fmt.Errorf("get decrypted private key: %w", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return fmt.Errorf("get claims: %w", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
@@ -177,11 +185,11 @@ func refreshJWTToken(jsonFile string, boxSubType string, name string, m configma
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, errors.Wrap(err, "box: failed to read Box config")
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
err = json.Unmarshal(file, &boxConfig)
if err != nil {
return nil, errors.Wrap(err, "box: failed to parse Box config")
return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
}
return boxConfig, nil
}
@@ -189,7 +197,7 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, errors.Wrap(err, "box: failed to generate random string for jti")
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
}

claims = &jws.ClaimSet{
@@ -230,12 +238,12 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err

block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
return nil, errors.Wrap(err, "box: extra data included in private key")
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
}

rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
if err != nil {
return nil, errors.Wrap(err, "box: failed to decrypt private key")
return nil, fmt.Errorf("box: failed to decrypt private key: %w", err)
}

return rsaKey.(*rsa.PrivateKey), nil
@@ -248,6 +256,8 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"`
}

// Fs represents a remote box
@@ -317,13 +327,23 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
authRetry := false

if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}

// Box API errors which should be retries
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "operation_blocked_temporary" {
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}

return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
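The rewritten header check above is behaviour-preserving: strings.Contains(s, sub) is defined as strings.Index(s, sub) >= 0, and resp.Header.Get returns the first value (or "" when the header is absent), so the explicit one-element slice check is no longer needed. A minimal sketch:

h := resp.Header.Get("Www-Authenticate") // "" if the header is absent
_ = strings.Index(h, "expired_token") >= 0 // old form
_ = strings.Contains(h, "expired_token")   // new form, identical result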
@@ -338,8 +358,8 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 		return nil, err
 	}
 
-	found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
-		if item.Name == leaf {
+	found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
+		if strings.EqualFold(item.Name, leaf) {
 			info = item
 			return true
 		}
@@ -372,8 +392,7 @@ func errorHandler(resp *http.Response) error {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -382,33 +401,34 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 
 	if opt.UploadCutoff < minUploadCutoff {
-		return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
+		return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
 	}
 
 	root = parsePath(root)
 
-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(ctx)
 	var ts *oauthutil.TokenSource
 	// If not using an accessToken, create an oauth client and tokensource
 	if opt.AccessToken == "" {
-		client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
+		client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to configure Box")
+			return nil, fmt.Errorf("failed to configure Box: %w", err)
 		}
 	}
 
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
 		srv:         rest.NewClient(client).SetRoot(rootURL),
-		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
+		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive:         true,
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	f.srv.SetErrorHandler(errorHandler)
 
 	// If using an accessToken, set the Authorization header
@@ -424,7 +444,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// should do so whether there are uploads pending or not.
 	if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
 		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-			err := refreshJWTToken(jsonFile, boxSubType, name, m)
+			err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
 			return err
 		})
 		f.tokenRenewer.Start()
@@ -463,7 +483,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 		return nil, err
 	}
-	f.features.Fill(&tempF)
+	f.features.Fill(ctx, &tempF)
 	// XXX: update the old f here instead of returning tempF, since
 	// `features` were already filled with functions having *f as a receiver.
 	// See https://github.com/rclone/rclone/issues/2182
@@ -513,8 +533,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	// Find the leaf in pathID
-	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
-		if item.Name == leaf {
+	found, err = f.listAll(ctx, pathID, true, false, true, func(item *api.Item) bool {
+		if strings.EqualFold(item.Name, leaf) {
 			pathIDOut = item.ID
 			return true
 		}
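Both lookup callbacks switch from `item.Name == leaf` to `strings.EqualFold(item.Name, leaf)` because Box is case-insensitive (note `CaseInsensitive: true` in the features above): a leaf stored as `Foo.txt` must match a listing entry `foo.TXT`. `EqualFold` performs Unicode case folding without the allocations of comparing two `strings.ToLower` results:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	leaf, itemName := "Foo.txt", "foo.TXT"
	fmt.Println(itemName == leaf)                  // false: exact comparison misses
	fmt.Println(strings.EqualFold(itemName, leaf)) // true: case-insensitive match
}
```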
@@ -548,7 +568,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		//fmt.Printf("...Error %v\n", err)
@@ -569,26 +589,29 @@ type listAllFn func(*api.Item) bool
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, activeOnly bool, fn listAllFn) (found bool, err error) {
 	opts := rest.Opts{
 		Method:     "GET",
 		Path:       "/folders/" + dirID + "/items",
 		Parameters: fieldsValue(),
 	}
-	opts.Parameters.Set("limit", strconv.Itoa(listChunks))
-	offset := 0
+	opts.Parameters.Set("limit", strconv.Itoa(f.opt.ListChunk))
+	opts.Parameters.Set("usemarker", "true")
+	var marker *string
 OUTER:
 	for {
-		opts.Parameters.Set("offset", strconv.Itoa(offset))
+		if marker != nil {
+			opts.Parameters.Set("marker", *marker)
+		}
 
 		var result api.FolderItems
 		var resp *http.Response
 		err = f.pacer.Call(func() (bool, error) {
 			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-			return shouldRetry(resp, err)
+			return shouldRetry(ctx, resp, err)
 		})
 		if err != nil {
-			return found, errors.Wrap(err, "couldn't list files")
+			return found, fmt.Errorf("couldn't list files: %w", err)
 		}
 		for i := range result.Entries {
 			item := &result.Entries[i]
@@ -604,7 +627,10 @@ OUTER:
 			fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
 			continue
 		}
-		if item.ItemStatus != api.ItemStatusActive {
+		if activeOnly && item.ItemStatus != api.ItemStatusActive {
+			continue
+		}
+		if f.opt.OwnedBy != "" && f.opt.OwnedBy != item.OwnedBy.Login {
 			continue
 		}
 		item.Name = f.opt.Enc.ToStandardName(item.Name)
@@ -613,8 +639,8 @@ OUTER:
 			break OUTER
 		}
 	}
-		offset += result.Limit
-		if offset >= result.TotalCount {
+		marker = result.NextMarker
+		if marker == nil {
 			break
 		}
 	}
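Listing switches from offset arithmetic (`offset += result.Limit`, stop once `offset >= result.TotalCount`) to marker paging: send `usemarker=true`, hand back the opaque `next_marker` the server returned, and stop when none comes back. Markers stay consistent when items are added or removed mid-listing, which offset paging is not. A generic sketch of the loop shape; the `page` type and the `fetch` closure are hypothetical:

```go
package main

import "fmt"

// page is a hypothetical response: some items plus an optional opaque marker.
type page struct {
	items      []string
	nextMarker *string
}

func main() {
	// fetch simulates a server that pages three items two at a time.
	data := []string{"a", "b", "c"}
	fetch := func(marker *string) page {
		start := 0
		if marker != nil {
			fmt.Sscanf(*marker, "%d", &start)
		}
		end := start + 2
		if end >= len(data) {
			return page{items: data[start:]} // last page: no marker
		}
		m := fmt.Sprint(end)
		return page{items: data[start:end], nextMarker: &m}
	}

	var marker *string
	for {
		p := fetch(marker) // only send "marker" once the server gave us one
		fmt.Println(p.items)
		marker = p.nextMarker
		if marker == nil { // no marker means the listing is complete
			break
		}
	}
}
```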
@@ -636,7 +662,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		return nil, err
 	}
 	var iErr error
-	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
+	_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
 		remote := path.Join(dir, info.Name)
 		if info.Type == api.ItemTypeFolder {
 			// cache the directory ID for later lookups
@@ -683,22 +709,80 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 	return o, leaf, directoryID, nil
 }
 
+// preUploadCheck checks to see if a file can be uploaded
+//
+// It returns "", nil if the file is good to go
+// It returns "ID", nil if the file must be updated
+func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
+	check := api.PreUploadCheck{
+		Name: f.opt.Enc.FromStandardName(leaf),
+		Parent: api.Parent{
+			ID: directoryID,
+		},
+	}
+	if size >= 0 {
+		check.Size = &size
+	}
+	opts := rest.Opts{
+		Method: "OPTIONS",
+		Path:   "/files/content/",
+	}
+	var result api.PreUploadCheckResponse
+	var resp *http.Response
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallJSON(ctx, &opts, &check, &result)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" {
+			var conflict api.PreUploadCheckConflict
+			err = json.Unmarshal(apiErr.ContextInfo, &conflict)
+			if err != nil {
+				return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
+			}
+			if conflict.Conflicts.Type != api.ItemTypeFile {
+				return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
+			}
+			return conflict.Conflicts.ID, nil
+		}
+		return "", fmt.Errorf("pre-upload check: %w", err)
+	}
+	return "", nil
+}
+
 // Put the object
 //
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
-	switch err {
-	case nil:
-		return existingObj, existingObj.Update(ctx, in, src, options...)
-	case fs.ErrorObjectNotFound:
-		// Not found so create it
-		return f.PutUnchecked(ctx, in, src)
-	default:
+	// If directory doesn't exist, file doesn't exist so can upload
+	remote := src.Remote()
+	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
+	if err != nil {
+		if err == fs.ErrorDirNotFound {
+			return f.PutUnchecked(ctx, in, src, options...)
+		}
 		return nil, err
 	}
+
+	// Preflight check the upload, which returns the ID if the
+	// object already exists
+	ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
+	if err != nil {
+		return nil, err
+	}
+	if ID == "" {
+		return f.PutUnchecked(ctx, in, src, options...)
+	}
+
+	// If object exists then create a skeleton one with just id
+	o := &Object{
+		fs:     f,
+		remote: remote,
+		id:     ID,
+	}
+	return o, o.Update(ctx, in, src, options...)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
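`preUploadCheck` preflights the upload with `OPTIONS /files/content/`: an `item_name_in_use` API error is not treated as a failure but as a signal to update the existing file, whose ID is recovered from the error's `context_info`. A hedged sketch of this "typed error as control flow" idea, with a hypothetical `apiError` standing in for rclone's `api.Error`:

```go
package main

import (
	"errors"
	"fmt"
)

// apiError is a hypothetical stand-in for a backend's structured API error.
type apiError struct {
	Code       string
	ConflictID string
}

func (e *apiError) Error() string { return e.Code }

// preflight returns ("", nil) when the name is free, or the existing
// file's ID when the server reports a name conflict.
func preflight(err error) (string, error) {
	if err == nil {
		return "", nil
	}
	var apiErr *apiError
	if errors.As(err, &apiErr) && apiErr.Code == "item_name_in_use" {
		return apiErr.ConflictID, nil // update this object instead of creating
	}
	return "", fmt.Errorf("pre-upload check: %w", err)
}

func main() {
	id, err := preflight(&apiError{Code: "item_name_in_use", ConflictID: "12345"})
	fmt.Println(id, err) // 12345 <nil>
}
```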
@@ -740,7 +824,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
 	}
 	return f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 }
 
@@ -767,10 +851,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "rmdir failed")
+		return fmt.Errorf("rmdir failed: %w", err)
 	}
 	f.dirCache.FlushDir(dir)
 	if err != nil {
@@ -791,7 +875,7 @@ func (f *Fs) Precision() time.Duration {
 	return time.Second
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -814,7 +898,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	srcPath := srcObj.fs.rootSlash() + srcObj.remote
 	dstPath := f.rootSlash() + remote
 	if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
-		return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
+		return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
 	}
 
 	// Create temporary object
@@ -839,7 +923,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var info *api.Item
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -877,7 +961,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -895,10 +979,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to read user info")
+		return nil, fmt.Errorf("failed to read user info: %w", err)
 	}
 	// FIXME max upload size would be useful to use in Update
 	usage = &fs.Usage{
@@ -909,7 +993,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	return usage, nil
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -945,7 +1029,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1008,12 +1092,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return info.SharedLink.URL, err
 }
 
-// deletePermanently permenently deletes a trashed file
+// deletePermanently permanently deletes a trashed file
 func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
 	opts := rest.Opts{
 		Method: "DELETE",
@@ -1026,51 +1110,42 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
 	}
 	return f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 }
 
 // CleanUp empties the trash
 func (f *Fs) CleanUp(ctx context.Context) (err error) {
-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/folders/trash/items",
-		Parameters: url.Values{
-			"fields": []string{"type", "id"},
-		},
-	}
-	opts.Parameters.Set("limit", strconv.Itoa(listChunks))
-	offset := 0
-	for {
-		opts.Parameters.Set("offset", strconv.Itoa(offset))
-
-		var result api.FolderItems
-		var resp *http.Response
-		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-			return shouldRetry(resp, err)
-		})
-		if err != nil {
-			return errors.Wrap(err, "couldn't list trash")
-		}
-		for i := range result.Entries {
-			item := &result.Entries[i]
-			if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
-				err := f.deletePermanently(ctx, item.Type, item.ID)
-				if err != nil {
-					return errors.Wrap(err, "failed to delete file")
-				}
-			} else {
-				fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
-				continue
-			}
-		}
-		offset += result.Limit
-		if offset >= result.TotalCount {
-			break
-		}
-	}
-	return
+	var (
+		deleteErrors       = int64(0)
+		concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
+		wg                 sync.WaitGroup
+	)
+	_, err = f.listAll(ctx, "trash", false, false, false, func(item *api.Item) bool {
+		if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
+			wg.Add(1)
+			concurrencyControl <- struct{}{}
+			go func() {
+				defer func() {
+					<-concurrencyControl
+					wg.Done()
+				}()
+				err := f.deletePermanently(ctx, item.Type, item.ID)
+				if err != nil {
+					fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
+					atomic.AddInt64(&deleteErrors, 1)
+				}
+			}()
+		} else {
+			fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
+		}
+		return false
+	})
+	wg.Wait()
+	if deleteErrors != 0 {
+		return fmt.Errorf("failed to delete %d trash items", deleteErrors)
+	}
+	return err
 }
 
 // DirCacheFlush resets the directory cache - used in testing as an
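The rewritten `CleanUp` fans the per-item deletes out to goroutines, bounded by a buffered channel acting as a counting semaphore sized from the configured number of checkers, and counts failures with `sync/atomic` instead of aborting the sweep on the first error. The same pattern in isolation, with a stand-in `deleteItem`:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var (
		deleteErrors       int64
		concurrencyControl = make(chan struct{}, 4) // at most 4 deletes in flight
		wg                 sync.WaitGroup
	)
	deleteItem := func(id int) error { // stand-in for the real per-item delete
		if id%5 == 0 {
			return fmt.Errorf("delete %d failed", id)
		}
		return nil
	}
	for id := 0; id < 20; id++ {
		id := id // capture the loop variable for the goroutine (pre-Go 1.22 semantics)
		wg.Add(1)
		concurrencyControl <- struct{}{} // acquire a slot; blocks when 4 are busy
		go func() {
			defer func() {
				<-concurrencyControl // release the slot
				wg.Done()
			}()
			if err := deleteItem(id); err != nil {
				atomic.AddInt64(&deleteErrors, 1) // record the failure, don't abort
			}
		}()
	}
	wg.Wait()
	if n := atomic.LoadInt64(&deleteErrors); n != 0 {
		fmt.Printf("failed to delete %d items\n", n) // failed to delete 4 items
	}
}
```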
// DirCacheFlush resets the directory cache - used in testing as an
|
||||
@@ -1124,8 +1199,11 @@ func (o *Object) Size() int64 {
|
||||
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
if info.Type == api.ItemTypeFolder {
|
||||
return fs.ErrorIsDir
|
||||
}
|
||||
if info.Type != api.ItemTypeFile {
|
||||
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
||||
return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
|
||||
}
|
||||
o.hasMetaData = true
|
||||
o.size = int64(info.Size)
|
||||
@@ -1182,7 +1260,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
|
||||
var info *api.Item
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
|
||||
return shouldRetry(resp, err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return info, err
|
||||
}
|
||||
@@ -1215,7 +1293,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return shouldRetry(resp, err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1225,7 +1303,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
// upload does a single non-multipart upload
|
||||
//
|
||||
// This is recommended for less than 50 MB of content
|
||||
// This is recommended for less than 50 MiB of content
|
||||
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
|
||||
upload := api.UploadFile{
|
||||
Name: o.fs.opt.Enc.FromStandardName(leaf),
|
||||
@@ -1255,13 +1333,13 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
|
||||
return shouldRetry(resp, err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if result.TotalCount != 1 || len(result.Entries) != 1 {
|
||||
return errors.Errorf("failed to upload %v - not sure why", o)
|
||||
return fmt.Errorf("failed to upload %v - not sure why", o)
|
||||
}
|
||||
return o.setMetaData(&result.Entries[0])
|
||||
}
|
||||
|
||||
backend/box/upload.go

@@ -1,4 +1,4 @@
-// multpart upload for box
+// multipart upload for box
 
 package box
 
@@ -8,6 +8,7 @@ import (
 	"crypto/sha1"
 	"encoding/base64"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -15,7 +16,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/box/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -44,7 +44,7 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return
 }
@@ -74,7 +74,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
 	err = o.fs.pacer.Call(func() (bool, error) {
 		opts.Body = wrap(bytes.NewReader(chunk))
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -109,10 +109,10 @@ outer:
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
 		if err != nil {
-			return shouldRetry(resp, err)
+			return shouldRetry(ctx, resp, err)
 		}
 		body, err = rest.ReadBody(resp)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	delay := defaultDelay
 	var why string
@@ -140,7 +140,7 @@ outer:
 			}
 		}
 	default:
-		return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
+		return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
 	}
 }
 fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
@@ -151,7 +151,7 @@ outer:
 	}
 	err = json.Unmarshal(body, &result)
 	if err != nil {
-		return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
+		return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
 	}
 	return result, nil
 }
@@ -167,7 +167,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return err
 }
@@ -177,7 +177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	// Create upload session
 	session, err := o.createUploadSession(ctx, leaf, directoryID, size)
 	if err != nil {
-		return errors.Wrap(err, "multipart upload create session failed")
+		return fmt.Errorf("multipart upload create session failed: %w", err)
 	}
 	chunkSize := session.PartSize
 	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
@@ -222,7 +222,7 @@ outer:
 			// Read the chunk
 			_, err = io.ReadFull(in, buf)
 			if err != nil {
-				err = errors.Wrap(err, "multipart upload failed to read source")
+				err = fmt.Errorf("multipart upload failed to read source: %w", err)
 				break outer
 			}
 
@@ -238,7 +238,7 @@ outer:
 			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
 			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
 			if err != nil {
-				err = errors.Wrap(err, "multipart upload failed to upload part")
+				err = fmt.Errorf("multipart upload failed to upload part: %w", err)
 				select {
 				case errs <- err:
 				default:
@@ -266,11 +266,11 @@ outer:
 	// Finalise the upload session
 	result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
 	if err != nil {
-		return errors.Wrap(err, "multipart upload failed to finalize")
+		return fmt.Errorf("multipart upload failed to finalize: %w", err)
 	}
 
 	if result.TotalCount != 1 || len(result.Entries) != 1 {
-		return errors.Errorf("multipart upload failed %v - not sure why", o)
+		return fmt.Errorf("multipart upload failed %v - not sure why", o)
 	}
 	return o.setMetaData(&result.Entries[0])
 }
backend/cache/cache.go (vendored, 131 changes)
@@ -1,9 +1,11 @@
+//go:build !plan9 && !js
 // +build !plan9,!js
 
 package cache
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"math"
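Every cache source file in this compare gains a `//go:build` line above its legacy `// +build` comment: Go 1.17's constraint syntax, kept in tandem with the old form so earlier toolchains still apply the constraint (gofmt keeps the pair in sync). For example, a file excluded on plan9 and js carries both lines before the package clause, separated from it by a blank line:

```go
//go:build !plan9 && !js
// +build !plan9,!js

package cache
```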
@@ -18,7 +20,6 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/crypt"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
@@ -68,26 +69,26 @@ func init() {
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name:     "remote",
-			Help:     "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+			Help:     "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
 			Required: true,
 		}, {
 			Name: "plex_url",
-			Help: "The URL of the Plex server",
+			Help: "The URL of the Plex server.",
 		}, {
 			Name: "plex_username",
-			Help: "The username of the Plex user",
+			Help: "The username of the Plex user.",
 		}, {
 			Name:       "plex_password",
-			Help:       "The password of the Plex user",
+			Help:       "The password of the Plex user.",
 			IsPassword: true,
 		}, {
 			Name:     "plex_token",
-			Help:     "The plex token for authentication - auto set normally",
+			Help:     "The plex token for authentication - auto set normally.",
 			Hide:     fs.OptionHideBoth,
 			Advanced: true,
 		}, {
 			Name:     "plex_insecure",
-			Help:     "Skip all certificate verification when connecting to the Plex server",
+			Help:     "Skip all certificate verification when connecting to the Plex server.",
 			Advanced: true,
 		}, {
 			Name: "chunk_size",
@@ -98,18 +99,18 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
 will need to be cleared or unexpected EOF errors will occur.`,
 			Default: DefCacheChunkSize,
 			Examples: []fs.OptionExample{{
-				Value: "1m",
-				Help:  "1MB",
+				Value: "1M",
+				Help:  "1 MiB",
 			}, {
 				Value: "5M",
-				Help:  "5 MB",
+				Help:  "5 MiB",
 			}, {
 				Value: "10M",
-				Help:  "10 MB",
+				Help:  "10 MiB",
 			}},
 		}, {
 			Name: "info_age",
-			Help: `How long to cache file structure information (directory listings, file size, times etc).
+			Help: `How long to cache file structure information (directory listings, file size, times, etc.).
 If all write operations are done through the cache then you can safely make
 this value very large as the cache store will also be updated in real time.`,
 			Default: DefCacheInfoAge,
@@ -132,22 +133,22 @@ oldest chunks until it goes under this value.`,
 			Default: DefCacheTotalChunkSize,
 			Examples: []fs.OptionExample{{
 				Value: "500M",
-				Help:  "500 MB",
+				Help:  "500 MiB",
 			}, {
 				Value: "1G",
-				Help:  "1 GB",
+				Help:  "1 GiB",
 			}, {
 				Value: "10G",
-				Help:  "10 GB",
+				Help:  "10 GiB",
 			}},
 		}, {
 			Name:    "db_path",
-			Default: filepath.Join(config.CacheDir, "cache-backend"),
-			Help:    "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
+			Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
+			Help:    "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
 			Advanced: true,
 		}, {
 			Name:    "chunk_path",
-			Default: filepath.Join(config.CacheDir, "cache-backend"),
+			Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
 			Help: `Directory to cache chunk files.
 
 Path to where partial file data (chunks) are stored locally. The remote
@@ -167,6 +168,7 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
 			Name:    "chunk_clean_interval",
 			Default: DefCacheChunkCleanInterval,
 			Help: `How often should the cache perform cleanups of the chunk storage.
+
 The default value should be ok for most people. If you find that the
 cache goes over "cache-chunk-total-size" too often then try to lower
 this value to force it to perform cleanups more often.`,
@@ -220,7 +222,7 @@ available on the local machine.`,
 		}, {
 			Name:    "rps",
 			Default: int(DefCacheRps),
-			Help: `Limits the number of requests per second to the source FS (-1 to disable)
+			Help: `Limits the number of requests per second to the source FS (-1 to disable).
 
 This setting places a hard limit on the number of requests per second
 that cache will be doing to the cloud provider remote and try to
@@ -241,7 +243,7 @@ still pass.`,
 		}, {
 			Name:    "writes",
 			Default: DefCacheWrites,
-			Help: `Cache file data on writes through the FS
+			Help: `Cache file data on writes through the FS.
 
 If you need to read files immediately after you upload them through
 cache you can enable this flag to have their data stored in the
@@ -262,7 +264,7 @@ provider`,
 		}, {
 			Name:    "tmp_wait_time",
 			Default: DefCacheTmpWaitTime,
-			Help: `How long should files be stored in local cache before being uploaded
+			Help: `How long should files be stored in local cache before being uploaded.
 
 This is the duration that a file must wait in the temporary location
 _cache-tmp-upload-path_ before it is selected for upload.
@@ -273,7 +275,7 @@ to start the upload if a queue formed for this purpose.`,
 		}, {
 			Name:    "db_wait_time",
 			Default: DefCacheDbWaitTime,
-			Help: `How long to wait for the DB to be available - 0 is unlimited
+			Help: `How long to wait for the DB to be available - 0 is unlimited.
 
 Only one process can have the DB open at any one time, so rclone waits
 for this duration for the DB to become available before it gives an
@@ -339,8 +341,14 @@ func parseRootPath(path string) (string, error) {
 	return strings.Trim(path, "/"), nil
 }
 
+var warnDeprecated sync.Once
+
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
+	warnDeprecated.Do(func() {
+		fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
+	})
+
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -348,7 +356,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}
 	if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
-		return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
+		return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
 			opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
 	}
 
@@ -358,13 +366,13 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 
 	rpath, err := parseRootPath(rootPath)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
+		return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
 	}
 
 	remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
-	wrappedFs, wrapErr := cache.Get(remotePath)
+	wrappedFs, wrapErr := cache.Get(ctx, remotePath)
 	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
-		return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
+		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
 	}
 	var fsErr error
 	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -393,7 +401,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 	if opt.PlexToken != "" {
 		f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
+			return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 		}
 	} else {
 		if opt.PlexPassword != "" && opt.PlexUsername != "" {
@@ -405,7 +413,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 				m.Set("plex_token", token)
 			})
 			if err != nil {
-				return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
+				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 			}
 		}
 	}
@@ -414,8 +422,8 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 	dbPath := f.opt.DbPath
 	chunkPath := f.opt.ChunkPath
 	// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
-	if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
-		chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
+	if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
+		chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
 		chunkPath = dbPath
 	}
 	if filepath.Ext(dbPath) != "" {
@@ -426,11 +434,11 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 	}
 	err = os.MkdirAll(dbPath, os.ModePerm)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
+		return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
 	}
 	err = os.MkdirAll(chunkPath, os.ModePerm)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
+		return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
 	}
 
 	dbPath = filepath.Join(dbPath, name+".db")
@@ -442,7 +450,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 		DbWaitTime:     time.Duration(opt.DbWaitTime),
 	})
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to start cache db")
+		return nil, fmt.Errorf("failed to start cache db: %w", err)
 	}
 	// Trap SIGINT and SIGTERM to close the DB handle gracefully
 	c := make(chan os.Signal, 1)
@@ -476,12 +484,12 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 	if f.opt.TempWritePath != "" {
 		err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
+			return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
 		}
 		f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
-		f.tempFs, err = cache.Get(f.opt.TempWritePath)
+		f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
+			return nil, fmt.Errorf("failed to create temp fs: %v: %w", err, err)
 		}
 		fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
 		fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -506,13 +514,13 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 	if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
 		pollInterval := make(chan time.Duration, 1)
 		pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
-		doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
+		doChangeNotify(ctx, f.receiveChangeNotify, pollInterval)
 	}
 
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 		DuplicateFiles:          false, // storage doesn't permit this
-	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
+	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
 	// override only those features that use a temp fs and it doesn't support them
 	//f.features.ChangeNotify = f.ChangeNotify
 	if f.opt.TempWritePath != "" {
@@ -581,7 +589,7 @@ Some valid examples are:
 "0:10" -> the first ten chunks
 
 Any parameter with a key that starts with "file" can be used to
-specify files to fetch, eg
+specify files to fetch, e.g.
 
     rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
 
@@ -598,7 +606,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
 	out = make(rc.Params)
 	m, err := f.Stats()
 	if err != nil {
-		return out, errors.Errorf("error while getting cache stats")
+		return out, fmt.Errorf("error while getting cache stats")
 	}
 	out["status"] = "ok"
 	out["stats"] = m
@@ -625,7 +633,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 	out = make(rc.Params)
 	remoteInt, ok := in["remote"]
 	if !ok {
-		return out, errors.Errorf("remote is needed")
+		return out, fmt.Errorf("remote is needed")
 	}
 	remote := remoteInt.(string)
 	withData := false
@@ -636,7 +644,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 
 	remote = f.unwrapRemote(remote)
 	if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
-		return out, errors.Errorf("%s doesn't exist in cache", remote)
+		return out, fmt.Errorf("%s doesn't exist in cache", remote)
 	}
 
 	co := NewObject(f, remote)
@@ -645,7 +653,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 		cd := NewDirectory(f, remote)
 		err := f.cache.ExpireDir(cd)
 		if err != nil {
-			return out, errors.WithMessage(err, "error expiring directory")
+			return out, fmt.Errorf("error expiring directory: %w", err)
 		}
 		// notify vfs too
 		f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -656,7 +664,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 	// expire the entry
 	err = f.cache.ExpireObject(co, withData)
 	if err != nil {
-		return out, errors.WithMessage(err, "error expiring file")
+		return out, fmt.Errorf("error expiring file: %w", err)
 	}
 	// notify vfs too
 	f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
@@ -677,24 +685,24 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 		case 1:
 			start, err = strconv.ParseInt(ints[0], 10, 64)
 			if err != nil {
-				return nil, errors.Errorf("invalid range: %q", part)
+				return nil, fmt.Errorf("invalid range: %q", part)
 			}
 			end = start + 1
 		case 2:
 			if ints[0] != "" {
 				start, err = strconv.ParseInt(ints[0], 10, 64)
 				if err != nil {
-					return nil, errors.Errorf("invalid range: %q", part)
+					return nil, fmt.Errorf("invalid range: %q", part)
 				}
 			}
 			if ints[1] != "" {
 				end, err = strconv.ParseInt(ints[1], 10, 64)
 				if err != nil {
-					return nil, errors.Errorf("invalid range: %q", part)
+					return nil, fmt.Errorf("invalid range: %q", part)
 				}
 			}
 		default:
-			return nil, errors.Errorf("invalid range: %q", part)
+			return nil, fmt.Errorf("invalid range: %q", part)
 		}
 		crs = append(crs, chunkRange{start: start, end: end})
 	}
@@ -749,18 +757,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 	delete(in, "chunks")
 	crs, err := parseChunks(s)
 	if err != nil {
-		return nil, errors.Wrap(err, "invalid chunks parameter")
+		return nil, fmt.Errorf("invalid chunks parameter: %w", err)
 	}
 	var files [][2]string
 	for k, v := range in {
 		if !strings.HasPrefix(k, "file") {
-			return nil, errors.Errorf("invalid parameter %s=%s", k, v)
+			return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
 		}
 		switch v := v.(type) {
 		case string:
 			files = append(files, [2]string{v, f.unwrapRemote(v)})
 		default:
-			return nil, errors.Errorf("invalid parameter %s=%s", k, v)
+			return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
 		}
 	}
 	type fileStatus struct {
@@ -1116,7 +1124,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		case fs.Directory:
 			_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
 		default:
-			return errors.Errorf("Unknown object type %T", entry)
+			return fmt.Errorf("Unknown object type %T", entry)
 		}
 	}
 
@@ -1236,7 +1244,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
 
@@ -1517,7 +1525,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	return f.put(ctx, in, src, options, do)
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
 
@@ -1594,7 +1602,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return co, nil
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
 
@@ -1895,6 +1903,16 @@ func (f *Fs) Disconnect(ctx context.Context) error {
 	return do(ctx)
 }
 
+// Shutdown the backend, closing any background tasks and any
+// cached connections.
+func (f *Fs) Shutdown(ctx context.Context) error {
+	do := f.Fs.Features().Shutdown
+	if do == nil {
+		return nil
+	}
+	return do(ctx)
+}
+
 var commandHelp = []fs.CommandHelp{
 	{
 		Name: "stats",
@@ -1939,4 +1957,5 @@ var (
 	_ fs.Disconnecter = (*Fs)(nil)
 	_ fs.Commander    = (*Fs)(nil)
 	_ fs.MergeDirser  = (*Fs)(nil)
+	_ fs.Shutdowner   = (*Fs)(nil)
 )
backend/cache/cache_internal_test.go (vendored, 68 changes)
@@ -1,5 +1,5 @@
-// +build !plan9,!js
-// +build !race
+//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race
 
 package cache_test
 
@@ -7,6 +7,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/base64"
+	"errors"
 	goflag "flag"
 	"fmt"
 	"io"
@@ -16,12 +17,12 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"runtime"
 	"runtime/debug"
 	"strings"
 	"testing"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/cache"
 	"github.com/rclone/rclone/backend/crypt"
 	_ "github.com/rclone/rclone/backend/drive"
@@ -293,6 +294,9 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
 }
 
 func TestInternalDoubleWrittenContentMatches(t *testing.T) {
+	if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
+		t.Skip("Skip test on windows/386")
+	}
 	id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -442,7 +446,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 		return err
 	}
 	if coSize != expectedSize {
-		return errors.Errorf("%v <> %v", coSize, expectedSize)
+		return fmt.Errorf("%v <> %v", coSize, expectedSize)
 	}
 	return nil
 }, 12, time.Second*10)
@@ -498,7 +502,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
 		}
 		if len(li) != 2 {
 			log.Printf("not expected listing /test: %v", li)
-			return errors.Errorf("not expected listing /test: %v", li)
+			return fmt.Errorf("not expected listing /test: %v", li)
 		}
 
 		li, err = runInstance.list(t, rootFs, "test/one")
@@ -508,7 +512,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
 		}
 		if len(li) != 0 {
 			log.Printf("not expected listing /test/one: %v", li)
-			return errors.Errorf("not expected listing /test/one: %v", li)
+			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}
 
 		li, err = runInstance.list(t, rootFs, "test/second")
@@ -518,21 +522,21 @@ func TestInternalMoveWithNotify(t *testing.T) {
 		}
 		if len(li) != 1 {
 			log.Printf("not expected listing /test/second: %v", li)
-			return errors.Errorf("not expected listing /test/second: %v", li)
+			return fmt.Errorf("not expected listing /test/second: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "data.bin" {
 				log.Printf("not expected name: %v", fi.Name())
-				return errors.Errorf("not expected name: %v", fi.Name())
+				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/second/data.bin" {
 				log.Printf("not expected remote: %v", di.Remote())
-				return errors.Errorf("not expected remote: %v", di.Remote())
+				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
 			log.Printf("unexpected listing: %v", li)
-			return errors.Errorf("unexpected listing: %v", li)
+			return fmt.Errorf("unexpected listing: %v", li)
 		}
 
 		log.Printf("complete listing: %v", li)
@@ -587,17 +591,17 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
 		if !found {
 			log.Printf("not found /test")
-			return errors.Errorf("not found /test")
+			return fmt.Errorf("not found /test")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
 		if !found {
 			log.Printf("not found /test/one")
-			return errors.Errorf("not found /test/one")
+			return fmt.Errorf("not found /test/one")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
 		if !found {
 			log.Printf("not found /test/one/test2")
-			return errors.Errorf("not found /test/one/test2")
+			return fmt.Errorf("not found /test/one/test2")
 		}
 		li, err := runInstance.list(t, rootFs, "test/one")
 		if err != nil {
@@ -606,21 +610,21 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 		}
 		if len(li) != 1 {
 			log.Printf("not expected listing /test/one: %v", li)
-			return errors.Errorf("not expected listing /test/one: %v", li)
+			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "test2" {
 				log.Printf("not expected name: %v", fi.Name())
-				return errors.Errorf("not expected name: %v", fi.Name())
+				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/one/test2" {
 				log.Printf("not expected remote: %v", di.Remote())
-				return errors.Errorf("not expected remote: %v", di.Remote())
+				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
 			log.Printf("unexpected listing: %v", li)
-			return errors.Errorf("unexpected listing: %v", li)
+			return fmt.Errorf("unexpected listing: %v", li)
 		}
 		log.Printf("complete listing /test/one/test2")
 		return nil
@@ -681,6 +685,9 @@ func TestInternalCacheWrites(t *testing.T) {
 }
 
 func TestInternalMaxChunkSizeRespected(t *testing.T) {
+	if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
+		t.Skip("Skip test on windows/386")
+	}
 	id := fmt.Sprintf("timcsr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -836,7 +843,7 @@ func newRun() *run {
 	if uploadDir == "" {
 		r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
 		if err != nil {
-			log.Fatalf("Failed to create temp dir: %v", err)
+			panic(fmt.Sprintf("Failed to create temp dir: %v", err))
 		}
 	} else {
 		r.tmpUploadDir = uploadDir
@@ -892,7 +899,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		m.Set("type", "cache")
 		m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
 	} else {
-		remoteType := config.FileGet(remote, "type", "")
+		remoteType := config.FileGet(remote, "type")
 		if remoteType == "" {
 			t.Skipf("skipped due to invalid remote type for %v", remote)
 			return nil, nil
@@ -903,14 +910,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 			m.Set("password", cryptPassword1)
 			m.Set("password2", cryptPassword2)
 		}
-		remoteRemote := config.FileGet(remote, "remote", "")
+		remoteRemote := config.FileGet(remote, "remote")
 		if remoteRemote == "" {
 			t.Skipf("skipped due to invalid remote wrapper for %v", remote)
 			return nil, nil
 		}
 		remoteRemoteParts := strings.Split(remoteRemote, ":")
 		remoteWrapping := remoteRemoteParts[0]
-		remoteType := config.FileGet(remoteWrapping, "type", "")
+		remoteType := config.FileGet(remoteWrapping, "type")
 		if remoteType != "cache" {
 			t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
 			return nil, nil
@@ -919,20 +926,21 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		}
 	}
 	runInstance.rootIsCrypt = rootIsCrypt
-	runInstance.dbPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
-	runInstance.chunkPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
-	runInstance.vfsCachePath = filepath.Join(config.CacheDir, "vfs", remote)
+	runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db")
+	runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote)
+	runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote)
 	boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
 
-	fs.Config.LowLevelRetries = 1
+	ci := fs.GetConfig(context.Background())
+	ci.LowLevelRetries = 1
 
 	// Instantiate root
 	if purge {
 		boltDb.PurgeTempUploads()
 		_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
 	}
-	f, err := cache.NewFs(remote, id, m)
+	f, err := cache.NewFs(context.Background(), remote, id, m)
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)
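The tests stop assigning through the global `fs.Config` and instead fetch a context-scoped config with `fs.GetConfig(context.Background())`, mirroring the `ctx` now threaded through `NewFs`. A simplified sketch of that accessor pattern; the `config`/`ctxKey` machinery here is a stand-in for rclone's:

```go
package main

import (
	"context"
	"fmt"
)

type config struct{ LowLevelRetries int }

type ctxKey struct{}

// getConfig returns the config attached to ctx, or a default one.
// This is a simplified stand-in for rclone's fs.GetConfig.
func getConfig(ctx context.Context) *config {
	if c, ok := ctx.Value(ctxKey{}).(*config); ok {
		return c
	}
	return &config{LowLevelRetries: 10}
}

func main() {
	ctx := context.WithValue(context.Background(), ctxKey{}, &config{LowLevelRetries: 1})
	fmt.Println(getConfig(ctx).LowLevelRetries)                  // 1: scoped override
	fmt.Println(getConfig(context.Background()).LowLevelRetries) // 10: default
}
```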
@@ -1033,7 +1041,7 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
 	objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
 	objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
 
-	obj, err = f.Put(context.Background(), in1, objInfo1)
+	_, err = f.Put(context.Background(), in1, objInfo1)
 	require.NoError(t, err)
 	obj, err = f.NewObject(context.Background(), remote)
 	require.NoError(t, err)
@@ -1054,7 +1062,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 	checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
 
 	if !noLengthCheck && size != int64(len(checkSample)) {
-		return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
+		return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
 	}
 	return checkSample, nil
 }
@@ -1249,7 +1257,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 		case state = <-buCh:
 			// continue
 		case <-time.After(maxDuration):
-			waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
+			waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
 			return
 		}
 		checkRemote := state.Remote
@@ -1266,7 +1274,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 			return
 		}
 	}
-		waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
+		waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
 	}()
 	return waitCh
 }
backend/cache/cache_test.go (vendored, 4 changes)
@@ -1,7 +1,7 @@
 // Test Cache filesystem interface
 
-// +build !plan9,!js
-// +build !race
+//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race
 
 package cache_test
 
backend/cache/cache_unsupported.go (vendored, 1 change)
@@ -1,6 +1,7 @@
 // Build for cache for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
+//go:build plan9 || js
 // +build plan9 js
 
 package cache
backend/cache/cache_upload_test.go (vendored, 4 changes)
@@ -1,5 +1,5 @@
-// +build !plan9,!js
-// +build !race
+//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race
 
 package cache_test
 
backend/cache/directory.go (vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js
 
 package cache
backend/cache/handle.go (vendored, 7 changes)
@@ -1,9 +1,11 @@
+//go:build !plan9 && !js
// +build !plan9,!js

package cache

import (
	"context"
+	"errors"
	"fmt"
	"io"
	"path"
@@ -12,7 +14,6 @@ import (
	"sync"
	"time"

-	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)
@@ -242,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
		return nil, io.ErrUnexpectedEOF
	}

-	return nil, errors.Errorf("chunk not found %v", chunkStart)
+	return nil, fmt.Errorf("chunk not found %v", chunkStart)
}

// first chunk will be aligned with the start
@@ -322,7 +323,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
		fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
		r.offset = r.cachedObject.Size() + offset
	default:
-		err = errors.Errorf("cache: unimplemented seek whence %v", whence)
+		err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
	}

	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
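The Seek hunk only rewords the unknown-whence error; the three whence values it handles follow the standard io package semantics. A self-contained illustration:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("0123456789")

	p1, _ := r.Seek(2, io.SeekStart)   // absolute offset from the start
	p2, _ := r.Seek(3, io.SeekCurrent) // relative to the current offset
	p3, _ := r.Seek(-1, io.SeekEnd)    // relative to the end, as in r.cachedObject.Size()+offset above

	fmt.Println(p1, p2, p3) // 2 5 9
}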

backend/cache/object.go (vendored, 15 changes)
@@ -1,15 +1,16 @@
+//go:build !plan9 && !js
// +build !plan9,!js

package cache

import (
	"context"
+	"fmt"
	"io"
	"path"
	"sync"
	"time"

-	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/readers"
@@ -177,10 +178,14 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
	}
	if o.isTempFile() {
		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
-		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
+		if err != nil {
+			err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
+		}
	} else {
		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
-		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
+		if err != nil {
+			err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
+		}
	}
	if err != nil {
		fs.Errorf(o, "error refreshing object in : %v", err)
@@ -252,7 +257,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
-			return errors.Errorf("%v is currently uploading, can't update", o)
+			return fmt.Errorf("%v is currently uploading, can't update", o)
		}
	}
	fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -291,7 +296,7 @@ func (o *Object) Remove(ctx context.Context) error {
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
-			return errors.Errorf("%v is currently uploading, can't delete", o)
+			return fmt.Errorf("%v is currently uploading, can't delete", o)
		}
	}
	err := o.Object.Remove(ctx)
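The refreshFromSource hunk shows why this migration is not a pure find-and-replace: errors.Wrapf returns nil when the wrapped error is nil, while fmt.Errorf always returns a non-nil error, so an explicit nil check has to be introduced. A minimal demonstration of the difference (message text illustrative):

package main

import (
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

func main() {
	var err error // nil: the call being wrapped succeeded

	// pkg/errors: wrapping nil yields nil, so call sites could wrap blindly.
	fmt.Println(pkgerrors.Wrapf(err, "in parent fs") == nil) // true

	// fmt.Errorf always constructs an error, even when the %w operand is
	// nil, hence the `if err != nil` guard added in refreshFromSource.
	wrapped := fmt.Errorf("in parent fs: %w", err)
	fmt.Println(wrapped == nil) // false
}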

backend/cache/plex.go (vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
// +build !plan9,!js

package cache

backend/cache/storage_memory.go (vendored, 5 changes)
@@ -1,14 +1,15 @@
+//go:build !plan9 && !js
// +build !plan9,!js

package cache

import (
+	"fmt"
	"strconv"
	"strings"
	"time"

	cache "github.com/patrickmn/go-cache"
-	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
)

@@ -52,7 +53,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
		return data, nil
	}

-	return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
+	return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
}

// AddChunk adds a new chunk of a cached object

backend/cache/storage_persistent.go (vendored, 84 changes)
@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
// +build !plan9,!js

package cache
@@ -16,7 +17,6 @@ import (
	"sync"
	"time"

-	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/walk"
	bolt "go.etcd.io/bbolt"
@@ -119,11 +119,11 @@ func (b *Persistent) connect() error {

	err = os.MkdirAll(b.dataPath, os.ModePerm)
	if err != nil {
-		return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
+		return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
	}
	b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
	if err != nil {
-		return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
+		return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
	}
	if b.features.PurgeDb {
		b.Purge()
@@ -175,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
	err := b.db.View(func(tx *bolt.Tx) error {
		bucket := b.getBucket(remote, false, tx)
		if bucket == nil {
-			return errors.Errorf("couldn't open bucket (%v)", remote)
+			return fmt.Errorf("couldn't open bucket (%v)", remote)
		}

		data := bucket.Get([]byte("."))
@@ -183,7 +183,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
			return json.Unmarshal(data, cd)
		}

-		return errors.Errorf("%v not found", remote)
+		return fmt.Errorf("%v not found", remote)
	})

	return cd, err
@@ -208,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
			bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
		}
		if bucket == nil {
-			return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
+			return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
		}

		for _, cachedDir := range cachedDirs {
@@ -225,7 +225,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {

			encoded, err := json.Marshal(cachedDir)
			if err != nil {
-				return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
+				return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
			}
			err = b.Put([]byte("."), encoded)
			if err != nil {
@@ -243,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
	err := b.db.View(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cachedDir.abs(), false, tx)
		if bucket == nil {
-			return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
+			return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
		}

		val := bucket.Get([]byte("."))
		if val != nil {
			err := json.Unmarshal(val, cachedDir)
			if err != nil {
-				return errors.Errorf("error during unmarshalling obj: %v", err)
+				return fmt.Errorf("error during unmarshalling obj: %v", err)
			}
		} else {
-			return errors.Errorf("missing cached dir: %v", cachedDir)
+			return fmt.Errorf("missing cached dir: %v", cachedDir)
		}

		c := bucket.Cursor()
@@ -268,7 +268,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
				// we try to find a cached meta for the dir
				currentBucket := c.Bucket().Bucket(k)
				if currentBucket == nil {
-					return errors.Errorf("couldn't open bucket (%v)", string(k))
+					return fmt.Errorf("couldn't open bucket (%v)", string(k))
				}

				metaKey := currentBucket.Get([]byte("."))
@@ -317,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
	err = b.db.Update(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cleanPath(parentDir), false, tx)
		if bucket == nil {
-			return errors.Errorf("couldn't open bucket (%v)", fp)
+			return fmt.Errorf("couldn't open bucket (%v)", fp)
		}
		// delete the cached dir
		err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
@@ -377,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
	return b.db.View(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cachedObject.Dir, false, tx)
		if bucket == nil {
-			return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
+			return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
		}
		val := bucket.Get([]byte(cachedObject.Name))
		if val != nil {
			return json.Unmarshal(val, cachedObject)
		}
-		return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
+		return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
	})
}
@@ -392,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cachedObject.Dir, true, tx)
		if bucket == nil {
-			return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
+			return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
		}
		// cache Object Info
		encoded, err := json.Marshal(cachedObject)
		if err != nil {
-			return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
+			return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
		}
		err = bucket.Put([]byte(cachedObject.Name), encoded)
		if err != nil {
-			return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
+			return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
		}
		return nil
	})
@@ -413,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cleanPath(parentDir), false, tx)
		if bucket == nil {
-			return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
+			return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
		}
		err := bucket.Delete([]byte(cleanPath(objName)))
		if err != nil {
@@ -445,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
	err := b.db.View(func(tx *bolt.Tx) error {
		bucket := b.getBucket(dir, false, tx)
		if bucket == nil {
-			return errors.Errorf("couldn't open parent bucket for %v", remote)
+			return fmt.Errorf("couldn't open parent bucket for %v", remote)
		}
		if f := bucket.Bucket([]byte(name)); f != nil {
			return nil
@@ -454,7 +454,7 @@ func (b *Persistent) HasEntry(remote string) bool {
			return nil
		}

-		return errors.Errorf("couldn't find object (%v)", remote)
+		return fmt.Errorf("couldn't find object (%v)", remote)
	})
	if err == nil {
		return true
@@ -554,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
	err := b.db.Update(func(tx *bolt.Tx) error {
		dataTsBucket := tx.Bucket([]byte(DataTsBucket))
		if dataTsBucket == nil {
-			return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
+			return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
		}
		// iterate through ts
		c := dataTsBucket.Cursor()
@@ -732,7 +732,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
				return nil
			}
		}
-		return errors.Errorf("not found %v-%v", path, offset)
+		return fmt.Errorf("not found %v-%v", path, offset)
	})

	return t, err
@@ -772,7 +772,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
		if err != nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
		}
		tempObj := &tempUploadInfo{
			DestPath: destPath,
@@ -783,11 +783,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
		// cache Object Info
		encoded, err := json.Marshal(tempObj)
		if err != nil {
-			return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
+			return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
		}
		err = bucket.Put([]byte(destPath), encoded)
		if err != nil {
-			return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+			return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
		}

		return nil
@@ -802,7 +802,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
	err = b.db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
		if err != nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
		}

		c := bucket.Cursor()
@@ -835,7 +835,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
			return nil
		}

-		return errors.Errorf("no pending upload found")
+		return fmt.Errorf("no pending upload found")
	})

	return destPath, err
@@ -846,14 +846,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
	err = b.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(tempBucket))
		if bucket == nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
		}

		var tempObj = &tempUploadInfo{}
		v := bucket.Get([]byte(remote))
		err = json.Unmarshal(v, tempObj)
		if err != nil {
-			return errors.Errorf("pending upload (%v) not found %v", remote, err)
+			return fmt.Errorf("pending upload (%v) not found %v", remote, err)
		}

		started = tempObj.Started
@@ -868,7 +868,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
	err = b.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(tempBucket))
		if bucket == nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
		}

		c := bucket.Cursor()
@@ -898,22 +898,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
		if err != nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
		}
		var tempObj = &tempUploadInfo{}
		v := bucket.Get([]byte(remote))
		err = json.Unmarshal(v, tempObj)
		if err != nil {
-			return errors.Errorf("pending upload (%v) not found %v", remote, err)
+			return fmt.Errorf("pending upload (%v) not found %v", remote, err)
		}
		tempObj.Started = false
		v2, err := json.Marshal(tempObj)
		if err != nil {
-			return errors.Errorf("pending upload not updated %v", err)
+			return fmt.Errorf("pending upload not updated %v", err)
		}
		err = bucket.Put([]byte(tempObj.DestPath), v2)
		if err != nil {
-			return errors.Errorf("pending upload not updated %v", err)
+			return fmt.Errorf("pending upload not updated %v", err)
		}
		return nil
	})
@@ -926,7 +926,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
		if err != nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
		}
		return bucket.Delete([]byte(remote))
	})
@@ -941,17 +941,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
		if err != nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
		}

		var tempObj = &tempUploadInfo{}
		v := bucket.Get([]byte(remote))
		err = json.Unmarshal(v, tempObj)
		if err != nil {
-			return errors.Errorf("pending upload (%v) not found %v", remote, err)
+			return fmt.Errorf("pending upload (%v) not found %v", remote, err)
		}
		if tempObj.Started {
-			return errors.Errorf("pending upload already started %v", remote)
+			return fmt.Errorf("pending upload already started %v", remote)
		}
		err = fn(tempObj)
		if err != nil {
@@ -969,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
		}
		v2, err := json.Marshal(tempObj)
		if err != nil {
-			return errors.Errorf("pending upload not updated %v", err)
+			return fmt.Errorf("pending upload not updated %v", err)
		}
		err = bucket.Put([]byte(tempObj.DestPath), v2)
		if err != nil {
-			return errors.Errorf("pending upload not updated %v", err)
+			return fmt.Errorf("pending upload not updated %v", err)
		}

		return nil
@@ -1014,11 +1014,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
			// cache Object Info
			encoded, err := json.Marshal(tempObj)
			if err != nil {
-				return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
+				return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
			}
			err = bucket.Put([]byte(destPath), encoded)
			if err != nil {
-				return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+				return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
			}
			fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
		}
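Nearly every hunk in this file is the same mechanical rewrite: wrapping via github.com/pkg/errors becomes plain fmt.Errorf, with the %w verb preserving the error chain that errors.Wrapf used to keep. A minimal sketch of the pattern (the function and path are illustrative, not rclone code):

package main

import (
	"errors"
	"fmt"
	"os"
)

// openConfig shows the migrated wrapping style.
func openConfig(path string) error {
	f, err := os.Open(path)
	if err != nil {
		// Before: errors.Wrapf(err, "failed to open %q", path)
		// After: fmt.Errorf with %w keeps the cause inspectable.
		return fmt.Errorf("failed to open %q: %w", path, err)
	}
	defer f.Close()
	return nil
}

func main() {
	err := openConfig("/nonexistent")
	// Because %w preserves the chain, errors.Is still sees the sentinel.
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
}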

File diff suppressed because it is too large

@@ -12,7 +12,10 @@ import (
	"testing"

	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
@@ -32,11 +35,35 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
		fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
			ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
			Path:    fmt.Sprintf("chunker-upload-%dk", kilobytes),
-			Size:    int64(kilobytes) * int64(fs.KibiByte),
+			Size:    int64(kilobytes) * int64(fs.Kibi),
		})
	})
}

+type settings map[string]interface{}
+
+func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
+	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
+	configMap := configmap.Simple{}
+	for key, val := range opts {
+		configMap[key] = fmt.Sprintf("%v", val)
+	}
+	rpath := fspath.JoinRootPath(f.Root(), path)
+	remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
+	fixFs, err := fs.NewFs(ctx, remote)
+	require.NoError(t, err)
+	return fixFs
+}
+
+var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
+
+func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
+	item := fstest.Item{Path: name, ModTime: mtime1}
+	_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
+	assert.NotNil(t, obj, message)
+	return obj
+}

// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
	saveOpt := f.opt
@@ -467,9 +494,15 @@ func testPreventCorruption(t *testing.T, f *Fs) {
		return obj
	}
	billyObj := newFile("billy")
+	billyTxn := billyObj.(*Object).xactID
+	if f.useNoRename {
+		require.True(t, billyTxn != "")
+	} else {
+		require.True(t, billyTxn == "")
+	}

	billyChunkName := func(chunkNo int) string {
-		return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
+		return f.makeChunkName(billyObj.Remote(), chunkNo, "", billyTxn)
	}

	err := f.Mkdir(ctx, billyChunkName(1))
@@ -486,11 +519,13 @@ func testPreventCorruption(t *testing.T, f *Fs) {
	// accessing chunks in strict mode is prohibited
	f.opt.FailHard = true
	billyChunk4Name := billyChunkName(4)
-	billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
+	_, err = f.base.NewObject(ctx, billyChunk4Name)
+	require.NoError(t, err)
+	_, err = f.NewObject(ctx, billyChunk4Name)
	assertOverlapError(err)

	f.opt.FailHard = false
-	billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
+	billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
	assert.NoError(t, err)
	require.NotNil(t, billyChunk4)

@@ -519,7 +554,8 @@ func testPreventCorruption(t *testing.T, f *Fs) {

	// recreate billy in case it was anyhow corrupted
	willyObj := newFile("willy")
-	willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
+	willyTxn := willyObj.(*Object).xactID
+	willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", willyTxn)
	f.opt.FailHard = false
	willyChunk, err := f.NewObject(ctx, willyChunkName)
	f.opt.FailHard = true
@@ -560,17 +596,20 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	contents := random.String(100)

-	newFile := func(f fs.Fs, name string) (fs.Object, string) {
-		filename := path.Join(dir, name)
+	newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
+		filename = path.Join(dir, name)
		item := fstest.Item{Path: filename, ModTime: modTime}
-		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+		_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
-		return obj, filename
+		if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
+			txnID = chunkObj.xactID
+		}
+		return
	}

	f.opt.FailHard = false
-	file, fileName := newFile(f, "wreaker")
-	wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
+	file, fileName, fileTxn := newFile(f, "wreaker")
+	wreak, _, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", fileTxn))

	f.opt.FailHard = false
	fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
@@ -604,22 +643,13 @@ func testMetadataInput(t *testing.T, f *Fs) {
	}()
	f.opt.FailHard = false

-	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
-
-	putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
-		item := fstest.Item{Path: name, ModTime: modTime}
-		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
-		assert.NotNil(t, obj, message)
-		return obj
-	}
-
	runSubtest := func(contents, name string) {
		description := fmt.Sprintf("file with %s metadata", name)
		filename := path.Join(dir, name)
		require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")

-		part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
-		_ = putFile(f, filename, contents, "upload "+description, false)
+		part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
+		_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)

		obj, err := f.NewObject(ctx, filename)
		assert.NoError(t, err, "access "+description)
@@ -649,7 +679,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
		}
	}

-	metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
+	metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "", "")
	require.NoError(t, err)
	todaysMeta := string(metaData)
	runSubtest(todaysMeta, "today")
@@ -663,6 +693,212 @@ func testMetadataInput(t *testing.T, f *Fs) {
	runSubtest(futureMeta, "future")
}

+// Test that chunker refuses to change on objects with future/unknown metadata
+func testFutureProof(t *testing.T, f *Fs) {
+	if !f.useMeta {
+		t.Skip("this test requires metadata support")
+	}
+
+	saveOpt := f.opt
+	ctx := context.Background()
+	f.opt.FailHard = true
+	const dir = "future"
+	const file = dir + "/test"
+	defer func() {
+		f.opt.FailHard = false
+		_ = operations.Purge(ctx, f.base, dir)
+		f.opt = saveOpt
+	}()
+
+	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
+	putPart := func(name string, part int, data, msg string) {
+		if part > 0 {
+			name = f.makeChunkName(name, part-1, "", "")
+		}
+		item := fstest.Item{Path: name, ModTime: modTime}
+		_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
+		assert.NotNil(t, obj, msg)
+	}
+
+	// simulate chunked object from future
+	meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}`
+	putPart(file, 0, meta, "metaobject")
+	putPart(file, 1, "abc", "chunk1")
+	putPart(file, 2, "def", "chunk2")
+	putPart(file, 3, "ghi", "chunk3")
+
+	// List should succeed
+	ls, err := f.List(ctx, dir)
+	assert.NoError(t, err)
+	assert.Equal(t, 1, len(ls))
+	assert.Equal(t, int64(9), ls[0].Size())
+
+	// NewObject should succeed
+	obj, err := f.NewObject(ctx, file)
+	assert.NoError(t, err)
+	assert.Equal(t, file, obj.Remote())
+	assert.Equal(t, int64(9), obj.Size())
+
+	// Hash must fail
+	_, err = obj.Hash(ctx, hash.SHA1)
+	assert.Equal(t, ErrMetaUnknown, err)
+
+	// Move must fail
+	mobj, err := operations.Move(ctx, f, nil, file+"2", obj)
+	assert.Nil(t, mobj)
+	assert.Error(t, err)
+	if err != nil {
+		assert.Contains(t, err.Error(), "please upgrade rclone")
+	}
+
+	// Put must fail
+	oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil)
+	buf := bytes.NewBufferString("abc")
+	_, err = f.Put(ctx, buf, oi)
+	assert.Error(t, err)
+
+	// Rcat must fail
+	in := ioutil.NopCloser(bytes.NewBufferString("abc"))
+	robj, err := operations.Rcat(ctx, f, file, in, modTime)
+	assert.Nil(t, robj)
+	assert.NotNil(t, err)
+	if err != nil {
+		assert.Contains(t, err.Error(), "please upgrade rclone")
+	}
+}
+
+// The newer method of doing transactions without renaming should still be able to correctly process chunks that were created with renaming
+// If you attempt to do the inverse, however, the data chunks will be ignored causing commands to perform incorrectly
+func testBackwardsCompatibility(t *testing.T, f *Fs) {
+	if !f.useMeta {
+		t.Skip("Can't do norename transactions without metadata")
+	}
+	const dir = "backcomp"
+	ctx := context.Background()
+	saveOpt := f.opt
+	saveUseNoRename := f.useNoRename
+	defer func() {
+		f.opt.FailHard = false
+		_ = operations.Purge(ctx, f.base, dir)
+		f.opt = saveOpt
+		f.useNoRename = saveUseNoRename
+	}()
+	f.opt.ChunkSize = fs.SizeSuffix(10)
+
+	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
+	contents := random.String(250)
+	newFile := func(f fs.Fs, name string) (fs.Object, string) {
+		filename := path.Join(dir, name)
+		item := fstest.Item{Path: filename, ModTime: modTime}
+		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+		require.NotNil(t, obj)
+		return obj, filename
+	}
+
+	f.opt.FailHard = false
+	f.useNoRename = false
+	file, fileName := newFile(f, "renamefile")
+
+	f.opt.FailHard = false
+	item := fstest.NewItem(fileName, contents, modTime)
+
+	var items []fstest.Item
+	items = append(items, item)
+
+	f.useNoRename = true
+	fstest.CheckListingWithRoot(t, f, dir, items, nil, f.Precision())
+	_, err := f.NewObject(ctx, fileName)
+	assert.NoError(t, err)
+
+	f.opt.FailHard = true
+	_, err = f.List(ctx, dir)
+	assert.NoError(t, err)
+
+	f.opt.FailHard = false
+	_ = file.Remove(ctx)
+}
+
+func testChunkerServerSideMove(t *testing.T, f *Fs) {
+	if !f.useMeta {
+		t.Skip("Can't test norename transactions without metadata")
+	}
+
+	ctx := context.Background()
+	const dir = "servermovetest"
+	subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), dir)
+
+	subFs1, err := fs.NewFs(ctx, subRemote+"/subdir1")
+	assert.NoError(t, err)
+	fs1, isChunkerFs := subFs1.(*Fs)
+	assert.True(t, isChunkerFs)
+	fs1.useNoRename = false
+	fs1.opt.ChunkSize = fs.SizeSuffix(3)
+
+	subFs2, err := fs.NewFs(ctx, subRemote+"/subdir2")
+	assert.NoError(t, err)
+	fs2, isChunkerFs := subFs2.(*Fs)
+	assert.True(t, isChunkerFs)
+	fs2.useNoRename = true
+	fs2.opt.ChunkSize = fs.SizeSuffix(3)
+
+	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
+	item := fstest.Item{Path: "movefile", ModTime: modTime}
+	contents := "abcdef"
+	_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
+
+	dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
+	dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
+	assert.NoError(t, err)
+	assert.Equal(t, int64(len(contents)), dstFile.Size())
+
+	r, err := dstFile.Open(ctx)
+	assert.NoError(t, err)
+	assert.NotNil(t, r)
+	data, err := ioutil.ReadAll(r)
+	assert.NoError(t, err)
+	assert.Equal(t, contents, string(data))
+	_ = r.Close()
+	_ = operations.Purge(ctx, f.base, dir)
+}
+
+// Test that md5all creates metadata even for small files
+func testMD5AllSlow(t *testing.T, f *Fs) {
+	ctx := context.Background()
+	fsResult := deriveFs(ctx, t, f, "md5all", settings{
+		"chunk_size":   "1P",
+		"name_format":  "*.#",
+		"hash_type":    "md5all",
+		"transactions": "rename",
+		"meta_format":  "simplejson",
+	})
+	chunkFs, ok := fsResult.(*Fs)
+	require.True(t, ok, "fs must be a chunker remote")
+	baseFs := chunkFs.base
+	if !baseFs.Features().SlowHash {
+		t.Skipf("this test needs a base fs with slow hash, e.g. local")
+	}
+
+	assert.True(t, chunkFs.useMD5, "must use md5")
+	assert.True(t, chunkFs.hashAll, "must hash all files")
+
+	_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
+	obj, err := chunkFs.NewObject(ctx, "file")
+	require.NoError(t, err)
+	sum, err := obj.Hash(ctx, hash.MD5)
+	assert.NoError(t, err)
+	assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)
+
+	list, err := baseFs.List(ctx, "")
+	require.NoError(t, err)
+	assert.Equal(t, 2, len(list))
+	_, err = baseFs.NewObject(ctx, "file")
+	assert.NoError(t, err, "metadata must be created")
+	_, err = baseFs.NewObject(ctx, "file.1")
+	assert.NoError(t, err, "first chunk must be created")
+
+	require.NoError(t, operations.Purge(ctx, baseFs, ""))
+}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
	t.Run("PutLarge", func(t *testing.T) {
@@ -686,6 +922,18 @@ func (f *Fs) InternalTest(t *testing.T) {
	t.Run("MetadataInput", func(t *testing.T) {
		testMetadataInput(t, f)
	})
+	t.Run("FutureProof", func(t *testing.T) {
+		testFutureProof(t, f)
+	})
+	t.Run("BackwardsCompatibility", func(t *testing.T) {
+		testBackwardsCompatibility(t, f)
+	})
+	t.Run("ChunkerServerSideMove", func(t *testing.T) {
+		testChunkerServerSideMove(t, f)
+	})
+	t.Run("MD5AllSlow", func(t *testing.T) {
+		testMD5AllSlow(t, f)
+	})
}

var _ fstests.InternalTester = (*Fs)(nil)
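The new deriveFs helper above builds a modified chunker remote on the fly using rclone's connection-string form, name,key=value:path, which is exactly what its fmt.Sprintf composes. A small sketch of assembling such a string (remote name and options illustrative):

package main

import "fmt"

// connectionString assembles rclone's inline-override remote syntax,
// "name,key=value[,key2=value2]:path", as used by deriveFs above.
func connectionString(name string, opts map[string]string, path string) string {
	s := name
	for key, val := range opts {
		s += fmt.Sprintf(",%s=%s", key, val)
	}
	return s + ":" + path
}

func main() {
	fmt.Println(connectionString("mychunker", map[string]string{"chunk_size": "1M"}, "data"))
	// mychunker,chunk_size=1M:data
}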

@@ -15,10 +15,10 @@ import (

// Command line flags
var (
-	// Invalid characters are not supported by some remotes, eg. Mailru.
+	// Invalid characters are not supported by some remotes, e.g. Mailru.
	// We enable testing with invalid characters when -remote is not set, so
	// chunker overlays a local directory, but invalid characters are disabled
-	// by default when -remote is set, eg. when test_all runs backend tests.
+	// by default when -remote is set, e.g. when test_all runs backend tests.
	// You can still test with invalid characters using the below flag.
	UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)

backend/compress/.gitignore (vendored, new file, 1 change)
@@ -0,0 +1 @@
+test

backend/compress/compress.go (new file, 1416 changes)
File diff suppressed because it is too large

backend/compress/compress_test.go (new file, 65 changes)
@@ -0,0 +1,65 @@
+// Test Crypt filesystem interface
+package compress
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	_ "github.com/rclone/rclone/backend/drive"
+	_ "github.com/rclone/rclone/backend/local"
+	_ "github.com/rclone/rclone/backend/s3"
+	_ "github.com/rclone/rclone/backend/swift"
+	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	opt := fstests.Opt{
+		RemoteName: *fstest.RemoteName,
+		NilObject:  (*Object)(nil),
+		UnimplementableFsMethods: []string{
+			"OpenWriterAt",
+			"MergeDirs",
+			"DirCacheFlush",
+			"PutUnchecked",
+			"PutStream",
+			"UserInfo",
+			"Disconnect",
+		},
+		TiersToTest:                  []string{"STANDARD", "STANDARD_IA"},
+		UnimplementableObjectMethods: []string{}}
+	fstests.Run(t, &opt)
+}
+
+// TestRemoteGzip tests GZIP compression
+func TestRemoteGzip(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("Skipping as -remote set")
+	}
+	tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
+	name := "TestCompressGzip"
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: name + ":",
+		NilObject:  (*Object)(nil),
+		UnimplementableFsMethods: []string{
+			"OpenWriterAt",
+			"MergeDirs",
+			"DirCacheFlush",
+			"PutUnchecked",
+			"PutStream",
+			"UserInfo",
+			"Disconnect",
+		},
+		UnimplementableObjectMethods: []string{
+			"GetTier",
+			"SetTier",
+		},
+		ExtraConfig: []fstests.ExtraConfigItem{
+			{Name: name, Key: "type", Value: "compress"},
+			{Name: name, Key: "remote", Value: tempdir},
+			{Name: name, Key: "compression_mode", Value: "gzip"},
+		},
+	})
+}

@@ -7,17 +7,19 @@ import (
	gocipher "crypto/cipher"
	"crypto/rand"
	"encoding/base32"
+	"errors"
	"fmt"
	"io"
	"strconv"
	"strings"
	"sync"
+	"time"
	"unicode/utf8"

-	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/crypt/pkcs7"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/lib/version"
	"github.com/rfjakob/eme"
	"golang.org/x/crypto/nacl/secretbox"
	"golang.org/x/crypto/scrypt"
@@ -92,12 +94,12 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
	case "obfuscate":
		mode = NameEncryptionObfuscated
	default:
-		err = errors.Errorf("Unknown file name encryption mode %q", s)
+		err = fmt.Errorf("Unknown file name encryption mode %q", s)
	}
	return mode, err
}

-// String turns mode into a human readable string
+// String turns mode into a human-readable string
func (mode NameEncryptionMode) String() (out string) {
	switch mode {
	case NameEncryptionOff:
@@ -147,7 +149,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
// If salt is "" we use a fixed salt just to make attackers lives
// slighty harder than using no salt.
//
-// Note that empty passsword makes all 0x00 keys which is used in the
+// Note that empty password makes all 0x00 keys which is used in the
// tests.
func (c *Cipher) Key(password, salt string) (err error) {
	const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
@@ -442,11 +444,32 @@ func (c *Cipher) encryptFileName(in string) string {
		if !c.dirNameEncrypt && i != (len(segments)-1) {
			continue
		}

+		// Strip version string so that only the non-versioned part
+		// of the file name gets encrypted/obfuscated
+		hasVersion := false
+		var t time.Time
+		if i == (len(segments)-1) && version.Match(segments[i]) {
+			var s string
+			t, s = version.Remove(segments[i])
+			// version.Remove can fail, in which case it returns segments[i]
+			if s != segments[i] {
+				segments[i] = s
+				hasVersion = true
+			}
+		}
+
		if c.mode == NameEncryptionStandard {
			segments[i] = c.encryptSegment(segments[i])
		} else {
			segments[i] = c.obfuscateSegment(segments[i])
		}
+
+		// Add back a version to the encrypted/obfuscated
+		// file name, if we stripped it off earlier
+		if hasVersion {
+			segments[i] = version.Add(segments[i], t)
+		}
	}
	return strings.Join(segments, "/")
}
@@ -477,6 +500,21 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
		if !c.dirNameEncrypt && i != (len(segments)-1) {
			continue
		}

+		// Strip version string so that only the non-versioned part
+		// of the file name gets decrypted/deobfuscated
+		hasVersion := false
+		var t time.Time
+		if i == (len(segments)-1) && version.Match(segments[i]) {
+			var s string
+			t, s = version.Remove(segments[i])
+			// version.Remove can fail, in which case it returns segments[i]
+			if s != segments[i] {
+				segments[i] = s
+				hasVersion = true
+			}
+		}
+
		if c.mode == NameEncryptionStandard {
			segments[i], err = c.decryptSegment(segments[i])
		} else {
@@ -486,6 +524,12 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
		if err != nil {
			return "", err
		}

+		// Add back a version to the decrypted/deobfuscated
+		// file name, if we stripped it off earlier
+		if hasVersion {
+			segments[i] = version.Add(segments[i], t)
+		}
	}
	return strings.Join(segments, "/"), nil
}
@@ -494,10 +538,18 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
func (c *Cipher) DecryptFileName(in string) (string, error) {
	if c.mode == NameEncryptionOff {
		remainingLength := len(in) - len(encryptedSuffix)
-		if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
-			return in[:remainingLength], nil
+		if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
+			return "", ErrorNotAnEncryptedFile
		}
-		return "", ErrorNotAnEncryptedFile
+		decrypted := in[:remainingLength]
+		if version.Match(decrypted) {
+			_, unversioned := version.Remove(decrypted)
+			if unversioned == "" {
+				return "", ErrorNotAnEncryptedFile
+			}
+		}
+		// Leave the version string on, if it was there
+		return decrypted, nil
	}
	return c.decryptFileName(in)
}
@@ -528,7 +580,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
func (n *nonce) fromReader(in io.Reader) error {
	read, err := io.ReadFull(in, (*n)[:])
	if read != fileNonceSize {
-		return errors.Wrap(err, "short read of nonce")
+		return fmt.Errorf("short read of nonce: %w", err)
	}
	return nil
}
@@ -633,11 +685,8 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
	}
	// possibly err != nil here, but we will process the
	// data and the next call to ReadFull will return 0, err
	// Write nonce to start of block
	copy(fh.buf, fh.nonce[:])
	// Encrypt the block using the nonce
-	block := fh.buf
-	secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+	secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
	fh.bufIndex = 0
	fh.bufSize = blockHeaderSize + n
	fh.nonce.increment()
@@ -782,8 +831,7 @@ func (fh *decrypter) fillBuffer() (err error) {
		return ErrorEncryptedFileBadHeader
	}
	// Decrypt the block using the nonce
-	block := fh.buf
-	_, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+	_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
	if !ok {
		if err != nil {
			return err // return pending error as it is likely more accurate
@@ -908,7 +956,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
	// Re-open the underlying object with the offset given
	rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
	if err != nil {
-		return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
+		return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
	}

	// Set the file handle
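The two hunks that drop the block := fh.buf alias lean on secretbox's append-style API: Seal and Open append their output to the slice given as the first argument, so passing buf[:0] reuses the existing backing array instead of allocating. A self-contained sketch (key and message are illustrative):

package main

import (
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	copy(key[:], "an example key that is 32 bytes!")

	// Seal appends to its first argument, so sealing into buf[:0]
	// reuses buf's backing array rather than allocating a new slice,
	// which is what the fh.buf[:0] form above relies on.
	buf := make([]byte, 0, 1024)
	box := secretbox.Seal(buf[:0], []byte("hello"), &nonce, &key)

	opened, ok := secretbox.Open(nil, box, &nonce, &key)
	fmt.Println(ok, string(opened)) // true hello
}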

@@ -4,13 +4,13 @@ import (
	"bytes"
	"context"
	"encoding/base32"
+	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"testing"

-	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/crypt/pkcs7"
	"github.com/rclone/rclone/lib/readers"
	"github.com/stretchr/testify/assert"
@@ -160,22 +160,29 @@ func TestEncryptFileName(t *testing.T) {
	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
	// Standard mode with directory name encryption off
	c, _ = newCipher(NameEncryptionStandard, "", "", false)
	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
	assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
	assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
+	assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
	// Now off mode
	c, _ = newCipher(NameEncryptionOff, "", "", true)
	assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
	// Obfuscation mode
	c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
	assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
+	assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
+	assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
	assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
	assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
	// Obfuscation mode with directory name encryption off
	c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
	assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
+	assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
	assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
	assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
}
@@ -194,14 +201,19 @@ func TestDecryptFileName(t *testing.T) {
		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
		{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
		{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
		{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
+		{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
+		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
+		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
		{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
		{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
		{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
		{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
		{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
+		{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
	} {
		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
		actual, actualErr := c.DecryptFileName(test.in)
@@ -625,7 +637,7 @@ func (r *randomSource) Read(p []byte) (n int, err error) {
func (r *randomSource) Write(p []byte) (n int, err error) {
	for i := range p {
		if p[i] != r.next() {
-			return 0, errors.Errorf("Error in stream at %d", r.counter)
+			return 0, fmt.Errorf("Error in stream at %d", r.counter)
		}
	}
	return len(p), nil
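The new test rows exercise the -vYYYY-MM-DD-HHMMSS-mmm suffix handling added to the cipher; the helpers come from rclone's lib/version package, whose Match/Remove/Add calls appear in the cipher hunks above. A small usage sketch, assuming the package behaves as those call sites suggest:

package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/version"
)

func main() {
	t := time.Date(2001, 2, 3, 4, 5, 6, 123000000, time.UTC)

	// Add appends a "-v2001-02-03-040506-123" style suffix to the name;
	// Match detects it and Remove strips it, returning the timestamp.
	versioned := version.Add("file.txt", t)
	fmt.Println(versioned, version.Match(versioned)) // versioned name, true

	when, bare := version.Remove(versioned)
	fmt.Println(bare, when.Equal(t)) // file.txt true
}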

@@ -3,13 +3,13 @@ package crypt

import (
	"context"
+	"errors"
	"fmt"
	"io"
	"path"
	"strings"
	"time"

-	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/cache"
@@ -30,7 +30,7 @@ func init() {
		CommandHelp: commandHelp,
		Options: []fs.Option{{
			Name: "remote",
-			Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+			Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
			Required: true,
		}, {
			Name: "filename_encryption",
@@ -39,13 +39,13 @@ func init() {
			Examples: []fs.OptionExample{
				{
					Value: "standard",
-					Help:  "Encrypt the filenames see the docs for the details.",
+					Help:  "Encrypt the filenames.\nSee the docs for the details.",
				}, {
					Value: "obfuscate",
					Help:  "Very simple filename obfuscation.",
				}, {
					Value: "off",
-					Help:  "Don't encrypt the file names. Adds a \".bin\" extension only.",
+					Help:  "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
				},
			},
		}, {
@@ -71,12 +71,12 @@ NB If filename_encryption is "off" then this option will do nothing.`,
			Required: true,
		}, {
			Name: "password2",
-			Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
+			Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
			IsPassword: true,
		}, {
			Name:    "server_side_across_configs",
			Default: false,
-			Help: `Allow server side operations (eg copy) to work across different crypt configs.
+			Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.

Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
@@ -101,6 +101,21 @@ names, or for debugging purposes.`,
			Default:  false,
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
+		}, {
+			Name:     "no_data_encryption",
+			Help:     "Option to either encrypt file data or leave it unencrypted.",
+			Default:  false,
+			Advanced: true,
+			Examples: []fs.OptionExample{
+				{
+					Value: "true",
+					Help:  "Don't encrypt file data, leave it unencrypted.",
+				},
+				{
+					Value: "false",
+					Help:  "Encrypt file data.",
+				},
+			},
		}},
	})
}
@@ -116,18 +131,18 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
	}
	password, err := obscure.Reveal(opt.Password)
	if err != nil {
-		return nil, errors.Wrap(err, "failed to decrypt password")
+		return nil, fmt.Errorf("failed to decrypt password: %w", err)
	}
	var salt string
	if opt.Password2 != "" {
		salt, err = obscure.Reveal(opt.Password2)
		if err != nil {
-			return nil, errors.Wrap(err, "failed to decrypt password2")
+			return nil, fmt.Errorf("failed to decrypt password2: %w", err)
		}
	}
	cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
	if err != nil {
-		return nil, errors.Wrap(err, "failed to make cipher")
+		return nil, fmt.Errorf("failed to make cipher: %w", err)
	}
	return cipher, nil
}
@@ -144,7 +159,7 @@ func NewCipher(m configmap.Mapper) (*Cipher, error) {
}

// NewFs constructs an Fs from the path, container:path
-func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
@@ -159,25 +174,25 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
	if strings.HasPrefix(remote, name+":") {
		return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
	}
-	// Make sure to remove trailing . reffering to the current dir
+	// Make sure to remove trailing . referring to the current dir
	if path.Base(rpath) == "." {
		rpath = strings.TrimSuffix(rpath, ".")
	}
	// Look for a file first
	var wrappedFs fs.Fs
	if rpath == "" {
-		wrappedFs, err = cache.Get(remote)
+		wrappedFs, err = cache.Get(ctx, remote)
	} else {
		remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
-		wrappedFs, err = cache.Get(remotePath)
+		wrappedFs, err = cache.Get(ctx, remotePath)
		// if that didn't produce a file, look for a directory
		if err != fs.ErrorIsFile {
			remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
-			wrappedFs, err = cache.Get(remotePath)
+			wrappedFs, err = cache.Get(ctx, remotePath)
		}
	}
	if err != fs.ErrorIsFile && err != nil {
-		return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
+		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
	}
	f := &Fs{
		Fs: wrappedFs,
@@ -199,7 +214,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
		SetTier:                 true,
		GetTier:                 true,
		ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
-	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
+	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

	return f, err
}
@@ -209,6 +224,7 @@ type Options struct {
	Remote                  string `config:"remote"`
	FilenameEncryption      string `config:"filename_encryption"`
	DirectoryNameEncryption bool   `config:"directory_name_encryption"`
+	NoDataEncryption        bool   `config:"no_data_encryption"`
	Password                string `config:"password"`
	Password2               string `config:"password2"`
	ServerSideAcrossConfigs bool   `config:"server_side_across_configs"`
@@ -284,7 +300,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
		case fs.Directory:
			f.addDir(ctx, &newEntries, x)
		default:
-			return nil, errors.Errorf("Unknown object type %T", entry)
+			return nil, fmt.Errorf("Unknown object type %T", entry)
		}
	}
	return newEntries, nil
@@ -346,6 +362,14 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..

// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+	if f.opt.NoDataEncryption {
+		o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
+		if err == nil && o != nil {
+			o = f.newObject(o)
+		}
+		return o, err
+	}
+
	// Encrypt the data into wrappedIn
	wrappedIn, encrypter, err := f.cipher.encryptData(in)
	if err != nil {
@@ -382,15 +406,18 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
		var dstHash string
		dstHash, err = o.Hash(ctx, ht)
		if err != nil {
-			return nil, errors.Wrap(err, "failed to read destination hash")
+			return nil, fmt.Errorf("failed to read destination hash: %w", err)
		}
-		if srcHash != "" && dstHash != "" && srcHash != dstHash {
-			// remove object
-			err = o.Remove(ctx)
-			if err != nil {
-				fs.Errorf(o, "Failed to remove corrupted object: %v", err)
+		if srcHash != "" && dstHash != "" {
+			if srcHash != dstHash {
+				// remove object
+				err = o.Remove(ctx)
+				if err != nil {
+					fs.Errorf(o, "Failed to remove corrupted object: %v", err)
+				}
+				return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
			}
-			return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
+			fs.Debugf(src, "%v = %s OK", ht, srcHash)
		}
	}
@@ -444,7 +471,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
	return do(ctx, f.cipher.EncryptDirName(dir))
}

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -469,7 +496,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	return f.newObject(oResult), nil
}

-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -495,7 +522,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -589,24 +616,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
	// Open the src for input
	in, err := src.Open(ctx)
	if err != nil {
-		return "", errors.Wrap(err, "failed to open src")
+		return "", fmt.Errorf("failed to open src: %w", err)
	}
	defer fs.CheckClose(in, &err)

	// Now encrypt the src with the nonce
	out, err := f.cipher.newEncrypter(in, &nonce)
	if err != nil {
-		return "", errors.Wrap(err, "failed to make encrypter")
+		return "", fmt.Errorf("failed to make encrypter: %w", err)
	}

	// pipe into hash
	m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
	if err != nil {
-		return "", errors.Wrap(err, "failed to make hasher")
+		return "", fmt.Errorf("failed to make hasher: %w", err)
	}
	_, err = io.Copy(m, out)
	if err != nil {
-		return "", errors.Wrap(err, "failed to hash data")
+		return "", fmt.Errorf("failed to hash data: %w", err)
	}

	return m.Sums()[hashType], nil
@@ -617,16 +644,20 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
+	if f.opt.NoDataEncryption {
+		return src.Hash(ctx, hashType)
+	}
+
	// Read the nonce - opening the file is sufficient to read the nonce in
	// use a limited read so we only read the header
	in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
	if err != nil {
-		return "", errors.Wrap(err, "failed to open object to read nonce")
+		return "", fmt.Errorf("failed to open object to read nonce: %w", err)
	}
	d, err := f.cipher.newDecrypter(in)
	if err != nil {
		_ = in.Close()
-		return "", errors.Wrap(err, "failed to open object to read nonce")
+		return "", fmt.Errorf("failed to open object to read nonce: %w", err)
	}
	nonce := d.nonce
	// fs.Debugf(o, "Read nonce % 2x", nonce)
@@ -645,7 +676,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
	// Close d (and hence in) once we have read the nonce
	err = d.Close()
	if err != nil {
-		return "", errors.Wrap(err, "failed to close nonce read")
+		return "", fmt.Errorf("failed to close nonce read: %w", err)
	}

	return f.computeHashWithNonce(ctx, nonce, src, hashType)
|
||||
@@ -764,7 +795,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
for _, encryptedFileName := range arg {
|
||||
fileName, err := f.DecryptFileName(encryptedFileName)
|
||||
if err != nil {
|
||||
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
|
||||
return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
|
||||
}
|
||||
out = append(out, fileName)
|
||||
}
|
||||
@@ -822,9 +853,13 @@ func (o *Object) Remote() string {
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Bad size for decrypt: %v", err)
|
||||
size := o.Object.Size()
|
||||
if !o.f.opt.NoDataEncryption {
|
||||
var err error
|
||||
size, err = o.f.cipher.DecryptedSize(size)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Bad size for decrypt: %v", err)
|
||||
}
|
||||
}
|
||||
return size
|
||||
}
|
||||
@@ -842,6 +877,10 @@ func (o *Object) UnWrap() fs.Object {
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
if o.f.opt.NoDataEncryption {
|
||||
return o.Object.Open(ctx, options...)
|
||||
}
|
||||
|
||||
var openOptions []fs.OpenOption
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
@@ -917,6 +956,16 @@ func (f *Fs) Disconnect(ctx context.Context) error {
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// Shutdown the backend, closing any background tasks and any
|
||||
// cached connections.
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
do := f.Fs.Features().Shutdown
|
||||
if do == nil {
|
||||
return nil
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
||||
//
|
||||
// This encrypts the remote name and adjusts the size
|
||||
@@ -950,6 +999,9 @@ func (o *ObjectInfo) Size() int64 {
|
||||
if size < 0 {
|
||||
return size
|
||||
}
|
||||
if o.f.opt.NoDataEncryption {
|
||||
return size
|
||||
}
|
||||
return o.f.cipher.EncryptedSize(size)
|
||||
}
|
||||
|
||||
@@ -1025,6 +1077,7 @@ var (
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.UserInfoer = (*Fs)(nil)
|
||||
_ fs.Disconnecter = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||
|
||||
@@ -33,7 +33,7 @@ func (o testWrapper) UnWrap() fs.Object {
|
||||
// Create a temporary local fs to upload things from
|
||||
|
||||
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
|
||||
localFs, err := fs.TemporaryLocalFs()
|
||||
localFs, err := fs.TemporaryLocalFs(context.Background())
|
||||
require.NoError(t, err)
|
||||
cleanup = func() {
|
||||
require.NoError(t, localFs.Rmdir(context.Background(), ""))
|
||||
@@ -87,7 +87,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
|
||||
}
|
||||
|
||||
// wrap the object in a crypt for upload using the nonce we
|
||||
// saved from the encryptor
|
||||
// saved from the encrypter
|
||||
src := f.newObjectInfo(oi, nonce)
|
||||
|
||||
// Test ObjectInfo methods
|
||||
|
||||
@@ -91,3 +91,26 @@ func TestObfuscate(t *testing.T) {
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
})
|
||||
}
|
||||
|
||||
// TestNoDataObfuscate runs integration tests against the remote
|
||||
func TestNoDataObfuscate(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||
name := "TestCrypt4"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
|
||||
{Name: name, Key: "no_data_encryption", Value: "true"},
|
||||
},
|
||||
SkipBadWindowsCharacters: true,
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
// buffers which are a multiple of an underlying crypto block size.
package pkcs7

import "github.com/pkg/errors"
import "errors"

// Errors Unpad can return
var (

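Most of the hunks above replace github.com/pkg/errors with the standard library, swapping errors.Wrap for fmt.Errorf with the %w verb. The two forms behave the same for callers that unwrap with errors.Is or errors.As. A minimal standalone illustration of the replacement pattern (invented names, not rclone code):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// openConfig wraps the underlying error with %w, the stdlib
// equivalent of errors.Wrap(err, "failed to open config").
func openConfig(path string) error {
	_, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("failed to open config %q: %w", path, err)
	}
	return nil
}

func main() {
	err := openConfig("/no/such/file")
	// The wrapped error still matches the sentinel via errors.Is.
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
}
```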
backend/drive/drive.go (820 lines changed, Executable file → Normal file)
File diff suppressed because it is too large
@@ -4,19 +4,24 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"os"
"path"
"path/filepath"
"strings"
"testing"
"time"

"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
@@ -109,6 +114,7 @@ func TestInternalParseExtensions(t *testing.T) {
}

func TestInternalFindExportFormat(t *testing.T) {
ctx := context.Background()
item := &drive.File{
Name: "file",
MimeType: "application/vnd.google-apps.document",
@@ -126,7 +132,7 @@ func TestInternalFindExportFormat(t *testing.T) {
} {
f := new(Fs)
f.exportExtensions = test.extensions
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item)
assert.Equal(t, test.wantExtension, gotExtension)
if test.wantExtension != "" {
assert.Equal(t, item.Name+gotExtension, gotFilename)
@@ -194,7 +200,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)

testFilesFs, err := fs.NewFs(testFilesPath)
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
require.NoError(t, err)

_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -208,7 +214,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)

testFilesFs, err := fs.NewFs(testFilesPath)
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
require.NoError(t, err)

_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -272,14 +278,15 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
}
}

const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)

// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)
ctx := context.Background()
srcObj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
@@ -408,6 +415,130 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
require.NoError(t, f.Purge(ctx, "trashDir"))
}

// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
o := obj.(*Object)

dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()

checkFile := func(name string) {
filePath := filepath.Join(dir, name)
fi, err := os.Stat(filePath)
require.NoError(t, err)
assert.Equal(t, int64(100), fi.Size())
err = os.Remove(filePath)
require.NoError(t, err)
}

t.Run("BadID", func(t *testing.T) {
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id")
})

t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err)
err = f.copyID(ctx, rootID, dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "can't copy directory")
})

t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})

t.Run("WithDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
}

// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
flt, err := filter.NewFilter(opt)
assert.NoError(t, err)

defCtx := context.Background()
fltCtx := filter.ReplaceConfig(defCtx, flt)

testCtx1 := fltCtx
testCtx2 := filter.SetUseFilter(testCtx1, true)
testCtx3, testCancel := context.WithCancel(testCtx2)
testCtx4 := filter.SetUseFilter(testCtx3, false)
testCancel()
assert.False(t, filter.GetUseFilter(testCtx1))
assert.True(t, filter.GetUseFilter(testCtx2))
assert.True(t, filter.GetUseFilter(testCtx3))
assert.False(t, filter.GetUseFilter(testCtx4))

subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), "agequery-testdir")
subFsResult, err := fs.NewFs(defCtx, subRemote)
require.NoError(t, err)
subFs, isDriveFs := subFsResult.(*Fs)
require.True(t, isDriveFs)

tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir1)
}()
tempFs1, err := fs.NewFs(defCtx, tempDir1)
require.NoError(t, err)

tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir2)
}()
tempFs2, err := fs.NewFs(defCtx, tempDir2)
require.NoError(t, err)

file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
_, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)

// validate sync/copy
const timeQuery = "(modifiedTime >= '"

assert.NoError(t, sync.CopyDir(defCtx, subFs, tempFs1, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)

assert.NoError(t, sync.CopyDir(fltCtx, subFs, tempFs1, false))
assert.Contains(t, subFs.lastQuery, timeQuery)

assert.NoError(t, sync.CopyDir(fltCtx, tempFs2, subFs, false))
assert.Contains(t, subFs.lastQuery, timeQuery)

assert.NoError(t, sync.CopyDir(defCtx, tempFs2, subFs, false))
assert.NotContains(t, subFs.lastQuery, timeQuery)

// validate list/walk
devNull, errOpen := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
require.NoError(t, errOpen)
defer func() {
_ = devNull.Close()
}()

assert.NoError(t, operations.List(defCtx, subFs, devNull))
assert.NotContains(t, subFs.lastQuery, timeQuery)

assert.NoError(t, operations.List(fltCtx, subFs, devNull))
assert.Contains(t, subFs.lastQuery, timeQuery)
}

func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -424,6 +555,8 @@ func (f *Fs) InternalTest(t *testing.T) {
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("AgeQuery", f.InternalTestAgeQuery)
}

var _ fstests.InternalTester = (*Fs)(nil)

@@ -77,11 +77,10 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
return false, err
}
var req *http.Request
req, err = http.NewRequest(method, urls, body)
req, err = http.NewRequestWithContext(ctx, method, urls, body)
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
@@ -95,7 +94,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return f.shouldRetry(err)
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -114,8 +113,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,

// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body)
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body)
req.ContentLength = reqSize
totalSize := "*"
if rx.ContentLength >= 0 {
@@ -204,7 +202,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
again, err := rx.f.shouldRetry(err)
again, err := rx.f.shouldRetry(ctx, err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil

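The upload.go hunks above retire the pre-go1.13 workaround: building a request and attaching the context afterwards collapses into a single call. A minimal before/after sketch (hypothetical URL, not rclone code):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

func main() {
	ctx := context.Background()

	// Before (pre go1.13 pattern): build the request, then attach the context.
	req, _ := http.NewRequest("GET", "https://example.com", nil)
	req = req.WithContext(ctx)

	// After: one call does both, as in the diff above.
	req2, err := http.NewRequestWithContext(ctx, "GET", "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.URL, req2.URL)
}
```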
backend/dropbox/batcher.go (new file, 358 lines)
@@ -0,0 +1,358 @@
// This file contains the implementation of the sync batcher for uploads
//
// Dropbox rules say you can start as many batches as you want, but
// you may only have one batch being committed and must wait for the
// batch to be finished before committing another.

package dropbox

import (
"context"
"errors"
"fmt"
"sync"
"time"

"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)

const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (ssync)
defaultBatchSizeAsync = 100 // default batch size if async
)

// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}

// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}

// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}

// Send this to get the engine to quit
var quitRequest = batcherRequest{}

// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}

// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}

async := false

switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}

b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil

}

// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}

// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, fmt.Errorf("batch commit failed: %w", err)
}
return batchStatus, nil
}

// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}

// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)

// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
if err != nil {
return err
}

// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
}

// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}

// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
resp.entry = item.Success
} else {
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
errorTag += "/" + item.Failure.LookupFailed.Tag
}
if item.Failure.Path != nil {
errorTag += "/" + item.Failure.Path.Tag
}
if item.Failure.PropertiesError != nil {
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true

// Report an error if any failed in the batch
if errorTag != "" {
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}

fs.Debugf(b.f, "Committed %s", desc)
return nil
}

// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()

outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}

}
// commit any remaining items
if len(items) > 0 {
commit()
}
}

// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}

// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}
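The commit loop above is a common Go batching idiom: requests arrive on a channel, a full batch flushes immediately, and an idle timer flushes partial batches. A self-contained sketch of the same idiom with invented types (not the rclone code, which additionally handles shutdown, retries, and async replies):

```go
package main

import (
	"fmt"
	"time"
)

type request struct {
	item  string
	reply chan error
}

// run batches up to size items, flushing early after an idle timeout.
func run(in <-chan request, size int, timeout time.Duration) {
	var batch []request
	idle := time.NewTimer(timeout)
	idle.Stop()
	flush := func() {
		fmt.Printf("committing batch of %d\n", len(batch))
		for _, r := range batch {
			r.reply <- nil // signal each waiting client
		}
		batch = nil
	}
	for {
		select {
		case req, ok := <-in:
			if !ok {
				if len(batch) > 0 {
					flush() // commit any remaining items
				}
				return
			}
			batch = append(batch, req)
			idle.Stop()
			if len(batch) >= size {
				flush()
			} else {
				idle.Reset(timeout)
			}
		case <-idle.C:
			if len(batch) > 0 {
				flush()
			}
		}
	}
}

func main() {
	in := make(chan request)
	go run(in, 3, 100*time.Millisecond)
	var replies []chan error
	for _, s := range []string{"a", "b", "c", "d"} {
		r := request{item: s, reply: make(chan error, 1)}
		in <- r
		replies = append(replies, r.reply)
	}
	for _, c := range replies {
		<-c // each client sees its result once its batch commits
	}
	close(in)
}
```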
backend/dropbox/dropbox.go (999 lines changed, Executable file → Normal file)
File diff suppressed because it is too large
backend/dropbox/dropbox_internal_test.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package dropbox

import (
"testing"

"github.com/stretchr/testify/assert"
)

func TestInternalCheckPathLength(t *testing.T) {
rep := func(n int, r rune) (out string) {
rs := make([]rune, n)
for i := range rs {
rs[i] = r
}
return string(rs)
}
for _, test := range []struct {
in string
ok bool
}{
{in: "", ok: true},
{in: rep(maxFileNameLength, 'a'), ok: true},
{in: rep(maxFileNameLength+1, 'a'), ok: false},
{in: rep(maxFileNameLength, '£'), ok: true},
{in: rep(maxFileNameLength+1, '£'), ok: false},
{in: rep(maxFileNameLength, '☺'), ok: true},
{in: rep(maxFileNameLength+1, '☺'), ok: false},
{in: rep(maxFileNameLength, '你'), ok: true},
{in: rep(maxFileNameLength+1, '你'), ok: false},
{in: "/ok/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength, 'a') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, 'a') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '£') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '£') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '☺') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '☺') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '你') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '你') + "/ok", ok: false},
} {

err := checkPathLength(test.in)
assert.Equal(t, test.ok, err == nil, test.in)
}
}
@@ -2,14 +2,16 @@ package fichier

import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
@@ -28,7 +30,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
@@ -48,10 +53,51 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {

var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString

func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, leaf, directoryID, nil
}

func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
request := FileInfoRequest{
URL: url,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/info.cgi",
}

var file File
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &file)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't read file info: %w", err)
}

return &file, err
}

// maybe do some actual validation later if necessary
func validToken(token *GetTokenResponse) bool {
return token.Status == "OK"
}

func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
Pass: f.opt.FilePassword,
}
opts := rest.Opts{
Method: "POST",
@@ -61,10 +107,11 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
return shouldRetry(resp, err)
doretry, err := shouldRetry(ctx, resp, err)
return doretry || !validToken(&token), err
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}

return &token, nil
@@ -80,19 +127,25 @@ func fileFromSharedFile(file *SharedFile) File {

func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
ContentType: "application/x-www-form-urlencoded",
}
if f.opt.FolderPassword != "" {
opts.Method = "POST"
opts.Parameters = nil
opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
}

var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}

entries = make([]fs.DirEntry, len(sharedFiles))
@@ -118,10 +171,10 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
return nil, fmt.Errorf("couldn't list files: %w", err)
}
for i := range filesList.Items {
item := &filesList.Items[i]
@@ -146,10 +199,10 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
return nil, fmt.Errorf("couldn't list folders: %w", err)
}
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
@@ -240,10 +293,10 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
return nil, fmt.Errorf("couldn't create folder: %w", err)
}

// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
@@ -267,13 +320,13 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
return nil, fmt.Errorf("couldn't remove folder: %w", err)
}
if response.Status != "OK" {
return nil, errors.New("Can't remove non-empty dir")
return nil, fmt.Errorf("can't remove folder: %s", response.Message)
}

// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -296,11 +349,11 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})

if err != nil {
return nil, errors.Wrap(err, "couldn't remove file")
return nil, fmt.Errorf("couldn't remove file: %w", err)
}

// fs.Debugf(f, "Removed file with url `%s`", url)
@@ -308,6 +361,84 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
return response, nil
}

func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename string) (response *MoveFileResponse, err error) {
request := &MoveFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}

opts := rest.Opts{
Method: "POST",
Path: "/file/mv.cgi",
}

response = &MoveFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})

if err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err)
}

return response, nil
}

func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}

opts := rest.Opts{
Method: "POST",
Path: "/file/cp.cgi",
}

response = &CopyFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})

if err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err)
}

return response, nil
}

func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
request := &RenameFileRequest{
URLs: []RenameFileURL{
{
URL: url,
Filename: newName,
},
},
}

opts := rest.Opts{
Method: "POST",
Path: "/file/rename.cgi",
}

response = &RenameFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})

if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err)
}

return response, nil
}

func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")

@@ -320,10 +451,10 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
return nil, fmt.Errorf("didnt got an upload node: %w", err)
}

// fs.Debugf(f, "Got Upload node")
@@ -363,11 +494,11 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,

err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})

if err != nil {
return nil, errors.Wrap(err, "couldn't upload file")
return nil, fmt.Errorf("couldn't upload file: %w", err)
}

// fs.Debugf(f, "Uploaded File `%s`", fileName)
@@ -397,11 +528,11 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})

if err != nil {
return nil, errors.Wrap(err, "couldn't finish file upload")
return nil, fmt.Errorf("couldn't finish file upload: %w", err)
}

return response, err

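Every 1Fichier call above now routes through shouldRetry(ctx, ...) so that a cancelled context aborts the retry loop instead of being retried like an ordinary failure. A self-contained sketch of that pattern with invented names (the real code delegates to rclone's pacer and fserrors.ContextError):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// callWithRetry retries fn until it succeeds, the attempts run out,
// or ctx is cancelled - the check mirrors fserrors.ContextError.
func callWithRetry(ctx context.Context, tries int, fn func() error) error {
	var err error
	for i := 0; i < tries; i++ {
		if ctx.Err() != nil {
			return ctx.Err() // don't retry once cancelled
		}
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(10 * time.Millisecond)
	}
	return fmt.Errorf("gave up after %d tries: %w", tries, err)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 25*time.Millisecond)
	defer cancel()
	err := callWithRetry(ctx, 10, func() error {
		return errors.New("flaky server")
	})
	fmt.Println(err) // context deadline exceeded, after a few tries
}
```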
@@ -2,6 +2,7 @@ package fichier
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -9,7 +10,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
@@ -35,17 +35,27 @@ func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "fichier",
|
||||
Description: "1Fichier",
|
||||
Config: func(name string, config configmap.Mapper) {
|
||||
},
|
||||
NewFs: NewFs,
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
|
||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
|
||||
Name: "api_key",
|
||||
}, {
|
||||
Help: "If you want to download a shared folder, add this parameter",
|
||||
Help: "If you want to download a shared folder, add this parameter.",
|
||||
Name: "shared_folder",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Help: "If you want to download a shared file that is password protected, add this parameter.",
|
||||
Name: "file_password",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
|
||||
Name: "folder_password",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -77,9 +87,11 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
APIKey string `config:"api_key"`
|
||||
SharedFolder string `config:"shared_folder"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
APIKey string `config:"api_key"`
|
||||
SharedFolder string `config:"shared_folder"`
|
||||
FilePassword string `config:"file_password"`
|
||||
FolderPassword string `config:"folder_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs is the interface a cloud storage system must provide
|
||||
@@ -167,7 +179,7 @@ func (f *Fs) Features() *fs.Features {
|
||||
//
|
||||
// On Windows avoid single character remote names as they can be mixed
|
||||
// up with drive letters.
|
||||
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
||||
func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(config, opt)
|
||||
if err != nil {
|
||||
@@ -186,16 +198,17 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
|
||||
baseClient: &http.Client{},
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
DuplicateFiles: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
ReadMimeType: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
client := fshttp.NewClient(fs.Config)
|
||||
client := fshttp.NewClient(ctx)
|
||||
|
||||
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
|
||||
|
||||
@@ -203,8 +216,6 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
||||
|
||||
f.dirCache = dircache.New(root, rootID, f)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Find the current root
|
||||
err = f.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
@@ -227,7 +238,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
f.features.Fill(&tempF)
|
||||
f.features.Fill(ctx, &tempF)
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/rclone/rclone/issues/2182
|
||||
@@ -306,10 +317,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
exisitingObj, err := f.NewObject(ctx, src.Remote())
|
||||
existingObj, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
|
||||
return existingObj, existingObj.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
return f.PutUnchecked(ctx, in, src, options...)
|
||||
@@ -323,7 +334,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
// This will create a duplicate if we upload a new file without
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if size > int64(100e9) {
|
||||
if size > int64(300e9) {
|
||||
return nil, errors.New("File too big, cant upload")
|
||||
} else if size == 0 {
|
||||
return nil, fs.ErrorCantUploadEmptyFiles
|
||||
@@ -349,8 +360,10 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(fileUploadResponse.Links) != 1 {
|
||||
return nil, errors.New("unexpected amount of files")
|
||||
if len(fileUploadResponse.Links) == 0 {
|
||||
return nil, errors.New("upload response not found")
|
||||
} else if len(fileUploadResponse.Links) > 1 {
|
||||
fs.Debugf(remote, "Multiple upload responses found, using the first")
|
||||
}
|
||||
|
||||
link := fileUploadResponse.Links[0]
|
||||
@@ -364,7 +377,6 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
|
||||
fs: f,
|
||||
remote: remote,
|
||||
file: File{
|
||||
ACL: 0,
|
||||
CDN: 0,
|
||||
Checksum: link.Whirlpool,
|
||||
ContentType: "",
|
||||
@@ -417,9 +429,109 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Move src to this remote using server side move operations.
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Find current directory ID
|
||||
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If it is in the correct directory, just rename it
|
||||
var url string
|
||||
if currentDirectoryID == directoryID {
|
||||
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't rename file: %w", err)
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return nil, fmt.Errorf("couldn't rename file: %s", resp.Message)
|
||||
}
|
||||
url = resp.URLs[0].URL
|
||||
} else {
|
||||
folderID, err := strconv.Atoi(directoryID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't move file: %w", err)
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
|
||||
}
|
||||
url = resp.URLs[0]
|
||||
}
|
||||
|
||||
file, err := f.readFileInfo(ctx, url)
|
||||
if err != nil {
|
||||
return nil, errors.New("couldn't read file data")
|
||||
}
|
||||
dstObj.setMetaData(*file)
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side move operations.
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
folderID, err := strconv.Atoi(directoryID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't move file: %w", err)
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
|
||||
}
|
||||
|
||||
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
|
||||
if err != nil {
|
||||
return nil, errors.New("couldn't read file data")
|
||||
}
|
||||
dstObj.setMetaData(*file)
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
||||
o, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return o.(*Object).file.URL, nil
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ dircache.DirCacher = (*Fs)(nil)
|
||||
)
|
||||
|
||||
@@ -4,13 +4,11 @@ package fichier
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fs.Config.LogLevel = fs.LogLevelDebug
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestFichier:",
|
||||
})
|
||||
|
||||
@@ -2,11 +2,12 @@ package fichier

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/http"
    "time"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/rest"
@@ -72,6 +73,10 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
    //return errors.New("setting modtime is not supported for 1fichier remotes")
}

func (o *Object) setMetaData(file File) {
    o.file = file
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
    fs.FixRangeOption(options, o.file.Size)
@@ -90,7 +95,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo

    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.rest.Call(ctx, &opts)
        return shouldRetry(resp, err)
        return shouldRetry(ctx, resp, err)
    })

    if err != nil {
@@ -118,7 +123,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    // Delete duplicate after successful upload
    err = o.Remove(ctx)
    if err != nil {
        return errors.Wrap(err, "failed to remove old version")
        return fmt.Errorf("failed to remove old version: %w", err)
    }

    // Replace guts of old object with new one
@@ -1,5 +1,10 @@
package fichier

// FileInfoRequest is the request structure of the corresponding request
type FileInfoRequest struct {
    URL string `json:"url"`
}

// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
    FolderID int `json:"folder_id"`
@@ -14,6 +19,7 @@ type ListFilesRequest struct {
type DownloadRequest struct {
    URL    string `json:"url"`
    Single int    `json:"single"`
    Pass   string `json:"pass,omitempty"`
}

// RemoveFolderRequest is the request structure of the corresponding request
@@ -49,6 +55,65 @@ type MakeFolderResponse struct {
    FolderID int `json:"folder_id"`
}

// MoveFileRequest is the request structure of the corresponding request
type MoveFileRequest struct {
    URLs     []string `json:"urls"`
    FolderID int      `json:"destination_folder_id"`
    Rename   string   `json:"rename,omitempty"`
}

// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
    Status  string   `json:"status"`
    Message string   `json:"message"`
    URLs    []string `json:"urls"`
}

// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
    URLs     []string `json:"urls"`
    FolderID int      `json:"folder_id"`
    Rename   string   `json:"rename,omitempty"`
}

// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
    Status  string     `json:"status"`
    Message string     `json:"message"`
    Copied  int        `json:"copied"`
    URLs    []FileCopy `json:"urls"`
}

// FileCopy is used in the CopyFileResponse
type FileCopy struct {
    FromURL string `json:"from_url"`
    ToURL   string `json:"to_url"`
}

// RenameFileURL is the data structure to rename a single file
type RenameFileURL struct {
    URL      string `json:"url"`
    Filename string `json:"filename"`
}

// RenameFileRequest is the request structure of the corresponding request
type RenameFileRequest struct {
    URLs   []RenameFileURL `json:"urls"`
    Pretty int             `json:"pretty"`
}

// RenameFileResponse is the response structure of the corresponding request
type RenameFileResponse struct {
    Status  string `json:"status"`
    Message string `json:"message"`
    Renamed int    `json:"renamed"`
    URLs    []struct {
        URL         string `json:"url"`
        OldFilename string `json:"old_filename"`
        NewFilename string `json:"new_filename"`
    } `json:"urls"`
}

// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
    ID string `json:"id"`
@@ -86,7 +151,6 @@ type EndFileUploadResponse struct {

// File is the structure in which 1Fichier returns a File
type File struct {
    ACL         int    `json:"acl"`
    CDN         int    `json:"cdn"`
    Checksum    string `json:"checksum"`
    ContentType string `json:"content-type"`
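For reference, a small standalone sketch of the JSON body a request shaped like CopyFileRequest marshals to; the URL and folder ID below are made-up values, and omitempty drops the unused rename field:

package main

import (
    "encoding/json"
    "fmt"
)

// Same shape as CopyFileRequest above; the field values are illustrative.
type CopyFileRequest struct {
    URLs     []string `json:"urls"`
    FolderID int      `json:"folder_id"`
    Rename   string   `json:"rename,omitempty"`
}

func main() {
    body, _ := json.Marshal(CopyFileRequest{
        URLs:     []string{"https://1fichier.com/?example"}, // hypothetical URL
        FolderID: 12345,
    })
    fmt.Println(string(body)) // {"urls":["https://1fichier.com/?example"],"folder_id":12345}
}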
427 backend/filefabric/api/types.go Normal file
@@ -0,0 +1,427 @@
// Package api has type definitions for filefabric
//
// Converted from the API responses with help from https://mholt.github.io/json-to-go/
package api

import (
    "bytes"
    "encoding/json"
    "fmt"
    "reflect"
    "strings"
    "time"
)

const (
    // TimeFormat for parameters (UTC)
    timeFormatParameters = `2006-01-02 15:04:05`
    // "2020-08-11 10:10:04" for JSON parsing
    timeFormatJSON = `"` + timeFormatParameters + `"`
)

// Time represents date and time information for the
// filefabric API
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
    timeString := (*time.Time)(t).UTC().Format(timeFormatJSON)
    return []byte(timeString), nil
}

var zeroTime = []byte(`"0000-00-00 00:00:00"`)

// UnmarshalJSON turns JSON into a Time (in UTC)
func (t *Time) UnmarshalJSON(data []byte) error {
    // Set a zero time.Time if we receive a zero time input
    if bytes.Equal(data, zeroTime) {
        *t = Time(time.Time{})
        return nil
    }
    newT, err := time.Parse(timeFormatJSON, string(data))
    if err != nil {
        return err
    }
    *t = Time(newT)
    return nil
}

// String turns a Time into a string in UTC suitable for the API
// parameters
func (t Time) String() string {
    return time.Time(t).UTC().Format(timeFormatParameters)
}
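A minimal, self-contained sketch of the quoted-timestamp technique used by the Time type above: the JSON layout is just the parameter layout wrapped in quotes, and the API's zero value "0000-00-00 00:00:00" maps to Go's zero time.Time. The names here are illustrative, not the package's:

package main

import (
    "bytes"
    "fmt"
    "time"
)

const layout = `2006-01-02 15:04:05`

type Time time.Time

var zero = []byte(`"0000-00-00 00:00:00"`)

func (t *Time) UnmarshalJSON(data []byte) error {
    // The API's zero timestamp becomes Go's zero time.
    if bytes.Equal(data, zero) {
        *t = Time(time.Time{})
        return nil
    }
    // Parsing with the quoted layout consumes the JSON quotes directly.
    parsed, err := time.Parse(`"`+layout+`"`, string(data))
    if err != nil {
        return err
    }
    *t = Time(parsed)
    return nil
}

func main() {
    var t Time
    _ = t.UnmarshalJSON([]byte(`"2020-08-11 10:10:04"`))
    fmt.Println(time.Time(t).UTC().Format(layout)) // 2020-08-11 10:10:04
}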
// Int represents an integer which can be represented in JSON as a
// quoted integer or an integer.
type Int int

// MarshalJSON turns an Int into JSON
func (i *Int) MarshalJSON() (out []byte, err error) {
    return json.Marshal((*int)(i))
}

// UnmarshalJSON turns JSON into an Int
func (i *Int) UnmarshalJSON(data []byte) error {
    if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
        data = data[1 : len(data)-1]
    }
    return json.Unmarshal(data, (*int)(i))
}

// String represents a string which can be represented in JSON as a
// quoted string or an integer.
type String string

// MarshalJSON turns a String into JSON
func (s *String) MarshalJSON() (out []byte, err error) {
    return json.Marshal((*string)(s))
}

// UnmarshalJSON turns JSON into a String
func (s *String) UnmarshalJSON(data []byte) error {
    err := json.Unmarshal(data, (*string)(s))
    if err != nil {
        *s = String(data)
    }
    return nil
}
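A standalone sketch of the quote-stripping trick Int.UnmarshalJSON uses, showing that both 42 and "42" decode to the same value:

package main

import (
    "encoding/json"
    "fmt"
)

// Accept both 42 and "42": strip surrounding quotes, then decode as int.
type Int int

func (i *Int) UnmarshalJSON(data []byte) error {
    if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
        data = data[1 : len(data)-1]
    }
    return json.Unmarshal(data, (*int)(i))
}

func main() {
    var a, b struct {
        N Int `json:"n"`
    }
    _ = json.Unmarshal([]byte(`{"n": 42}`), &a)
    _ = json.Unmarshal([]byte(`{"n": "42"}`), &b)
    fmt.Println(a.N, b.N) // 42 42
}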
// Status is returned in all status responses
type Status struct {
    Code    string `json:"status"`
    Message string `json:"statusmessage"`
    TaskID  String `json:"taskid"`
    // Warning string `json:"warning"` // obsolete
}

// Error makes Status satisfy the error interface
func (e *Status) Error() string {
    return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}

// OK returns true if the status is all good
func (e *Status) OK() bool {
    return e.Code == "ok"
}

// GetCode returns the status code if any
func (e *Status) GetCode() string {
    return e.Code
}

// OKError defines an interface for items which can be OK or be an error
type OKError interface {
    error
    OK() bool
    GetCode() string
}

// Check Status satisfies the OKError interface
var _ OKError = (*Status)(nil)

// EmptyResponse is a response which just returns the error condition
type EmptyResponse struct {
    Status
}
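A self-contained sketch of why the OKError interface is useful: any response type that embeds Status can be validated by a single helper. The DeleteResponse trimmed down here and the check helper are illustrative, not from the source:

package main

import "fmt"

type Status struct {
    Code    string
    Message string
}

func (e *Status) Error() string { return fmt.Sprintf("%s (%s)", e.Message, e.Code) }
func (e *Status) OK() bool      { return e.Code == "ok" }

type OKError interface {
    error
    OK() bool
}

// Embedding Status makes every response satisfy OKError automatically.
type DeleteResponse struct {
    Status
    Deleted []string
}

func check(r OKError) error {
    if r.OK() {
        return nil
    }
    return r
}

func main() {
    resp := &DeleteResponse{Status: Status{Code: "error", Message: "not found"}}
    fmt.Println(check(resp)) // not found (error)
}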
// GetTokenByAuthTokenResponse is the response to getTokenByAuthToken
type GetTokenByAuthTokenResponse struct {
    Status
    Token              string `json:"token"`
    UserID             string `json:"userid"`
    AllowLoginRemember string `json:"allowloginremember"`
    LastLogin          Time   `json:"lastlogin"`
    AutoLoginCode      string `json:"autologincode"`
}

// ApplianceInfo is the response to getApplianceInfo
type ApplianceInfo struct {
    Status
    Sitetitle            string `json:"sitetitle"`
    OauthLoginSupport    string `json:"oauthloginsupport"`
    IsAppliance          string `json:"isappliance"`
    SoftwareVersion      string `json:"softwareversion"`
    SoftwareVersionLabel string `json:"softwareversionlabel"`
}

// GetFolderContentsResponse is returned from getFolderContents
type GetFolderContentsResponse struct {
    Status
    Total  int    `json:"total,string"`
    Items  []Item `json:"filelist"`
    Folder Item   `json:"folder"`
    From   Int    `json:"from"`
    //Count int `json:"count"`
    Pid           string `json:"pid"`
    RefreshResult Status `json:"refreshresult"`
    // Curfolder Item `json:"curfolder"` - sometimes returned as "ROOT"?
    Parents           []Item            `json:"parents"`
    CustomPermissions CustomPermissions `json:"custompermissions"`
}

// ItemType determines whether it is a file or a folder
type ItemType uint8

// Types of things in Item
const (
    ItemTypeFile   ItemType = 0
    ItemTypeFolder ItemType = 1
)

// Item is a File or a Folder
type Item struct {
    ID  string `json:"fi_id"`
    PID string `json:"fi_pid"`
    // UID string `json:"fi_uid"`
    Name string `json:"fi_name"`
    // S3Name string `json:"fi_s3name"`
    // Extension string `json:"fi_extension"`
    // Description string `json:"fi_description"`
    Type ItemType `json:"fi_type,string"`
    // Created Time `json:"fi_created"`
    Size        int64  `json:"fi_size,string"`
    ContentType string `json:"fi_contenttype"`
    // Tags string `json:"fi_tags"`
    // MainCode string `json:"fi_maincode"`
    // Public int `json:"fi_public,string"`
    // Provider string `json:"fi_provider"`
    // ProviderFolder string `json:"fi_providerfolder"` // folder
    // Encrypted int `json:"fi_encrypted,string"`
    // StructType string `json:"fi_structtype"`
    // Bname string `json:"fi_bname"` // folder
    // OrgID string `json:"fi_orgid"`
    // Favorite int `json:"fi_favorite,string"`
    // IspartOf string `json:"fi_ispartof"` // folder
    Modified Time `json:"fi_modified"`
    // LastAccessed Time `json:"fi_lastaccessed"`
    // Hits int64 `json:"fi_hits,string"`
    // IP string `json:"fi_ip"` // folder
    // BigDescription string `json:"fi_bigdescription"`
    LocalTime Time `json:"fi_localtime"`
    // OrgfolderID string `json:"fi_orgfolderid"`
    // StorageIP string `json:"fi_storageip"` // folder
    // RemoteTime Time `json:"fi_remotetime"`
    // ProviderOptions string `json:"fi_provideroptions"`
    // Access string `json:"fi_access"`
    // Hidden string `json:"fi_hidden"` // folder
    // VersionOf string `json:"fi_versionof"`
    Trash bool `json:"trash"`
    // Isbucket string `json:"isbucket"` // filelist
    SubFolders int64 `json:"subfolders"` // folder
}
// ItemFields is a | separated list of fields in Item
var ItemFields = mustFields(Item{})

// fields returns the JSON fields in use by opt as a | separated
// string.
func fields(opt interface{}) (pipeTags string, err error) {
    var tags []string
    def := reflect.ValueOf(opt)
    defType := def.Type()
    for i := 0; i < def.NumField(); i++ {
        field := defType.Field(i)
        tag, ok := field.Tag.Lookup("json")
        if !ok {
            continue
        }
        if comma := strings.IndexRune(tag, ','); comma >= 0 {
            tag = tag[:comma]
        }
        if tag == "" {
            continue
        }
        tags = append(tags, tag)
    }
    return strings.Join(tags, "|"), nil
}

// mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure.
func mustFields(opt interface{}) string {
    tags, err := fields(opt)
    if err != nil {
        panic(err)
    }
    return tags
}
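A standalone sketch of the reflection technique behind fields/ItemFields: walk the struct fields, keep the first comma-separated part of each json tag, and pipe-join them. The trimmed Item struct here is illustrative:

package main

import (
    "fmt"
    "reflect"
    "strings"
)

// fields returns the JSON tag names of a struct, pipe-joined,
// mirroring the helper above.
func fields(opt interface{}) string {
    var tags []string
    defType := reflect.TypeOf(opt)
    for i := 0; i < defType.NumField(); i++ {
        tag, ok := defType.Field(i).Tag.Lookup("json")
        if !ok {
            continue
        }
        // Drop options such as ",string" after the name.
        if comma := strings.IndexRune(tag, ','); comma >= 0 {
            tag = tag[:comma]
        }
        if tag != "" {
            tags = append(tags, tag)
        }
    }
    return strings.Join(tags, "|")
}

type Item struct {
    ID   string `json:"fi_id"`
    Name string `json:"fi_name"`
    Size int64  `json:"fi_size,string"`
}

func main() {
    fmt.Println(fields(Item{})) // fi_id|fi_name|fi_size
}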
// CustomPermissions is returned as part of GetFolderContentsResponse
type CustomPermissions struct {
    Upload            string `json:"upload"`
    CreateSubFolder   string `json:"createsubfolder"`
    Rename            string `json:"rename"`
    Delete            string `json:"delete"`
    Move              string `json:"move"`
    ManagePermissions string `json:"managepermissions"`
    ListOnly          string `json:"listonly"`
    VisibleInTrash    string `json:"visibleintrash"`
}

// DoCreateNewFolderResponse is the response from doCreateNewFolder
type DoCreateNewFolderResponse struct {
    Status
    Item Item `json:"file"`
}

// DoInitUploadResponse is the response from doInitUpload
type DoInitUploadResponse struct {
    Status
    ProviderID          string `json:"providerid"`
    UploadCode          string `json:"uploadcode"`
    FileType            string `json:"filetype"`
    DirectUploadSupport string `json:"directuploadsupport"`
    ResumeAllowed       string `json:"resumeallowed"`
}

// UploaderResponse is returned from /cgi-bin/uploader/uploader1.cgi
//
// Sometimes the response is returned as XML and sometimes as JSON
type UploaderResponse struct {
    FileSize int64  `xml:"filesize" json:"filesize,string"`
    MD5      string `xml:"md5" json:"md5"`
    Success  string `xml:"success" json:"success"`
}

// UploadStatus is returned from getUploadStatus
type UploadStatus struct {
    Status
    UploadCode     string `json:"uploadcode"`
    Metafile       string `json:"metafile"`
    Percent        int    `json:"percent,string"`
    Uploaded       int64  `json:"uploaded,string"`
    Size           int64  `json:"size,string"`
    Filename       string `json:"filename"`
    Nofile         string `json:"nofile"`
    Completed      string `json:"completed"`
    Completsuccess string `json:"completsuccess"`
    Completerror   string `json:"completerror"`
}

// DoCompleteUploadResponse is the response to doCompleteUpload
type DoCompleteUploadResponse struct {
    Status
    UploadedSize int64  `json:"uploadedsize,string"`
    StorageIP    string `json:"storageip"`
    UploadedName string `json:"uploadedname"`
    // Versioned []interface{} `json:"versioned"`
    // VersionedID int `json:"versionedid"`
    // Comment interface{} `json:"comment"`
    File Item `json:"file"`
    // UsSize string `json:"us_size"`
    // PaSize string `json:"pa_size"`
    // SpaceInfo SpaceInfo `json:"spaceinfo"`
}

// Providers is returned as part of UploadResponse
type Providers struct {
    Max     string `json:"max"`
    Used    string `json:"used"`
    ID      string `json:"id"`
    Private string `json:"private"`
    Limit   string `json:"limit"`
    Percent int    `json:"percent"`
}

// Total is returned as part of UploadResponse
type Total struct {
    Max        string `json:"max"`
    Used       string `json:"used"`
    ID         string `json:"id"`
    Priused    string `json:"priused"`
    Primax     string `json:"primax"`
    Limit      string `json:"limit"`
    Percent    int    `json:"percent"`
    Pripercent int    `json:"pripercent"`
}

// UploadResponse is returned as part of SpaceInfo
type UploadResponse struct {
    Providers []Providers `json:"providers"`
    Total     Total       `json:"total"`
}

// SpaceInfo is returned as part of DoCompleteUploadResponse
type SpaceInfo struct {
    Response UploadResponse `json:"response"`
    Status   string         `json:"status"`
}

// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
    Status
    Deleted        []string      `json:"deleted"`
    Errors         []interface{} `json:"errors"`
    ID             string        `json:"fi_id"`
    BackgroundTask int           `json:"backgroundtask"`
    UsSize         string        `json:"us_size"`
    PaSize         string        `json:"pa_size"`
    //SpaceInfo SpaceInfo `json:"spaceinfo"`
}

// FileResponse is returned from doRenameFile
type FileResponse struct {
    Status
    Item   Item   `json:"file"`
    Exists string `json:"exists"`
}

// MoveFilesResponse is returned from doMoveFiles
type MoveFilesResponse struct {
    Status
    Filesleft         string   `json:"filesleft"`
    Addedtobackground string   `json:"addedtobackground"`
    Moved             string   `json:"moved"`
    Item              Item     `json:"file"`
    IDs               []string `json:"fi_ids"`
    Length            int      `json:"length"`
    DirID             string   `json:"dir_id"`
    MovedObjects      []Item   `json:"movedobjects"`
    // FolderTasks []interface{} `json:"foldertasks"`
}

// TasksResponse is the response to getUserBackgroundTasks
type TasksResponse struct {
    Status
    Tasks []Task `json:"tasks"`
    Total string `json:"total"`
}

// BtData is part of TasksResponse
type BtData struct {
    Callback string `json:"callback"`
}

// Task describes a task returned in TasksResponse
type Task struct {
    BtID             string `json:"bt_id"`
    UsID             string `json:"us_id"`
    BtType           string `json:"bt_type"`
    BtData           BtData `json:"bt_data"`
    BtStatustext     string `json:"bt_statustext"`
    BtStatusdata     string `json:"bt_statusdata"`
    BtMessage        string `json:"bt_message"`
    BtProcent        string `json:"bt_procent"`
    BtAdded          string `json:"bt_added"`
    BtStatus         string `json:"bt_status"`
    BtCompleted      string `json:"bt_completed"`
    BtTitle          string `json:"bt_title"`
    BtCredentials    string `json:"bt_credentials"`
    BtHidden         string `json:"bt_hidden"`
    BtAutoremove     string `json:"bt_autoremove"`
    BtDevsite        string `json:"bt_devsite"`
    BtPriority       string `json:"bt_priority"`
    BtReport         string `json:"bt_report"`
    BtSitemarker     string `json:"bt_sitemarker"`
    BtExecuteafter   string `json:"bt_executeafter"`
    BtCompletestatus string `json:"bt_completestatus"`
    BtSubtype        string `json:"bt_subtype"`
    BtCanceled       string `json:"bt_canceled"`
    Callback         string `json:"callback"`
    CanBeCanceled    bool   `json:"canbecanceled"`
    CanBeRestarted   bool   `json:"canberestarted"`
    Type             string `json:"type"`
    Status           string `json:"status"`
    Settings         string `json:"settings"`
}
1359 backend/filefabric/filefabric.go Normal file
(File diff suppressed because it is too large)
17 backend/filefabric/filefabric_test.go Normal file
@@ -0,0 +1,17 @@
// Test filefabric filesystem interface
package filefabric_test

import (
    "testing"

    "github.com/rclone/rclone/backend/filefabric"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestFileFabric:",
        NilObject:  (*filefabric.Object)(nil),
    })
}
@@ -4,9 +4,11 @@ package ftp
import (
    "context"
    "crypto/tls"
    "errors"
    "fmt"
    "io"
    "net"
    "net/textproto"
    "os"
    "path"
    "runtime"
    "strings"
@@ -14,18 +16,31 @@ import (
    "time"

    "github.com/jlaffaye/ftp"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/env"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/readers"
)

var (
    currentUser = env.CurrentUser()
)

const (
    minSleep      = 10 * time.Millisecond
    maxSleep      = 2 * time.Second
    decayConstant = 2 // bigger for slower decay, exponential
)
// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
@@ -34,64 +49,121 @@ func init() {
        NewFs: NewFs,
        Options: []fs.Option{{
            Name: "host",
            Help: "FTP host to connect to",
            Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
            Required: true,
            Examples: []fs.OptionExample{{
                Value: "ftp.example.com",
                Help:  "Connect to ftp.example.com",
            }},
        }, {
            Name: "user",
            Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
            Help: "FTP username, leave blank for current username, " + currentUser + ".",
        }, {
            Name: "port",
            Help: "FTP port, leave blank to use default (21)",
            Help: "FTP port, leave blank to use default (21).",
        }, {
            Name: "pass",
            Help: "FTP password",
            Help: "FTP password.",
            IsPassword: true,
            Required:   true,
        }, {
            Name: "tls",
            Help: `Use FTPS over TLS (Implicit)
When using implicit FTP over TLS the client will connect using TLS
right from the start, which in turn breaks the compatibility with
            Help: `Use Implicit FTPS (FTP over TLS).

When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.`,
            Default: false,
        }, {
            Name: "explicit_tls",
            Help: `Use FTP over TLS (Explicit)
When using explicit FTP over TLS the client explicitly request
            Help: `Use Explicit FTPS (FTP over TLS).

When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
            Default: false,
        }, {
            Name: "concurrency",
            Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
            Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
            Default:  0,
            Advanced: true,
        }, {
            Name: "no_check_certificate",
            Help: "Do not verify the TLS certificate of the server",
            Help: "Do not verify the TLS certificate of the server.",
            Default:  false,
            Advanced: true,
        }, {
            Name: "disable_epsv",
            Help: "Disable using EPSV even if server advertises support",
            Help: "Disable using EPSV even if server advertises support.",
            Default:  false,
            Advanced: true,
        }, {
            Name:     "disable_mlsd",
            Help:     "Disable using MLSD even if server advertises support.",
            Default:  false,
            Advanced: true,
        }, {
            Name:     "writing_mdtm",
            Help:     "Use MDTM to set modification time (VsFtpd quirk)",
            Default:  false,
            Advanced: true,
        }, {
            Name:    "idle_timeout",
            Default: fs.Duration(60 * time.Second),
            Help: `Max time before closing idle connections.

If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.

Set to 0 to keep connections indefinitely.
`,
            Advanced: true,
        }, {
            Name:     "close_timeout",
            Help:     "Maximum time to wait for a response to close.",
            Default:  fs.Duration(60 * time.Second),
            Advanced: true,
        }, {
            Name: "tls_cache_size",
            Help: `Size of TLS session cache for all control and data connections.

TLS cache allows to resume TLS sessions and reuse PSK between connections.
Increase if default size is not enough resulting in TLS resumption errors.
Enabled by default. Use 0 to disable.`,
            Default:  32,
            Advanced: true,
        }, {
            Name:     "disable_tls13",
            Help:     "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
            Default:  false,
            Advanced: true,
        }, {
            Name:     "shut_timeout",
            Help:     "Maximum time to wait for data connection closing status.",
            Default:  fs.Duration(60 * time.Second),
            Advanced: true,
        }, {
            Name:    "ask_password",
            Default: false,
            Help: `Allow asking for FTP password when needed.

If this is set and no password is supplied then rclone will ask for a password
`,
            Advanced: true,
        }, {
            Name:     config.ConfigEncoding,
            Help:     config.ConfigEncodingHelp,
            Advanced: true,
            // The FTP protocol can't handle trailing spaces (for instance
            // pureftpd turns them into _)
            //
            // proftpd can't handle '*' in file names
            // pureftpd can't handle '[', ']' or '*'
            // The FTP protocol can't handle trailing spaces
            // (for instance, pureftpd turns them into '_')
            Default: (encoder.Display |
                encoder.EncodeRightSpace),
            Examples: []fs.OptionExample{{
                Value: "Asterisk,Ctl,Dot,Slash",
                Help:  "ProFTPd can't handle '*' in file names",
            }, {
                Value: "BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket",
                Help:  "PureFTPd can't handle '[]' or '*' in file names",
            }, {
                Value: "Ctl,LeftPeriod,Slash",
                Help:  "VsFTPd can't handle file names starting with dot",
            }},
        }},
    })
}
@@ -104,25 +176,40 @@ type Options struct {
    Port              string               `config:"port"`
    TLS               bool                 `config:"tls"`
    ExplicitTLS       bool                 `config:"explicit_tls"`
    TLSCacheSize      int                  `config:"tls_cache_size"`
    DisableTLS13      bool                 `config:"disable_tls13"`
    Concurrency       int                  `config:"concurrency"`
    SkipVerifyTLSCert bool                 `config:"no_check_certificate"`
    DisableEPSV       bool                 `config:"disable_epsv"`
    DisableMLSD       bool                 `config:"disable_mlsd"`
    WritingMDTM       bool                 `config:"writing_mdtm"`
    IdleTimeout       fs.Duration          `config:"idle_timeout"`
    CloseTimeout      fs.Duration          `config:"close_timeout"`
    ShutTimeout       fs.Duration          `config:"shut_timeout"`
    AskPassword       bool                 `config:"ask_password"`
    Enc               encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote FTP server
type Fs struct {
    name     string       // name of this remote
    root     string       // the path we are working on if any
    opt      Options      // parsed options
    features *fs.Features // optional features
    name     string         // name of this remote
    root     string         // the path we are working on if any
    opt      Options        // parsed options
    ci       *fs.ConfigInfo // global config
    features *fs.Features   // optional features
    url      string
    user     string
    pass     string
    dialAddr string
    poolMu   sync.Mutex
    pool     []*ftp.ServerConn
    drain    *time.Timer // used to drain the pool when we stop using the connections
    tokens   *pacer.TokenDispenser
    tlsConf  *tls.Config
    pacer    *fs.Pacer // pacer for FTP connections
    fGetTime bool      // true if the ftp library accepts GetTime
    fSetTime bool      // true if the ftp library accepts SetTime
    fLstTime bool      // true if the List call returns precise time
}

// Object describes an FTP file
@@ -137,6 +224,7 @@ type FileInfo struct {
    Name    string
    Size    uint64
    ModTime time.Time
    precise bool // true if the time is precise
    IsDir   bool
}
@@ -199,51 +287,88 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
    return len(p), nil
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
    if fserrors.ContextError(ctx, &err) {
        return false, err
    }
    switch errX := err.(type) {
    case *textproto.Error:
        switch errX.Code {
        case ftp.StatusNotAvailable:
            return true, err
        }
    }
    return fserrors.ShouldRetry(err), err
}
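A minimal standalone sketch (not the rclone pacer) of how a shouldRetry-style predicate drives a retry loop: the predicate decides whether an error is transient, and the loop handles attempt counting and sleeping.

package main

import (
    "errors"
    "fmt"
    "time"
)

// retry runs fn until it succeeds, the predicate says stop, or the
// attempts run out (the same shape as pacer.Call with shouldRetry).
func retry(attempts int, fn func() error, shouldRetry func(error) bool) error {
    var err error
    for i := 0; i < attempts; i++ {
        if err = fn(); err == nil || !shouldRetry(err) {
            return err
        }
        time.Sleep(10 * time.Millisecond) // a real pacer backs off adaptively
    }
    return err
}

var errBusy = errors.New("421 service not available")

func main() {
    calls := 0
    err := retry(3, func() error {
        calls++
        if calls < 3 {
            return errBusy
        }
        return nil
    }, func(err error) bool { return errors.Is(err, errBusy) })
    fmt.Println(calls, err) // 3 <nil>
}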
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
    fs.Debugf(f, "Connecting to FTP server")
    ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
    if f.opt.TLS && f.opt.ExplicitTLS {
        fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
        return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
    } else if f.opt.TLS {
        tlsConfig := &tls.Config{
            ServerName:         f.opt.Host,
            InsecureSkipVerify: f.opt.SkipVerifyTLSCert,

    // Make ftp library dial with fshttp dialer optionally using TLS
    dial := func(network, address string) (conn net.Conn, err error) {
        conn, err = fshttp.NewDialer(ctx).Dial(network, address)
        if f.tlsConf != nil && err == nil {
            conn = tls.Client(conn, f.tlsConf)
        }
        ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
        return
    }
    ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}

    if f.opt.TLS {
        // Our dialer takes care of TLS but ftp library also needs tlsConf
        // as a trigger for sending PBSZ and PROT options to server.
        ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
    } else if f.opt.ExplicitTLS {
        tlsConfig := &tls.Config{
            ServerName:         f.opt.Host,
            InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
        ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
        // Initial connection needs to be cleartext for explicit TLS
        conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
        if err != nil {
            return nil, err
        }
        ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
        ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
    }
    if f.opt.DisableEPSV {
        ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
    }
    if fs.Config.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
        ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: fs.Config.Dump&fs.DumpAuth != 0}))
    if f.opt.DisableMLSD {
        ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
    }
    c, err := ftp.Dial(f.dialAddr, ftpConfig...)
    if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
        ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
    }
    if f.opt.WritingMDTM {
        ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
    }
    if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
        ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
    }
    err = f.pacer.Call(func() (bool, error) {
        c, err = ftp.Dial(f.dialAddr, ftpConfig...)
        if err != nil {
            return shouldRetry(ctx, err)
        }
        err = c.Login(f.user, f.pass)
        if err != nil {
            _ = c.Quit()
            return shouldRetry(ctx, err)
        }
        return false, nil
    })
    if err != nil {
        fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
        return nil, errors.Wrap(err, "ftpConnection Dial")
        err = fmt.Errorf("failed to make FTP connection to %q: %w", f.dialAddr, err)
    }
    err = c.Login(f.user, f.pass)
    if err != nil {
        _ = c.Quit()
        fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
        return nil, errors.Wrap(err, "ftpConnection Login")
    }
    return c, nil
    return c, err
}
// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
    if f.opt.Concurrency > 0 {
        f.tokens.Get()
    }
    accounting.LimitTPS(ctx)
    f.poolMu.Lock()
    if len(f.pool) > 0 {
        c = f.pool[0]
@@ -253,7 +378,7 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
    if c != nil {
        return c, nil
    }
    c, err = f.ftpConnection()
    c, err = f.ftpConnection(ctx)
    if err != nil && f.opt.Concurrency > 0 {
        f.tokens.Put()
    }
@@ -280,8 +405,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
    *pc = nil
    if err != nil {
        // If not a regular FTP error code then check the connection
        _, isRegularError := errors.Cause(err).(*textproto.Error)
        if !isRegularError {
        var tpErr *textproto.Error
        if !errors.As(err, &tpErr) {
            nopErr := c.NoOp()
            if nopErr != nil {
                fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -292,12 +417,34 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
    }
    f.poolMu.Lock()
    f.pool = append(f.pool, c)
    if f.opt.IdleTimeout > 0 {
        f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
    }
    f.poolMu.Unlock()
}

// Drain the pool of any connections
func (f *Fs) drainPool(ctx context.Context) (err error) {
    f.poolMu.Lock()
    defer f.poolMu.Unlock()
    if f.opt.IdleTimeout > 0 {
        f.drain.Stop()
    }
    if len(f.pool) != 0 {
        fs.Debugf(f, "closing %d unused connections", len(f.pool))
    }
    for i, c := range f.pool {
        if cErr := c.Quit(); cErr != nil {
            err = cErr
        }
        f.pool[i] = nil
    }
    f.pool = nil
    return err
}
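A self-contained sketch of the mutex-guarded connection pool pattern used by getFtpConnection/putFtpConnection above: take from the front, append returned connections to the back, and reuse whatever is idle. The conn type here is a stand-in for *ftp.ServerConn:

package main

import (
    "fmt"
    "sync"
)

type conn struct{ id int }

type pool struct {
    mu    sync.Mutex
    conns []*conn
    next  int
}

// get reuses a pooled connection if one exists, else opens a new one.
func (p *pool) get() *conn {
    p.mu.Lock()
    defer p.mu.Unlock()
    if len(p.conns) > 0 {
        c := p.conns[0]
        p.conns = p.conns[1:]
        return c
    }
    p.next++
    return &conn{id: p.next}
}

// put returns a healthy connection to the pool for reuse.
func (p *pool) put(c *conn) {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.conns = append(p.conns, c)
}

func main() {
    p := &pool{}
    c := p.get()
    p.put(c)
    fmt.Println(p.get().id) // 1: the pooled connection is reused
}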
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
    ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
    // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
    // Parse config into Options struct
    opt := new(Options)
@@ -305,13 +452,18 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
    if err != nil {
        return nil, err
    }
    pass, err := obscure.Reveal(opt.Pass)
    if err != nil {
        return nil, errors.Wrap(err, "NewFS decrypt password")
    pass := ""
    if opt.AskPassword && opt.Pass == "" {
        pass = config.GetPassword("FTP server password")
    } else {
        pass, err = obscure.Reveal(opt.Pass)
        if err != nil {
            return nil, fmt.Errorf("NewFS decrypt password: %w", err)
        }
    }
    user := opt.User
    if user == "" {
        user = os.Getenv("USER")
        user = currentUser
    }
    port := opt.Port
    if port == "" {
@@ -323,24 +475,54 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
    if opt.TLS {
        protocol = "ftps://"
    }
    if opt.TLS && opt.ExplicitTLS {
        return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
    }
    var tlsConfig *tls.Config
    if opt.TLS || opt.ExplicitTLS {
        tlsConfig = &tls.Config{
            ServerName:         opt.Host,
            InsecureSkipVerify: opt.SkipVerifyTLSCert,
        }
        if opt.TLSCacheSize > 0 {
            tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
        }
        if opt.DisableTLS13 {
            tlsConfig.MaxVersion = tls.VersionTLS12
        }
    }
    u := protocol + path.Join(dialAddr+"/", root)
    ci := fs.GetConfig(ctx)
    f := &Fs{
        name:     name,
        root:     root,
        opt:      *opt,
        ci:       ci,
        url:      u,
        user:     user,
        pass:     pass,
        dialAddr: dialAddr,
        tokens:   pacer.NewTokenDispenser(opt.Concurrency),
        tlsConf:  tlsConfig,
        pacer:    fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
    }
    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
    }).Fill(f)
    }).Fill(ctx, f)
    // set the pool drainer timer going
    if f.opt.IdleTimeout > 0 {
        f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
    }
    // Make a connection and pool it to return errors early
    c, err := f.getFtpConnection()
    c, err := f.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "NewFs")
        return nil, fmt.Errorf("NewFs: %w", err)
    }
    f.fGetTime = c.IsGetTimeSupported()
    f.fSetTime = c.IsSetTimeSupported()
    f.fLstTime = c.IsTimePreciseInList()
    if !f.fLstTime && f.fGetTime {
        f.features.SlowModTime = true
    }
    f.putFtpConnection(&c, nil)
    if root != "" {
@@ -352,7 +534,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
        }
        _, err := f.NewObject(ctx, remote)
        if err != nil {
            if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
            if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
                // File doesn't exist so return old f
                f.root = root
                return f, nil
@@ -365,6 +547,12 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
    return f, err
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
    return f.drainPool(ctx)
}
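A short sketch of the TLS knobs wired up in NewFs above: tls.NewLRUClientSessionCache enables session resumption (the tls_cache_size option), and capping MaxVersion at TLS 1.2 is what the disable_tls13 option does. The host name below is illustrative:

package main

import (
    "crypto/tls"
    "fmt"
)

func main() {
    cfg := &tls.Config{
        ServerName:         "ftp.example.com",                // illustrative host
        ClientSessionCache: tls.NewLRUClientSessionCache(32), // the default cache size in the option above
        MaxVersion:         tls.VersionTLS12,                 // effect of disable_tls13
    }
    fmt.Println(cfg.ServerName)
}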
// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
    switch errX := err.(type) {
@@ -409,7 +597,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
}

// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
    // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
    fullPath := path.Join(f.root, remote)
    if fullPath == "" || fullPath == "." || fullPath == "/" {
@@ -423,9 +611,9 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
    dir := path.Dir(fullPath)
    base := path.Base(fullPath)

    c, err := f.getFtpConnection()
    c, err := f.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "findItem")
        return nil, fmt.Errorf("findItem: %w", err)
    }
    files, err := c.List(f.dirFromStandardPath(dir))
    f.putFtpConnection(&c, err)
@@ -445,7 +633,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
    // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
    entry, err := f.findItem(remote)
    entry, err := f.findItem(ctx, remote)
    if err != nil {
        return nil, err
    }
@@ -454,23 +642,22 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
            fs:     f,
            remote: remote,
        }
        info := &FileInfo{
        o.info = &FileInfo{
            Name:    remote,
            Size:    entry.Size,
            ModTime: entry.Time,
            precise: f.fLstTime,
        }
        o.info = info

        return o, nil
    }
    return nil, fs.ErrorObjectNotFound
}

// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(remote string) (exists bool, err error) {
    entry, err := f.findItem(remote)
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
    entry, err := f.findItem(ctx, remote)
    if err != nil {
        return false, errors.Wrap(err, "dirExists")
        return false, fmt.Errorf("dirExists: %w", err)
    }
    if entry != nil && entry.Type == ftp.EntryTypeFolder {
        return true, nil
@@ -489,9 +676,9 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    // defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
    c, err := f.getFtpConnection()
    c, err := f.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "list")
        return nil, fmt.Errorf("list: %w", err)
    }

    var listErr error
@@ -510,7 +697,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    }()

    // Wait for List for up to Timeout seconds
    timer := time.NewTimer(fs.Config.Timeout)
    timer := time.NewTimer(f.ci.TimeoutOrInfinite())
    select {
    case listErr = <-errchan:
        timer.Stop()
@@ -527,9 +714,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    // doesn't exist, so check it really doesn't exist if no
    // entries found.
    if len(files) == 0 {
        exists, err := f.dirExists(dir)
        exists, err := f.dirExists(ctx, dir)
        if err != nil {
            return nil, errors.Wrap(err, "list")
            return nil, fmt.Errorf("list: %w", err)
        }
        if !exists {
            return nil, fs.ErrorDirNotFound
@@ -555,6 +742,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
                Name:    newremote,
                Size:    object.Size,
                ModTime: object.Time,
                precise: f.fLstTime,
            }
            o.info = info
            entries = append(entries, o)
@@ -568,8 +756,18 @@ func (f *Fs) Hashes() hash.Set {
    return 0
}

// Precision shows Modified Time not supported
// Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server:
//  - accepts the MDTM command to get file time (fGetTime)
//    or supports MLSD returning precise file time in the list (fLstTime)
//  - accepts the MFMT command to set file time (fSetTime)
//    or a non-standard form of the MDTM command (fSetTime, too)
//    used by VsFtpd for the same purpose (WritingMDTM)
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) Precision() time.Duration {
    if (f.fGetTime || f.fLstTime) && f.fSetTime {
        return time.Second
    }
    return fs.ModTimeNotSupported
}
@@ -580,9 +778,9 @@ func (f *Fs) Precision() time.Duration {
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    // fs.Debugf(f, "Trying to put file %s", src.Remote())
    err := f.mkParentDir(src.Remote())
    err := f.mkParentDir(ctx, src.Remote())
    if err != nil {
        return nil, errors.Wrap(err, "Put mkParentDir failed")
        return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
    }
    o := &Object{
        fs: f,
@@ -598,14 +796,14 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}

// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
    // defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
    dir := path.Dir(remote)
    base := path.Base(remote)

    c, err := f.getFtpConnection()
    c, err := f.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "getInfo")
        return nil, fmt.Errorf("getInfo: %w", err)
    }
    files, err := c.List(f.dirFromStandardPath(dir))
    f.putFtpConnection(&c, err)
@@ -621,6 +819,7 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
            Name:    remote,
            Size:    file.Size,
            ModTime: file.Time,
            precise: f.fLstTime,
            IsDir:   file.Type == ftp.EntryTypeFolder,
        }
        return info, nil
@@ -630,28 +829,28 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
}

// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error {
func (f *Fs) mkdir(ctx context.Context, abspath string) error {
    abspath = path.Clean(abspath)
    if abspath == "." || abspath == "/" {
        return nil
    }
    fi, err := f.getInfo(abspath)
    fi, err := f.getInfo(ctx, abspath)
    if err == nil {
        if fi.IsDir {
            return nil
        }
        return fs.ErrorIsFile
    } else if err != fs.ErrorObjectNotFound {
        return errors.Wrapf(err, "mkdir %q failed", abspath)
        return fmt.Errorf("mkdir %q failed: %w", abspath, err)
    }
    parent := path.Dir(abspath)
    err = f.mkdir(parent)
    err = f.mkdir(ctx, parent)
    if err != nil {
        return err
    }
    c, connErr := f.getFtpConnection()
    c, connErr := f.getFtpConnection(ctx)
    if connErr != nil {
        return errors.Wrap(connErr, "mkdir")
        return fmt.Errorf("mkdir: %w", connErr)
    }
    err = c.MakeDir(f.dirFromStandardPath(abspath))
    f.putFtpConnection(&c, err)
@@ -669,25 +868,25 @@ func (f *Fs) mkdir(abspath string) error {

// mkParentDir makes the parent of remote if necessary and any
// directories above that
func (f *Fs) mkParentDir(remote string) error {
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
    parent := path.Dir(remote)
    return f.mkdir(path.Join(f.root, parent))
    return f.mkdir(ctx, path.Join(f.root, parent))
}

// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
    // defer fs.Trace(dir, "")("err=%v", &err)
    root := path.Join(f.root, dir)
    return f.mkdir(root)
    return f.mkdir(ctx, root)
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    c, err := f.getFtpConnection()
    c, err := f.getFtpConnection(ctx)
    if err != nil {
        return errors.Wrap(translateErrorFile(err), "Rmdir")
        return fmt.Errorf("Rmdir: %w", translateErrorFile(err))
    }
    err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
    f.putFtpConnection(&c, err)
@@ -701,13 +900,13 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
        fs.Debugf(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
    }
    err := f.mkParentDir(remote)
    err := f.mkParentDir(ctx, remote)
    if err != nil {
        return nil, errors.Wrap(err, "Move mkParentDir failed")
        return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
    }
    c, err := f.getFtpConnection()
    c, err := f.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "Move")
        return nil, fmt.Errorf("Move: %w", err)
    }
    err = c.Rename(
        f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
@@ -715,17 +914,17 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    )
    f.putFtpConnection(&c, err)
    if err != nil {
        return nil, errors.Wrap(err, "Move Rename failed")
        return nil, fmt.Errorf("Move Rename failed: %w", err)
    }
    dstObj, err := f.NewObject(ctx, remote)
    if err != nil {
        return nil, errors.Wrap(err, "Move NewObject failed")
        return nil, fmt.Errorf("Move NewObject failed: %w", err)
    }
    return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -742,26 +941,26 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    dstPath := path.Join(f.root, dstRemote)

    // Check if destination exists
    fi, err := f.getInfo(dstPath)
    fi, err := f.getInfo(ctx, dstPath)
    if err == nil {
        if fi.IsDir {
            return fs.ErrorDirExists
        }
        return fs.ErrorIsFile
    } else if err != fs.ErrorObjectNotFound {
        return errors.Wrapf(err, "DirMove getInfo failed")
        return fmt.Errorf("DirMove getInfo failed: %w", err)
    }

    // Make sure the parent directory exists
    err = f.mkdir(path.Dir(dstPath))
    err = f.mkdir(ctx, path.Dir(dstPath))
    if err != nil {
        return errors.Wrap(err, "DirMove mkParentDir dst failed")
        return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
    }

    // Do the move
    c, err := f.getFtpConnection()
    c, err := f.getFtpConnection(ctx)
    if err != nil {
        return errors.Wrap(err, "DirMove")
        return fmt.Errorf("DirMove: %w", err)
    }
    err = c.Rename(
        f.dirFromStandardPath(srcPath),
@@ -769,7 +968,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    )
    f.putFtpConnection(&c, err)
    if err != nil {
        return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
        return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
    }
    return nil
}
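A standalone sketch of the recursive mkdir strategy above: check whether the path already exists, recurse to create the parent first, then create the leaf. The in-memory existing map stands in for the remote server:

package main

import (
    "fmt"
    "path"
)

var existing = map[string]bool{"/": true}

// mkdir creates abspath and all missing parents, parent-first.
func mkdir(abspath string) error {
    abspath = path.Clean(abspath)
    if abspath == "." || abspath == "/" {
        return nil
    }
    if existing[abspath] {
        return nil // already there, nothing to do
    }
    if err := mkdir(path.Dir(abspath)); err != nil {
        return err
    }
    existing[abspath] = true
    fmt.Println("MKD", abspath)
    return nil
}

func main() {
    _ = mkdir("/a/b/c") // MKD /a, MKD /a/b, MKD /a/b/c
}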
@@ -806,12 +1005,41 @@ func (o *Object) Size() int64 {

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
    if !o.info.precise && o.fs.fGetTime {
        c, err := o.fs.getFtpConnection(ctx)
        if err == nil {
            path := path.Join(o.fs.root, o.remote)
            path = o.fs.opt.Enc.FromStandardPath(path)
            modTime, err := c.GetTime(path)
            if err == nil && o.info != nil {
                o.info.ModTime = modTime
                o.info.precise = true
            }
            o.fs.putFtpConnection(&c, err)
        }
    }
    return o.info.ModTime
}

// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    return nil
    if !o.fs.fSetTime {
        fs.Errorf(o.fs, "SetModTime is not supported")
        return nil
    }
    c, err := o.fs.getFtpConnection(ctx)
    if err != nil {
        return err
    }
    path := path.Join(o.fs.root, o.remote)
    path = o.fs.opt.Enc.FromStandardPath(path)
    err = c.SetTime(path, modTime.In(time.UTC))
    if err == nil && o.info != nil {
        o.info.ModTime = modTime
        o.info.precise = true
    }
    o.fs.putFtpConnection(&c, err)
    return err
}

// Storable returns a boolean as to whether this object is storable
@@ -843,8 +1071,12 @@ func (f *ftpReadCloser) Close() error {
    go func() {
        errchan <- f.rc.Close()
    }()
    // Wait for Close for up to 60 seconds
    timer := time.NewTimer(60 * time.Second)
    // Wait for Close for up to 60 seconds by default
    closeTimeout := f.f.opt.CloseTimeout
    if closeTimeout == 0 {
        closeTimeout = fs.DurationOff
    }
    timer := time.NewTimer(time.Duration(closeTimeout))
    select {
    case err = <-errchan:
        timer.Stop()
@@ -891,14 +1123,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
            }
        }
    }
    c, err := o.fs.getFtpConnection()
    c, err := o.fs.getFtpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "open")
        return nil, fmt.Errorf("open: %w", err)
    }
    fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
    if err != nil {
        o.fs.putFtpConnection(&c, err)
        return nil, errors.Wrap(err, "open")
        return nil, fmt.Errorf("open: %w", err)
    }
    rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
    return rc, nil
@@ -926,21 +1158,35 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
            fs.Debugf(o, "Removed after failed upload: %v", err)
        }
    }
    c, err := o.fs.getFtpConnection()
    c, err := o.fs.getFtpConnection(ctx)
    if err != nil {
        return errors.Wrap(err, "Update")
        return fmt.Errorf("Update: %w", err)
    }
    err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
    // Ignore error 250 here - sent by some servers
    if err != nil {
        switch errX := err.(type) {
        case *textproto.Error:
            switch errX.Code {
            case ftp.StatusRequestedFileActionOK:
                err = nil
            }
        }
    }
    if err != nil {
        _ = c.Quit() // toss this connection to avoid sync errors
        remove()
        // recycle connection in advance to let remove() find a free token
        o.fs.putFtpConnection(nil, err)
        return errors.Wrap(err, "update stor")
        remove()
        return fmt.Errorf("update stor: %w", err)
    }
    o.fs.putFtpConnection(&c, nil)
    o.info, err = o.fs.getInfo(path)
    if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
        return fmt.Errorf("SetModTime: %w", err)
    }
    o.info, err = o.fs.getInfo(ctx, path)
    if err != nil {
        return errors.Wrap(err, "update getinfo")
        return fmt.Errorf("update getinfo: %w", err)
    }
    return nil
}
@@ -950,16 +1196,16 @@ func (o *Object) Remove(ctx context.Context) (err error) {
    // defer fs.Trace(o, "")("err=%v", &err)
    path := path.Join(o.fs.root, o.remote)
    // Check if it's a directory or a file
    info, err := o.fs.getInfo(path)
    info, err := o.fs.getInfo(ctx, path)
    if err != nil {
        return err
    }
    if info.IsDir {
        err = o.fs.Rmdir(ctx, o.remote)
    } else {
        c, err := o.fs.getFtpConnection()
        c, err := o.fs.getFtpConnection(ctx)
        if err != nil {
            return errors.Wrap(err, "Remove")
            return fmt.Errorf("Remove: %w", err)
        }
        err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
        o.fs.putFtpConnection(&c, err)
@@ -973,5 +1219,6 @@ var (
    _ fs.Mover       = &Fs{}
    _ fs.DirMover    = &Fs{}
    _ fs.PutStreamer = &Fs{}
    _ fs.Shutdowner  = &Fs{}
    _ fs.Object      = &Object{}
)
115 backend/ftp/ftp_internal_test.go Normal file
@@ -0,0 +1,115 @@
|
||||
package ftp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type settings map[string]interface{}
|
||||
|
||||
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
|
||||
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
|
||||
configMap := configmap.Simple{}
|
||||
for key, val := range opts {
|
||||
configMap[key] = fmt.Sprintf("%v", val)
|
||||
}
|
||||
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), f.Root())
|
||||
fixFs, err := fs.NewFs(ctx, remote)
|
||||
require.NoError(t, err)
|
||||
return fixFs
|
||||
}
|
||||
|
||||
// test that big file uploads do not cause network i/o timeout
|
||||
func (f *Fs) testUploadTimeout(t *testing.T) {
|
||||
const (
|
||||
fileSize = 100000000 // 100 MiB
|
||||
idleTimeout = 40 * time.Millisecond // small because test server is local
|
||||
maxTime = 10 * time.Second // prevent test hangup
|
||||
)
|
||||
|
||||
if testing.Short() {
|
||||
t.Skip("not running with -short")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
ci := fs.GetConfig(ctx)
|
||||
saveLowLevelRetries := ci.LowLevelRetries
|
||||
saveTimeout := ci.Timeout
|
||||
defer func() {
|
||||
ci.LowLevelRetries = saveLowLevelRetries
|
||||
ci.Timeout = saveTimeout
|
||||
}()
|
||||
ci.LowLevelRetries = 1
|
||||
ci.Timeout = idleTimeout
|
||||
|
||||
upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
|
||||
fixFs := deriveFs(ctx, t, f, settings{
|
||||
"concurrency": concurrency,
|
||||
"shut_timeout": shutTimeout,
|
||||
})
|
||||
|
||||
// Make test object
|
||||
fileTime := fstest.Time("2020-03-08T09:30:00.000000000Z")
|
||||
meta := object.NewStaticObjectInfo("upload-timeout.test", fileTime, int64(fileSize), true, nil, nil)
|
||||
data := readers.NewPatternReader(int64(fileSize))
|
||||
|
||||
// Run upload and ensure maximum time
|
||||
done := make(chan bool)
|
||||
deadline := time.After(maxTime)
|
||||
go func() {
|
||||
obj, err = fixFs.Put(ctx, data, meta)
|
||||
done <- true
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case <-deadline:
|
||||
t.Fatalf("Upload got stuck for %v !", maxTime)
|
||||
}
|
||||
|
||||
return obj, err
|
||||
}
|
||||
|
||||
// non-zero shut_timeout should fix i/o errors
|
||||
obj, err := upload(f.opt.Concurrency, time.Second)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, obj)
|
||||
if obj != nil {
|
||||
_ = obj.Remove(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// rclone must support precise time with ProFtpd and PureFtpd out of the box.
|
||||
// The VsFtpd server does not support the MFMT command to set file time like
|
||||
// other servers but by default supports the MDTM command in the non-standard
|
||||
// two-argument form for the same purpose.
|
||||
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
|
||||
func (f *Fs) testTimePrecision(t *testing.T) {
|
||||
name := f.Name()
|
||||
if pos := strings.Index(name, "{"); pos != -1 {
|
||||
name = name[:pos]
|
||||
}
|
||||
switch name {
|
||||
case "TestFTPProftpd", "TestFTPPureftpd", "TestFTPVsftpd":
|
||||
assert.LessOrEqual(t, f.Precision(), time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// InternalTest dispatches all internal tests
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("UploadTimeout", f.testUploadTimeout)
|
||||
t.Run("TimePrecision", f.testTimePrecision)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
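deriveFs above re-opens the backend under test with modified options by building a connection-string remote of the form "name,key1=val1,key2=val2:root". A minimal sketch of that form, assuming the same fs.NewFs entry point as the test file; the option values are examples only:

    // Re-open an existing FTP backend with overridden options on the fly.
    // The "name,opts:root" connection-string syntax avoids editing the
    // saved config just for one test run.
    remote := fmt.Sprintf("%s,concurrency=4,shut_timeout=1s:%s", f.Name(), f.Root())
    fixFs, err := fs.NewFs(ctx, remote)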
@@ -9,25 +9,27 @@ import (
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
// TestIntegration runs integration tests against rclone FTP server
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFTPProftpd:",
		NilObject:  (*ftp.Object)(nil),
	})
}

func TestIntegration2(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("skipping as -remote is set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFTPRclone:",
		NilObject:  (*ftp.Object)(nil),
	})
}

func TestIntegration3(t *testing.T) {
// TestIntegrationProftpd runs integration tests against proFTPd
func TestIntegrationProftpd(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("skipping as -remote is set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFTPProftpd:",
		NilObject:  (*ftp.Object)(nil),
	})
}

// TestIntegrationPureftpd runs integration tests against pureFTPd
func TestIntegrationPureftpd(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("skipping as -remote is set")
	}
@@ -37,12 +39,13 @@ func TestIntegration3(t *testing.T) {
	})
}

// func TestIntegration4(t *testing.T) {
// 	if *fstest.RemoteName != "" {
// 		t.Skip("skipping as -remote is set")
// 	}
// 	fstests.Run(t, &fstests.Opt{
// 		RemoteName: "TestFTPVsftpd:",
// 		NilObject:  (*ftp.Object)(nil),
// 	})
// }
// TestIntegrationVsftpd runs integration tests against vsFTPd
func TestIntegrationVsftpd(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("skipping as -remote is set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFTPVsftpd:",
		NilObject:  (*ftp.Object)(nil),
	})
}
@@ -16,16 +16,16 @@ import (
	"context"
	"encoding/base64"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
@@ -51,10 +51,10 @@ import (
const (
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
	timeFormatIn                = time.RFC3339
	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00"
	metaMtime                   = "mtime" // key to store mtime under in metadata
	listChunks                  = 1000    // chunk size to read directory listings
	timeFormat                  = time.RFC3339Nano
	metaMtime                   = "mtime"                    // key to store mtime in metadata
	metaMtimeGsutil             = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
	listChunks                  = 1000                       // chunk size to read directory listings
	minSleep                    = 10 * time.Millisecond
)

@@ -76,72 +76,71 @@ func init() {
		Prefix:      "gcs",
		Description: "Google Cloud Storage (this is not Google Drive)",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			saFile, _ := m.Get("service_account_file")
			saCreds, _ := m.Get("service_account_credentials")
			anonymous, _ := m.Get("anonymous")
			if saFile != "" || saCreds != "" || anonymous == "true" {
				return
			}
			err := oauthutil.Config("google cloud storage", name, m, storageConfig, nil)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
				return nil, nil
			}
			return oauthutil.ConfigOut("", &oauthutil.Options{
				OAuth2Config: storageConfig,
			})
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name: "project_number",
			Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
			Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
			Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
		}, {
			Name: "service_account_credentials",
			Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
			Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
			Hide: fs.OptionHideBoth,
		}, {
			Name: "anonymous",
			Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
			Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
			Default: false,
		}, {
			Name: "object_acl",
			Help: "Access Control List for new objects.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Object owner gets OWNER access, and all Authenticated Users get READER access.",
				Help:  "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
			}, {
				Value: "bucketOwnerFullControl",
				Help:  "Object owner gets OWNER access, and project team owners get OWNER access.",
				Help:  "Object owner gets OWNER access.\nProject team owners get OWNER access.",
			}, {
				Value: "bucketOwnerRead",
				Help:  "Object owner gets OWNER access, and project team owners get READER access.",
				Help:  "Object owner gets OWNER access.\nProject team owners get READER access.",
			}, {
				Value: "private",
				Help:  "Object owner gets OWNER access [default if left blank].",
				Help:  "Object owner gets OWNER access.\nDefault if left blank.",
			}, {
				Value: "projectPrivate",
				Help:  "Object owner gets OWNER access, and project team members get access according to their roles.",
				Help:  "Object owner gets OWNER access.\nProject team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Object owner gets OWNER access, and all Users get READER access.",
				Help:  "Object owner gets OWNER access.\nAll Users get READER access.",
			}},
		}, {
			Name: "bucket_acl",
			Help: "Access Control List for new buckets.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Project team owners get OWNER access, and all Authenticated Users get READER access.",
				Help:  "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
			}, {
				Value: "private",
				Help:  "Project team owners get OWNER access [default if left blank].",
				Help:  "Project team owners get OWNER access.\nDefault if left blank.",
			}, {
				Value: "projectPrivate",
				Help:  "Project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Project team owners get OWNER access, and all Users get READER access.",
				Help:  "Project team owners get OWNER access.\nAll Users get READER access.",
			}, {
				Value: "publicReadWrite",
				Help:  "Project team owners get OWNER access, and all Users get WRITER access.",
				Help:  "Project team owners get OWNER access.\nAll Users get WRITER access.",
			}},
		}, {
			Name: "bucket_policy_only",
@@ -164,64 +163,64 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
			Help: "Location for the newly created buckets.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Empty for default location (US).",
				Help:  "Empty for default location (US)",
			}, {
				Value: "asia",
				Help:  "Multi-regional location for Asia.",
				Help:  "Multi-regional location for Asia",
			}, {
				Value: "eu",
				Help:  "Multi-regional location for Europe.",
				Help:  "Multi-regional location for Europe",
			}, {
				Value: "us",
				Help:  "Multi-regional location for United States.",
				Help:  "Multi-regional location for United States",
			}, {
				Value: "asia-east1",
				Help:  "Taiwan.",
				Help:  "Taiwan",
			}, {
				Value: "asia-east2",
				Help:  "Hong Kong.",
				Help:  "Hong Kong",
			}, {
				Value: "asia-northeast1",
				Help:  "Tokyo.",
				Help:  "Tokyo",
			}, {
				Value: "asia-south1",
				Help:  "Mumbai.",
				Help:  "Mumbai",
			}, {
				Value: "asia-southeast1",
				Help:  "Singapore.",
				Help:  "Singapore",
			}, {
				Value: "australia-southeast1",
				Help:  "Sydney.",
				Help:  "Sydney",
			}, {
				Value: "europe-north1",
				Help:  "Finland.",
				Help:  "Finland",
			}, {
				Value: "europe-west1",
				Help:  "Belgium.",
				Help:  "Belgium",
			}, {
				Value: "europe-west2",
				Help:  "London.",
				Help:  "London",
			}, {
				Value: "europe-west3",
				Help:  "Frankfurt.",
				Help:  "Frankfurt",
			}, {
				Value: "europe-west4",
				Help:  "Netherlands.",
				Help:  "Netherlands",
			}, {
				Value: "us-central1",
				Help:  "Iowa.",
				Help:  "Iowa",
			}, {
				Value: "us-east1",
				Help:  "South Carolina.",
				Help:  "South Carolina",
			}, {
				Value: "us-east4",
				Help:  "Northern Virginia.",
				Help:  "Northern Virginia",
			}, {
				Value: "us-west1",
				Help:  "Oregon.",
				Help:  "Oregon",
			}, {
				Value: "us-west2",
				Help:  "California.",
				Help:  "California",
			}},
		}, {
			Name: "storage_class",
@@ -329,7 +328,10 @@ func (f *Fs) Features() *fs.Features {
}

// shouldRetry determines whether a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
func shouldRetry(ctx context.Context, err error) (again bool, errOut error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	again = false
	if err != nil {
		if fserrors.ShouldRetry(err) {
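The shouldRetry change above is the pattern this commit repeats in every backend: the context is checked first so that a cancelled or expired context aborts the pacer's retry loop instead of being retried like a transient network error. A minimal sketch under the same assumptions (rclone's fserrors package):

    // Context-aware retry gate: cancellation is terminal, everything
    // else goes through the usual transient-error classification.
    func shouldRetrySketch(ctx context.Context, err error) (bool, error) {
        if fserrors.ContextError(ctx, &err) {
            return false, err // ctx cancelled or deadline exceeded: give up
        }
        return fserrors.ShouldRetry(err), err
    }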
@@ -370,12 +372,12 @@ func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}

func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
	conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
	if err != nil {
		return nil, errors.Wrap(err, "error processing credentials")
		return nil, fmt.Errorf("error processing credentials: %w", err)
	}
	ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
	ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}

@@ -386,8 +388,7 @@ func (f *Fs) setRoot(root string) {
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.TODO()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	var oAuthClient *http.Client

	// Parse config into Options struct
@@ -407,24 +408,24 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
		loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
		if err != nil {
			return nil, errors.Wrap(err, "error opening service account credentials file")
			return nil, fmt.Errorf("error opening service account credentials file: %w", err)
		}
		opt.ServiceAccountCredentials = string(loadedCreds)
	}
	if opt.Anonymous {
		oAuthClient = &http.Client{}
		oAuthClient = fshttp.NewClient(ctx)
	} else if opt.ServiceAccountCredentials != "" {
		oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
		oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
		if err != nil {
			return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
			return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
		if err != nil {
			ctx := context.Background()
			oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
			if err != nil {
				return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
				return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
			}
		}
	}
@@ -433,7 +434,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		name:  name,
		root:  root,
		opt:   *opt,
		pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
		pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
		cache: bucket.NewCache(),
	}
	f.setRoot(root)
@@ -442,13 +443,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
	}).Fill(f)
	}).Fill(ctx, f)

	// Create a new authorized Drive client.
	f.client = oAuthClient
	f.svc, err = storage.New(f.client)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
		return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
	}

	if f.rootBucket != "" && f.rootDirectory != "" {
@@ -456,7 +457,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		err = f.pacer.Call(func() (bool, error) {
			_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
			return shouldRetry(err)
			return shouldRetry(ctx, err)
		})
		if err == nil {
			newRoot := path.Dir(f.root)
@@ -522,7 +523,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
	var objects *storage.Objects
	err = f.pacer.Call(func() (bool, error) {
		objects, err = list.Context(ctx).Do()
		return shouldRetry(err)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		if gErr, ok := err.(*googleapi.Error); ok {
@@ -565,7 +566,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
			remote = path.Join(bucket, remote)
		}
		// is this a directory marker?
		if isDirectory && object.Size == 0 {
		if isDirectory {
			continue // skip directory marker
		}
		err = fn(remote, object, false)
@@ -625,7 +626,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
	var buckets *storage.Buckets
	err = f.pacer.Call(func() (bool, error) {
		buckets, err = listBuckets.Context(ctx).Do()
		return shouldRetry(err)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
@@ -751,17 +752,17 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
	// service account that only has the "Storage Object Admin" role. See #2193 for details.
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
		return shouldRetry(err)
		return shouldRetry(ctx, err)
	})
	if err == nil {
		// Bucket already exists
		return nil
	} else if gErr, ok := err.(*googleapi.Error); ok {
		if gErr.Code != http.StatusNotFound {
			return errors.Wrap(err, "failed to get bucket")
			return fmt.Errorf("failed to get bucket: %w", err)
		}
	} else {
		return errors.Wrap(err, "failed to get bucket")
		return fmt.Errorf("failed to get bucket: %w", err)
	}

	if f.opt.ProjectNumber == "" {
@@ -786,7 +787,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
			insertBucket.PredefinedAcl(f.opt.BucketACL)
		}
		_, err = insertBucket.Context(ctx).Do()
		return shouldRetry(err)
		return shouldRetry(ctx, err)
	})
	}, nil)
}
@@ -803,7 +804,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	return f.cache.Remove(bucket, func() error {
		return f.pacer.Call(func() (bool, error) {
			err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
			return shouldRetry(err)
			return shouldRetry(ctx, err)
		})
	})
}
@@ -813,7 +814,7 @@ func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -841,20 +842,27 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		remote: remote,
	}

	var newObject *storage.Object
	err = f.pacer.Call(func() (bool, error) {
		copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
		if !f.opt.BucketPolicyOnly {
			copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
	rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
	if !f.opt.BucketPolicyOnly {
		rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
	}
	var rewriteResponse *storage.RewriteResponse
	for {
		err = f.pacer.Call(func() (bool, error) {
			rewriteResponse, err = rewriteRequest.Context(ctx).Do()
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, err
		}
		newObject, err = copyObject.Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
		if rewriteResponse.Done {
			break
		}
		rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
		fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
	}
	// Set the metadata for the new object while we have it
	dstObj.setMetaData(newObject)
	dstObj.setMetaData(rewriteResponse.Resource)
	return dstObj, nil
}
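The Copy hunk above switches from Objects.Copy to Objects.Rewrite. Rewrite may return Done=false with a RewriteToken for large objects, so the call has to be resumed until the copy completes. A minimal sketch of the loop without the pacer, assuming the google.golang.org/api/storage/v1 client used by this backend:

    // Server-side copy via the Rewrite API. Large objects are rewritten
    // in chunks; each partial response carries a token to resume with.
    req := svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
    for {
        res, err := req.Context(ctx).Do()
        if err != nil {
            return nil, err
        }
        if res.Done {
            return res.Resource, nil // the fully rewritten destination object
        }
        req.RewriteToken(res.RewriteToken) // resume where the server stopped
    }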
@@ -913,7 +921,7 @@ func (o *Object) setMetaData(info *storage.Object) {
	// read mtime out of metadata if available
	mtimeString, ok := info.Metadata[metaMtime]
	if ok {
		modTime, err := time.Parse(timeFormatIn, mtimeString)
		modTime, err := time.Parse(timeFormat, mtimeString)
		if err == nil {
			o.modTime = modTime
			return
@@ -921,8 +929,19 @@ func (o *Object) setMetaData(info *storage.Object) {
		fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
	}

	// Fallback to GSUtil mtime
	mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
	if ok {
		unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
		if err == nil {
			o.modTime = time.Unix(unixTimeSec, 0)
			return
		}
		fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
	}

	// Fallback to the Updated time
	modTime, err := time.Parse(timeFormatIn, info.Updated)
	modTime, err := time.Parse(timeFormat, info.Updated)
	if err != nil {
		fs.Logf(o, "Bad time decode: %v", err)
	} else {
@@ -935,7 +954,7 @@ func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, er
	bucket, bucketPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
		return shouldRetry(err)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		if gErr, ok := err.(*googleapi.Error); ok {
@@ -979,7 +998,8 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
	metadata := make(map[string]string, 1)
	metadata[metaMtime] = modTime.Format(timeFormatOut)
	metadata[metaMtime] = modTime.Format(timeFormat)
	metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
	return metadata
}
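The hunks above make the backend write the modification time under two keys at once: rclone's own "mtime" in RFC3339Nano and gsutil's "goog-reserved-file-mtime" in unix seconds, read back in that order with object.Updated as the last resort. A minimal sketch of the read side, as a fragment under those assumptions:

    // Fallback order when decoding the modification time from metadata:
    // rclone's key first, then the gsutil key, then the Updated field.
    if s, ok := meta["mtime"]; ok {
        if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
            return t
        }
    }
    if s, ok := meta["goog-reserved-file-mtime"]; ok {
        if sec, err := strconv.ParseInt(s, 10, 64); err == nil {
            return time.Unix(sec, 0) // gsutil stores whole seconds only
        }
    }

Writing both keys keeps files round-trippable between rclone and gsutil without losing the modification time.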
@@ -991,11 +1011,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
		return err
	}
	// Add the mtime to the existing metadata
	mtime := modTime.Format(timeFormatOut)
	if object.Metadata == nil {
		object.Metadata = make(map[string]string, 1)
	}
	object.Metadata[metaMtime] = mtime
	object.Metadata[metaMtime] = modTime.Format(timeFormat)
	object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
	// Copy the object to itself to update the metadata
	// Using PATCH requires too many permissions
	bucket, bucketPath := o.split()
@@ -1006,7 +1026,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
			copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
		}
		newObject, err = copyObject.Context(ctx).Do()
		return shouldRetry(err)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
@@ -1022,11 +1042,10 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	req, err := http.NewRequest("GET", o.url, nil)
	req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	fs.FixRangeOption(options, o.bytes)
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	var res *http.Response
@@ -1038,7 +1057,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
			_ = res.Body.Close() // ignore error
		}
	}
	return shouldRetry(err)
	return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
@@ -1046,7 +1065,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	_, isRanging := req.Header["Range"]
	if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
		_ = res.Body.Close() // ignore error
		return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
		return nil, fmt.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
	}
	return res.Body, nil
}
@@ -1085,6 +1104,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		object.ContentLanguage = value
	case "content-type":
		object.ContentType = value
	case "x-goog-storage-class":
		object.StorageClass = value
	default:
		const googMetaPrefix = "x-goog-meta-"
		if strings.HasPrefix(lowerKey, googMetaPrefix) {
@@ -1102,7 +1123,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
	}
	newObject, err = insertObject.Context(ctx).Do()
	return shouldRetry(err)
	return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
@@ -1117,7 +1138,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
	bucket, bucketPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
		return shouldRetry(err)
		return shouldRetry(ctx, err)
	})
	return err
}
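The Open hunk above also drops the pre-go1.13 workaround for attaching a context to an HTTP request. A short sketch of the before/after, as comments on illustrative fragments:

    // Before: build the request, then attach the context afterwards.
    req, err := http.NewRequest("GET", url, nil)
    req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

    // After: the context is bound at construction time, so cancellation
    // covers the whole request lifetime including redirects.
    req, err := http.NewRequestWithContext(ctx, "GET", url, nil)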
@@ -6,9 +6,9 @@ package googlephotos
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	golog "log"
	"net/http"
	"net/url"
	"path"
@@ -18,9 +18,9 @@ import (
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
@@ -29,6 +29,7 @@ import (
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
@@ -54,6 +55,7 @@ const (
	minSleep       = 10 * time.Millisecond
	scopeReadOnly  = "https://www.googleapis.com/auth/photoslibrary.readonly"
	scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
	scopeAccess    = 2 // position of access scope in list
)

var (
@@ -62,7 +64,7 @@ var (
		Scopes: []string{
			"openid",
			"profile",
			scopeReadWrite,
			scopeReadWrite, // this must be at position scopeAccess
		},
		Endpoint: google.Endpoint,
		ClientID: rcloneClientID,
@@ -78,36 +80,36 @@ func init() {
		Prefix:      "gphotos",
		Description: "Google Photos",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			// Parse config into Options struct
			opt := new(Options)
			err := configstruct.Set(m, opt)
			if err != nil {
				fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
				return
				return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
			}

			// Fill in the scopes
			if opt.ReadOnly {
				oauthConfig.Scopes[0] = scopeReadOnly
			} else {
				oauthConfig.Scopes[0] = scopeReadWrite
			switch config.State {
			case "":
				// Fill in the scopes
				if opt.ReadOnly {
					oauthConfig.Scopes[scopeAccess] = scopeReadOnly
				} else {
					oauthConfig.Scopes[scopeAccess] = scopeReadWrite
				}
				return oauthutil.ConfigOut("warning", &oauthutil.Options{
					OAuth2Config: oauthConfig,
				})
			case "warning":
				// Warn the user as required by google photos integration
				return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning

IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
			case "warning_done":
				return nil, nil
			}

			// Do the oauth
			err = oauthutil.Config("google photos", name, m, oauthConfig, nil)
			if err != nil {
				golog.Fatalf("Failed to configure token: %v", err)
			}

			// Warn the user
			fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.

`)

			return nil, fmt.Errorf("unknown state %q", config.State)
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name: "read_only",
@@ -130,17 +132,43 @@ you want to read the media.`,
		}, {
			Name:    "start_year",
			Default: 2000,
			Help:     `Year limits the photos to be downloaded to those which are uploaded after the given year`,
			Help:     `Year limits the photos to be downloaded to those which are uploaded after the given year.`,
			Advanced: true,
		}, {
			Name:    "include_archived",
			Default: false,
			Help: `Also view and download archived media.

By default, rclone does not request archived media. Thus, when syncing,
archived media is not visible in directory listings or transferred.

Note that media in albums is always visible and synced, no matter
their archive status.

With this flag, archived media are always visible in directory
listings and transferred.

Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default: (encoder.Base |
				encoder.EncodeCrLf |
				encoder.EncodeInvalidUtf8),
		}}...),
	})
}
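The rewritten Config callback above is re-entrant: rclone calls it repeatedly, passing back the State string from the previous ConfigOut, so multi-step flows (oauth, then a confirmation prompt) no longer drive the terminal directly. A minimal sketch of the state machine under the same assumptions (rclone's fs.ConfigIn/ConfigOut and oauthutil); the prompt text here is abbreviated:

    // Each call handles one state and names the next one, so the flow
    // also works over the rc API, not just an interactive terminal.
    func configSketch(ctx context.Context, name string, m configmap.Mapper, in fs.ConfigIn) (*fs.ConfigOut, error) {
        switch in.State {
        case "": // first call: kick off the oauth dance
            return oauthutil.ConfigOut("warning", &oauthutil.Options{OAuth2Config: oauthConfig})
        case "warning": // second call: show a confirmation to the user
            return fs.ConfigConfirm("warning_done", true, "config_warning", "Uploads count towards your Google Account storage.")
        case "warning_done": // final call: nothing left to do
            return nil, nil
        }
        return nil, fmt.Errorf("unknown state %q", in.State)
    }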
// Options defines the configuration for this backend
type Options struct {
	ReadOnly  bool `config:"read_only"`
	ReadSize  bool `config:"read_size"`
	StartYear int  `config:"start_year"`
	ReadOnly        bool                 `config:"read_only"`
	ReadSize        bool                 `config:"read_size"`
	StartYear       int                  `config:"start_year"`
	IncludeArchived bool                 `config:"include_archived"`
	Enc             encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote storage server
@@ -206,6 +234,10 @@ func (f *Fs) startYear() int {
	return f.opt.StartYear
}

func (f *Fs) includeArchived() bool {
	return f.opt.IncludeArchived
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
@@ -218,7 +250,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

@@ -246,7 +281,7 @@ func errorHandler(resp *http.Response) error {
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
@@ -254,10 +289,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		return nil, err
	}

	baseClient := fshttp.NewClient(fs.Config)
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
	baseClient := fshttp.NewClient(ctx)
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure Box")
		return nil, fmt.Errorf("failed to configure Box: %w", err)
	}

	root = strings.Trim(path.Clean(root), "/")
@@ -272,14 +307,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		unAuth:    rest.NewClient(baseClient),
		srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
		ts:        ts,
		pacer:     fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
		pacer:     fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
		startTime: time.Now(),
		albums:    map[bool]*albums{},
		uploaded:  dirtree.New(),
	}
	f.features = (&fs.Features{
		ReadMimeType: true,
	}).Fill(f)
	}).Fill(ctx, f)
	f.srv.SetErrorHandler(errorHandler)

	_, _, pattern := patterns.match(f.root, "", true)
@@ -288,7 +323,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		var leaf string
		f.root, leaf = path.Split(f.root)
		f.root = strings.TrimRight(f.root, "/")
		_, err := f.NewObject(context.TODO(), leaf)
		_, err := f.NewObject(ctx, leaf)
		if err == nil {
			return f, fs.ErrorIsFile
		}
@@ -307,16 +342,16 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
	var openIDconfig map[string]interface{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", errors.Wrap(err, "couldn't read openID config")
		return "", fmt.Errorf("couldn't read openID config: %w", err)
	}

	// Find userinfo endpoint
	endpoint, ok := openIDconfig[name].(string)
	if !ok {
		return "", errors.Errorf("couldn't find %q from openID config", name)
		return "", fmt.Errorf("couldn't find %q from openID config", name)
	}

	return endpoint, nil
@@ -336,10 +371,10 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't read user info")
		return nil, fmt.Errorf("couldn't read user info: %w", err)
	}
	return userInfo, nil
}
@@ -367,10 +402,10 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
	var res interface{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't revoke token")
		return fmt.Errorf("couldn't revoke token: %w", err)
	}
	fs.Infof(f, "res = %+v", res)
	return nil
@@ -454,10 +489,10 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list albums")
		return nil, fmt.Errorf("couldn't list albums: %w", err)
	}
	newAlbums := result.Albums
	if shared {
@@ -471,7 +506,9 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
		lastID = newAlbums[len(newAlbums)-1].ID
	}
	for i := range newAlbums {
		all.add(&newAlbums[i])
		anAlbum := newAlbums[i]
		anAlbum.Title = f.opt.Enc.FromStandardPath(anAlbum.Title)
		all.add(&anAlbum)
	}
	if result.NextPageToken == "" {
		break
@@ -497,16 +534,22 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
	}
	filter.PageSize = listChunks
	filter.PageToken = ""
	if filter.AlbumID == "" { // album ID and filters cannot be set together, else error 400 INVALID_ARGUMENT
		if filter.Filters == nil {
			filter.Filters = &api.Filters{}
		}
		filter.Filters.IncludeArchivedMedia = &f.opt.IncludeArchived
	}
	lastID := ""
	for {
		var result api.MediaItems
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
			return shouldRetry(resp, err)
			return shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return errors.Wrap(err, "couldn't list files")
			return fmt.Errorf("couldn't list files: %w", err)
		}
		items := result.MediaItems
		if len(items) > 0 && items[0].ID == lastID {
@@ -647,10 +690,10 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create album")
		return nil, fmt.Errorf("couldn't create album: %w", err)
	}
	f.albums[false].add(&result)
	return &result, nil
@@ -782,7 +825,7 @@ func (o *Object) Size() int64 {
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		fs.Debugf(o, "Reading size failed: %v", err)
@@ -833,10 +876,10 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't get media item")
		return fmt.Errorf("couldn't get media item: %w", err)
	}
	o.setMetaData(&item)
	return nil
@@ -910,7 +953,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
@@ -965,13 +1008,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		if err != nil {
			return shouldRetry(resp, err)
			return shouldRetry(ctx, resp, err)
		}
		token, err = rest.ReadBody(resp)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't upload file")
		return fmt.Errorf("couldn't upload file: %w", err)
	}
	uploadToken := strings.TrimSpace(string(token))
	if uploadToken == "" {
@@ -996,17 +1039,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	var result api.BatchCreateResponse
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "failed to create media item")
		return fmt.Errorf("failed to create media item: %w", err)
	}
	if len(result.NewMediaItemResults) != 1 {
		return errors.New("bad response to BatchCreate wrong number of items")
	}
	mediaItemResult := result.NewMediaItemResults[0]
	if mediaItemResult.Status.Code != 0 {
		return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
		return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
	}
	o.setMetaData(&mediaItemResult.MediaItem)

@@ -1028,7 +1071,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
	albumTitle, fileName := match[1], match[2]
	album, ok := o.fs.albums[false].get(albumTitle)
	if !ok {
		return errors.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
		return fmt.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
	}
	opts := rest.Opts{
		Method: "POST",
@@ -1041,10 +1084,10 @@ func (o *Object) Remove(ctx context.Context) (err error) {
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't delete item from album")
		return fmt.Errorf("couldn't delete item from album: %w", err)
	}
	return nil
}

@@ -35,14 +35,14 @@ func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		*fstest.RemoteName = "TestGooglePhotos:"
	}
	f, err := fs.NewFs(*fstest.RemoteName)
	f, err := fs.NewFs(ctx, *fstest.RemoteName)
	if err == fs.ErrorNotFoundInConfigFile {
		t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
	}
	require.NoError(t, err)

	// Create local Fs pointing at testfiles
	localFs, err := fs.NewFs("testfiles")
	localFs, err := fs.NewFs(ctx, "testfiles")
	require.NoError(t, err)

	t.Run("CreateAlbum", func(t *testing.T) {
@@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) {
		assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
	})

	// Check it is there in the date/month/year heirachy
	// Check it is there in the date/month/year hierarchy
	// 2013-07-13 is the creation date of the folder
	checkPresent := func(t *testing.T, objPath string) {
		entries, err := f.List(ctx, objPath)
@@ -155,7 +155,7 @@ func TestIntegration(t *testing.T) {
	})

	t.Run("NewFsIsFile", func(t *testing.T) {
		fNew, err := fs.NewFs(*fstest.RemoteName + remote)
		fNew, err := fs.NewFs(ctx, *fstest.RemoteName+remote)
		assert.Equal(t, fs.ErrorIsFile, err)
		leaf := path.Base(remote)
		o, err := fNew.NewObject(ctx, leaf)

@@ -11,7 +11,6 @@ import (
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/rclone/rclone/fs"
)
@@ -24,6 +23,7 @@ type lister interface {
	listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
	dirTime() time.Time
	startYear() int
	includeArchived() bool
}

// dirPattern describes a single directory pattern
@@ -269,7 +269,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
	year := match[1]
	current, err := time.Parse("2006", year)
	if err != nil {
		return nil, errors.Errorf("bad year %q", match[1])
		return nil, fmt.Errorf("bad year %q", match[1])
	}
	currentYear := current.Year()
	for current.Year() == currentYear {
@@ -283,7 +283,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
	year, err := strconv.Atoi(match[1])
	if err != nil || year < 1000 || year > 3000 {
		return sf, errors.Errorf("bad year %q", match[1])
		return sf, fmt.Errorf("bad year %q", match[1])
	}
	sf = api.SearchFilter{
		Filters: &api.Filters{
@@ -299,14 +299,14 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
	if len(match) >= 3 {
		month, err := strconv.Atoi(match[2])
		if err != nil || month < 1 || month > 12 {
			return sf, errors.Errorf("bad month %q", match[2])
			return sf, fmt.Errorf("bad month %q", match[2])
		}
		sf.Filters.DateFilter.Dates[0].Month = month
	}
	if len(match) >= 4 {
		day, err := strconv.Atoi(match[3])
		if err != nil || day < 1 || day > 31 {
			return sf, errors.Errorf("bad day %q", match[3])
			return sf, fmt.Errorf("bad day %q", match[3])
		}
		sf.Filters.DateFilter.Dates[0].Day = day
	}

@@ -64,6 +64,11 @@ func (f *testLister) startYear() int {
	return 2000
}

// mock includeArchived for testing
func (f *testLister) includeArchived() bool {
	return false
}

func TestPatternMatch(t *testing.T) {
	for testNumber, test := range []struct {
		// input
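The list() hunk above attaches the archived-media switch only to searches without an album ID, because the Photos search API returns 400 INVALID_ARGUMENT when albumId and filters are combined. A short sketch of that guard under the same assumptions (the backend's api.Filters type):

    // Only ID-less searches may carry filters; album listings always
    // include their items regardless of archive status.
    if filter.AlbumID == "" {
        if filter.Filters == nil {
            filter.Filters = &api.Filters{}
        }
        include := f.opt.IncludeArchived
        filter.Filters.IncludeArchivedMedia = &include
    }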
180 backend/hasher/commands.go Normal file
@@ -0,0 +1,180 @@
package hasher

import (
	"context"
	"errors"
	"fmt"
	"path"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/lib/kv"
)

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	switch name {
	case "drop":
		return nil, f.db.Stop(true)
	case "dump", "fulldump":
		return nil, f.dbDump(ctx, name == "fulldump", "")
	case "import", "stickyimport":
		sticky := name == "stickyimport"
		if len(arg) != 2 {
			return nil, errors.New("please provide checksum type and path to sum file")
		}
		return nil, f.dbImport(ctx, arg[0], arg[1], sticky)
	default:
		return nil, fs.ErrorCommandNotFound
	}
}

var commandHelp = []fs.CommandHelp{{
	Name:  "drop",
	Short: "Drop cache",
	Long: `Completely drop checksum cache.
Usage Example:
    rclone backend drop hasher:
`,
}, {
	Name:  "dump",
	Short: "Dump the database",
	Long:  "Dump cache records covered by the current remote",
}, {
	Name:  "fulldump",
	Short: "Full dump of the database",
	Long:  "Dump all cache records in the database",
}, {
	Name:  "import",
	Short: "Import a SUM file",
	Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
Usage Example:
    rclone backend import hasher:subdir md5 /path/to/sum.md5
`,
}, {
	Name:  "stickyimport",
	Short: "Perform fast import of a SUM file",
	Long: `Fill hash cache from a SUM file without verifying file fingerprints.
Usage Example:
    rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
`,
}}

func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
	if root == "" {
		remoteFs, err := cache.Get(ctx, f.opt.Remote)
		if err != nil {
			return err
		}
		root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
	}
	op := &kvDump{
		full: full,
		root: root,
		path: f.db.Path(),
		fs:   f,
	}
	err := f.db.Do(false, op)
	if err == kv.ErrEmpty {
		fs.Infof(op.path, "empty")
		err = nil
	}
	return err
}

func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bool) error {
	var hashType hash.Type
	if err := hashType.Set(hashName); err != nil {
		return err
	}
	if hashType == hash.None {
		return errors.New("please provide a valid hash type")
	}
	if !f.suppHashes.Contains(hashType) {
		return errors.New("unsupported hash type")
	}
	if !f.keepHashes.Contains(hashType) {
		fs.Infof(nil, "Need not import hashes of this type")
		return nil
	}

	_, sumPath, err := fspath.SplitFs(sumRemote)
	if err != nil {
		return err
	}
	sumFs, err := cache.Get(ctx, sumRemote)
	switch err {
	case fs.ErrorIsFile:
		// ok
	case nil:
		return fmt.Errorf("not a file: %s", sumRemote)
	default:
		return err
	}

	sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
	if err != nil {
		return fmt.Errorf("cannot open sum file: %w", err)
	}
	hashes, err := operations.ParseSumFile(ctx, sumObj)
	if err != nil {
		return fmt.Errorf("failed to parse sum file: %w", err)
	}

	if sticky {
		rootPath := f.Fs.Root()
		for remote, hashVal := range hashes {
			key := path.Join(rootPath, remote)
			hashSums := operations.HashSums{hashName: hashVal}
			if err := f.putRawHashes(ctx, key, anyFingerprint, hashSums); err != nil {
				fs.Errorf(nil, "%s: failed to import: %v", remote, err)
			}
		}
		fs.Infof(nil, "Summary: %d checksum(s) imported", len(hashes))
		return nil
	}

	const longImportThreshold = 100
	if len(hashes) > longImportThreshold {
		fs.Infof(nil, "Importing %d checksums. Please wait...", len(hashes))
	}

	doneCount := 0
	err = operations.ListFn(ctx, f, func(obj fs.Object) {
		remote := obj.Remote()
		hash := hashes[remote]
		hashes[remote] = "" // mark as handled
		o, ok := obj.(*Object)
		if ok && hash != "" {
			if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
				fs.Errorf(nil, "%s: failed to import: %v", remote, err)
			}
			accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
			doneCount++
		}
	})
	if err != nil {
		fs.Errorf(nil, "Import failed: %v", err)
	}
	skipCount := 0
	for remote, emptyOrDone := range hashes {
		if emptyOrDone != "" {
			fs.Infof(nil, "Skip vanished object: %s", remote)
			skipCount++
		}
	}
	fs.Infof(nil, "Summary: %d imported, %d skipped", doneCount, skipCount)
	return err
}
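dbImport above uses a mark-and-sweep scheme: it walks the live listing once, blanking each imported entry in the hashes map, so any entry still non-empty afterwards names a sum-file line whose object has vanished. A self-contained toy sketch of the same bookkeeping; the file names and digests are made up for illustration:

    package main

    import "fmt"

    func main() {
        // Parsed SUM file: remote path -> checksum (values truncated).
        sums := map[string]string{"a.txt": "d41d8...", "gone.txt": "9e107..."}
        // Stand-in for the listing walk over live objects.
        for _, remote := range []string{"a.txt"} {
            if sums[remote] != "" {
                // ...store the checksum for remote here...
                sums[remote] = "" // mark as handled
            }
        }
        // Sweep: whatever is left had no matching live object.
        for remote, left := range sums {
            if left != "" {
                fmt.Printf("Skip vanished object: %s\n", remote)
            }
        }
    }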
508 backend/hasher/hasher.go Normal file
@@ -0,0 +1,508 @@
|
||||
// Package hasher implements a checksum handling overlay backend
package hasher

import (
    "context"
    "encoding/gob"
    "errors"
    "fmt"
    "io"
    "path"
    "strings"
    "sync"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fspath"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/kv"
)

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
        Name:        "hasher",
        Description: "Better checksums for other remotes",
        NewFs:       NewFs,
        CommandHelp: commandHelp,
        Options: []fs.Option{{
            Name:     "remote",
            Required: true,
            Help:     "Remote to cache checksums for (e.g. myRemote:path).",
        }, {
            Name:     "hashes",
            Default:  fs.CommaSepList{"md5", "sha1"},
            Advanced: false,
            Help:     "Comma separated list of supported checksum types.",
        }, {
            Name:     "max_age",
            Advanced: false,
            Default:  fs.DurationOff,
            Help:     "Maximum time to keep checksums in cache (0 = no cache, off = cache forever).",
        }, {
            Name:     "auto_size",
            Advanced: true,
            Default:  fs.SizeSuffix(0),
            Help:     "Auto-update checksum for files smaller than this size (disabled by default).",
        }},
    })
}

// Options defines the configuration for this backend
type Options struct {
    Remote   string          `config:"remote"`
    Hashes   fs.CommaSepList `config:"hashes"`
    AutoSize fs.SizeSuffix   `config:"auto_size"`
    MaxAge   fs.Duration     `config:"max_age"`
}

// Fs represents a wrapped fs.Fs
type Fs struct {
    fs.Fs
    name     string
    root     string
    wrapper  fs.Fs
    features *fs.Features
    opt      *Options
    db       *kv.DB
    // fingerprinting
    fpTime bool      // true if using time in fingerprints
    fpHash hash.Type // hash type to use in fingerprints or None
    // hash types triaged by groups
    suppHashes hash.Set // all supported checksum types
    passHashes hash.Set // passed directly to the base without caching
    slowHashes hash.Set // passed to the base and then cached
    autoHashes hash.Set // calculated in-house and cached
    keepHashes hash.Set // checksums to keep in cache (slow + auto)
}

var warnExperimental sync.Once

// NewFs constructs an Fs from the remote:path string
func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs.Fs, error) {
    if !kv.Supported() {
        return nil, errors.New("hasher is not supported on this OS")
    }
    warnExperimental.Do(func() {
        fs.Infof(nil, "Hasher is EXPERIMENTAL!")
    })

    opt := &Options{}
    err := configstruct.Set(cmap, opt)
    if err != nil {
        return nil, err
    }

    if strings.HasPrefix(opt.Remote, fsname+":") {
        return nil, errors.New("can't point remote at itself")
    }
    remotePath := fspath.JoinRootPath(opt.Remote, rpath)
    baseFs, err := cache.Get(ctx, remotePath)
    if err != nil && err != fs.ErrorIsFile {
        return nil, fmt.Errorf("failed to derive base remote %q: %w", opt.Remote, err)
    }

    f := &Fs{
        Fs:   baseFs,
        name: fsname,
        root: rpath,
        opt:  opt,
    }
    baseFeatures := baseFs.Features()
    f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported

    if baseFeatures.SlowHash {
        f.slowHashes = f.Fs.Hashes()
    } else {
        f.passHashes = f.Fs.Hashes()
        f.fpHash = f.passHashes.GetOne()
    }

    f.suppHashes = f.passHashes
    f.suppHashes.Add(f.slowHashes.Array()...)

    for _, hashName := range opt.Hashes {
        var ht hash.Type
        if err := ht.Set(hashName); err != nil {
            return nil, fmt.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
        }
        if !f.slowHashes.Contains(ht) {
            f.autoHashes.Add(ht)
        }
        f.keepHashes.Add(ht)
        f.suppHashes.Add(ht)
    }

    fs.Debugf(f, "Groups by usage: cached %s, passed %s, auto %s, slow %s, supported %s",
        f.keepHashes, f.passHashes, f.autoHashes, f.slowHashes, f.suppHashes)

    var nilSet hash.Set
    if f.keepHashes == nilSet {
        return nil, errors.New("configured hash_names have nothing to keep in cache")
    }

    if f.opt.MaxAge > 0 {
        gob.Register(hashRecord{})
        db, err := kv.Start(ctx, "hasher", f.Fs)
        if err != nil {
            return nil, err
        }
        f.db = db
    }

    stubFeatures := &fs.Features{
        CanHaveEmptyDirectories: true,
        IsLocal:                 true,
        ReadMimeType:            true,
        WriteMimeType:           true,
    }
    f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

    cache.PinUntilFinalized(f.Fs, f)
    return f, err
}

//
// Filesystem
//

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return f.suppHashes }

// String returns a description of the FS
// The "hasher::" prefix is a distinctive feature.
func (f *Fs) String() string {
    return fmt.Sprintf("hasher::%s:%s", f.name, f.root)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs { return f.Fs }

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs { return f.wrapper }

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper }

// Wrap base entries into hasher entries.
func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries, err error) {
    hashEntries = baseEntries[:0] // work inplace
    for _, entry := range baseEntries {
        switch x := entry.(type) {
        case fs.Object:
            hashEntries = append(hashEntries, f.wrapObject(x, nil))
        default:
            hashEntries = append(hashEntries, entry) // trash in - trash out
        }
    }
    return hashEntries, nil
}

// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    if entries, err = f.Fs.List(ctx, dir); err != nil {
        return nil, err
    }
    return f.wrapEntries(entries)
}

// ListR lists the objects and directories recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    return f.Fs.Features().ListR(ctx, dir, func(baseEntries fs.DirEntries) error {
        hashEntries, err := f.wrapEntries(baseEntries)
        if err != nil {
            return err
        }
        return callback(hashEntries)
    })
}

// Purge a directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
    if do := f.Fs.Features().Purge; do != nil {
        if err := do(ctx, dir); err != nil {
            return err
        }
        err := f.db.Do(true, &kvPurge{
            dir: path.Join(f.Fs.Root(), dir),
        })
        if err != nil {
            fs.Errorf(f, "Failed to purge some hashes: %v", err)
        }
        return nil
    }
    return fs.ErrorCantPurge
}

// PutStream uploads to the remote path with indeterminate size.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    if do := f.Fs.Features().PutStream; do != nil {
        _ = f.pruneHash(src.Remote())
        oResult, err := do(ctx, in, src, options...)
        return f.wrapObject(oResult, err), err
    }
    return nil, errors.New("PutStream not supported")
}

// PutUnchecked uploads the object, allowing duplicates.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    if do := f.Fs.Features().PutUnchecked; do != nil {
        _ = f.pruneHash(src.Remote())
        oResult, err := do(ctx, in, src, options...)
        return f.wrapObject(oResult, err), err
    }
    return nil, errors.New("PutUnchecked not supported")
}

// pruneHash deletes hash for a path
func (f *Fs) pruneHash(remote string) error {
    return f.db.Do(true, &kvPrune{
        key: path.Join(f.Fs.Root(), remote),
    })
}

// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) error {
    if do := f.Fs.Features().CleanUp; do != nil {
        return do(ctx)
    }
    return errors.New("CleanUp not supported")
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    if do := f.Fs.Features().About; do != nil {
        return do(ctx)
    }
    return nil, errors.New("About not supported")
}

// ChangeNotify calls the passed function with a path that has had changes.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
    if do := f.Fs.Features().ChangeNotify; do != nil {
        do(ctx, notifyFunc, pollIntervalChan)
    }
}

// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
    if do := f.Fs.Features().UserInfo; do != nil {
        return do(ctx)
    }
    return nil, fs.ErrorNotImplemented
}

// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
    if do := f.Fs.Features().Disconnect; do != nil {
        return do(ctx)
    }
    return fs.ErrorNotImplemented
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
    if do := f.Fs.Features().MergeDirs; do != nil {
        return do(ctx, dirs)
    }
    return errors.New("MergeDirs not supported")
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
    if do := f.Fs.Features().DirCacheFlush; do != nil {
        do()
    }
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
    if do := f.Fs.Features().PublicLink; do != nil {
        return do(ctx, remote, expire, unlink)
    }
    return "", errors.New("PublicLink not supported")
}

// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    do := f.Fs.Features().Copy
    if do == nil {
        return nil, fs.ErrorCantCopy
    }
    o, ok := src.(*Object)
    if !ok {
        return nil, fs.ErrorCantCopy
    }
    oResult, err := do(ctx, o.Object, remote)
    return f.wrapObject(oResult, err), err
}

// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    do := f.Fs.Features().Move
    if do == nil {
        return nil, fs.ErrorCantMove
    }
    o, ok := src.(*Object)
    if !ok {
        return nil, fs.ErrorCantMove
    }
    oResult, err := do(ctx, o.Object, remote)
    if err != nil {
        return nil, err
    }
    _ = f.db.Do(true, &kvMove{
        src: path.Join(f.Fs.Root(), src.Remote()),
        dst: path.Join(f.Fs.Root(), remote),
        dir: false,
        fs:  f,
    })
    return f.wrapObject(oResult, nil), nil
}

// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
    do := f.Fs.Features().DirMove
    if do == nil {
        return fs.ErrorCantDirMove
    }
    srcFs, ok := src.(*Fs)
    if !ok {
        return fs.ErrorCantDirMove
    }
    err := do(ctx, srcFs.Fs, srcRemote, dstRemote)
    if err == nil {
        _ = f.db.Do(true, &kvMove{
            src: path.Join(srcFs.Fs.Root(), srcRemote),
            dst: path.Join(f.Fs.Root(), dstRemote),
            dir: true,
            fs:  f,
        })
    }
    return err
}

// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
    err = f.db.Stop(false)
    if do := f.Fs.Features().Shutdown; do != nil {
        if err2 := do(ctx); err2 != nil {
            err = err2
        }
    }
    return
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    o, err := f.Fs.NewObject(ctx, remote)
    return f.wrapObject(o, err), err
}

//
// Object
//

// Object wraps a base object, adding cached checksums on top
type Object struct {
    fs.Object
    f *Fs
}

// Wrap base object into hasher object
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
    if err != nil || o == nil {
        return nil
    }
    return &Object{Object: o, f: f}
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info { return o.f }

// UnWrap returns the wrapped Object
func (o *Object) UnWrap() fs.Object { return o.Object }

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.Object.String()
}

// ID returns the ID of the Object if possible
func (o *Object) ID() string {
    if doer, ok := o.Object.(fs.IDer); ok {
        return doer.ID()
    }
    return ""
}

// GetTier returns the Tier of the Object if possible
func (o *Object) GetTier() string {
    if doer, ok := o.Object.(fs.GetTierer); ok {
        return doer.GetTier()
    }
    return ""
}

// SetTier sets the Tier of the Object if possible
func (o *Object) SetTier(tier string) error {
    if doer, ok := o.Object.(fs.SetTierer); ok {
        return doer.SetTier(tier)
    }
    return errors.New("SetTier not supported")
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
    if doer, ok := o.Object.(fs.MimeTyper); ok {
        return doer.MimeType(ctx)
    }
    return ""
}

// Check the interfaces are satisfied
var (
    _ fs.Fs              = (*Fs)(nil)
    _ fs.Purger          = (*Fs)(nil)
    _ fs.Copier          = (*Fs)(nil)
    _ fs.Mover           = (*Fs)(nil)
    _ fs.DirMover        = (*Fs)(nil)
    _ fs.Commander       = (*Fs)(nil)
    _ fs.PutUncheckeder  = (*Fs)(nil)
    _ fs.PutStreamer     = (*Fs)(nil)
    _ fs.CleanUpper      = (*Fs)(nil)
    _ fs.UnWrapper       = (*Fs)(nil)
    _ fs.ListRer         = (*Fs)(nil)
    _ fs.Abouter         = (*Fs)(nil)
    _ fs.Wrapper         = (*Fs)(nil)
    _ fs.MergeDirser     = (*Fs)(nil)
    _ fs.DirCacheFlusher = (*Fs)(nil)
    _ fs.ChangeNotifier  = (*Fs)(nil)
    _ fs.PublicLinker    = (*Fs)(nil)
    _ fs.UserInfoer      = (*Fs)(nil)
    _ fs.Disconnecter    = (*Fs)(nil)
    _ fs.Shutdowner      = (*Fs)(nil)
    _ fs.Object          = (*Object)(nil)
    _ fs.ObjectUnWrapper = (*Object)(nil)
    _ fs.IDer            = (*Object)(nil)
    _ fs.SetTierer       = (*Object)(nil)
    _ fs.GetTierer       = (*Object)(nil)
    _ fs.MimeTyper       = (*Object)(nil)
)
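NewFs above wires the hasher overlay onto whatever base remote the `remote` option names. A minimal sketch of building such an overlay programmatically, assuming rclone's on-the-fly connection-string syntax is available in this build; the /tmp/data path is an illustrative assumption, any configured remote would do:

package main

import (
    "context"
    "fmt"

    _ "github.com/rclone/rclone/backend/hasher" // register the overlay
    _ "github.com/rclone/rclone/backend/local"  // register the base backend
    "github.com/rclone/rclone/fs"
)

func main() {
    ctx := context.Background()
    // md5 sums for a local directory, cached with the default max_age
    f, err := fs.NewFs(ctx, ":hasher,remote=/tmp/data,hashes=md5:")
    if err != nil {
        panic(err)
    }
    fmt.Println("supported hash types:", f.Hashes())
}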
78 backend/hasher/hasher_internal_test.go Normal file
@@ -0,0 +1,78 @@
package hasher

import (
    "context"
    "fmt"
    "os"
    "testing"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/rclone/rclone/lib/kv"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
    mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
    item := fstest.Item{Path: name, ModTime: mtime1}
    _, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
    require.NotNil(t, o)
    return o
}

func (f *Fs) testUploadFromCrypt(t *testing.T) {
    // make a temporary local remote
    tempRoot, err := fstest.LocalRemote()
    require.NoError(t, err)
    defer func() {
        _ = os.RemoveAll(tempRoot)
    }()

    // make a temporary crypt remote
    ctx := context.Background()
    pass := obscure.MustObscure("crypt")
    remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
    cryptFs, err := fs.NewFs(ctx, remote)
    require.NoError(t, err)

    // make a test file on the crypt remote
    const dirName = "from_crypt_1"
    const fileName = dirName + "/file_from_crypt_1"
    const longTime = fs.ModTimeNotSupported
    src := putFile(ctx, t, cryptFs, fileName, "doggy froggy")

    // ensure that hash does not exist yet
    _ = f.pruneHash(fileName)
    hashType := f.keepHashes.GetOne()
    hash, err := f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
    assert.Error(t, err)
    assert.Empty(t, hash)

    // upload file to hasher
    in, err := src.Open(ctx)
    require.NoError(t, err)
    dst, err := f.Put(ctx, in, src)
    require.NoError(t, err)
    assert.NotNil(t, dst)

    // check that hash was created
    hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
    assert.NoError(t, err)
    assert.NotEmpty(t, hash)
    //t.Logf("hash is %q", hash)
    _ = operations.Purge(ctx, f, dirName)
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
    if !kv.Supported() {
        t.Skip("hasher is not supported on this OS")
    }
    t.Run("UploadFromCrypt", f.testUploadFromCrypt)
}

var _ fstests.InternalTester = (*Fs)(nil)
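The test composes its crypt connection string with obscure.MustObscure because rclone expects the password option in obscured form. A quick sketch of the round trip; note that obscuring is reversible masking, not encryption:

package main

import (
    "fmt"

    "github.com/rclone/rclone/fs/config/obscure"
)

func main() {
    masked := obscure.MustObscure("crypt")
    plain, err := obscure.Reveal(masked)
    if err != nil {
        panic(err)
    }
    fmt.Printf("masked %q reveals %q\n", masked, plain) // reveals "crypt"
}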
38 backend/hasher/hasher_test.go Normal file
@@ -0,0 +1,38 @@
package hasher_test

import (
    "os"
    "path/filepath"
    "testing"

    "github.com/rclone/rclone/backend/hasher"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/rclone/rclone/lib/kv"

    _ "github.com/rclone/rclone/backend/all" // for integration tests
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    if !kv.Supported() {
        t.Skip("hasher is not supported on this OS")
    }
    opt := fstests.Opt{
        RemoteName: *fstest.RemoteName,
        NilObject:  (*hasher.Object)(nil),
        UnimplementableFsMethods: []string{
            "OpenWriterAt",
        },
        UnimplementableObjectMethods: []string{},
    }
    if *fstest.RemoteName == "" {
        tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")
        opt.ExtraConfig = []fstests.ExtraConfigItem{
            {Name: "TestHasher", Key: "type", Value: "hasher"},
            {Name: "TestHasher", Key: "remote", Value: tempDir},
        }
        opt.RemoteName = "TestHasher:"
    }
    fstests.Run(t, &opt)
}
315 backend/hasher/kv.go Normal file
@@ -0,0 +1,315 @@
package hasher

import (
    "bytes"
    "context"
    "encoding/gob"
    "errors"
    "fmt"
    "strings"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/lib/kv"
)

const (
    timeFormat     = "2006-01-02T15:04:05.000000000-0700"
    anyFingerprint = "*"
)

type hashMap map[hash.Type]string

type hashRecord struct {
    Fp      string // fingerprint
    Hashes  operations.HashSums
    Created time.Time
}

func (r *hashRecord) encode(key string) ([]byte, error) {
    var buf bytes.Buffer
    if err := gob.NewEncoder(&buf).Encode(r); err != nil {
        fs.Debugf(key, "hasher encoding %v: %v", r, err)
        return nil, err
    }
    return buf.Bytes(), nil
}

func (r *hashRecord) decode(key string, data []byte) error {
    if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(r); err != nil {
        fs.Debugf(key, "hasher decoding %q failed: %v", data, err)
        return err
    }
    return nil
}

// kvPrune: prune a single hash
type kvPrune struct {
    key string
}

func (op *kvPrune) Do(ctx context.Context, b kv.Bucket) error {
    return b.Delete([]byte(op.key))
}

// kvPurge: delete a subtree
type kvPurge struct {
    dir string
}

func (op *kvPurge) Do(ctx context.Context, b kv.Bucket) error {
    dir := op.dir
    if !strings.HasSuffix(dir, "/") {
        dir += "/"
    }
    var items []string
    cur := b.Cursor()
    bkey, _ := cur.Seek([]byte(dir))
    for bkey != nil {
        key := string(bkey)
        if !strings.HasPrefix(key, dir) {
            break
        }
        items = append(items, key[len(dir):])
        bkey, _ = cur.Next()
    }
    nerr := 0
    for _, sub := range items {
        if err := b.Delete([]byte(dir + sub)); err != nil {
            nerr++
        }
    }
    fs.Debugf(dir, "%d hashes purged, %d failed", len(items)-nerr, nerr)
    return nil
}

// kvMove: assign hashes to new path
type kvMove struct {
    src string
    dst string
    dir bool
    fs  *Fs
}

func (op *kvMove) Do(ctx context.Context, b kv.Bucket) error {
    src, dst := op.src, op.dst
    if !op.dir {
        err := moveHash(b, src, dst)
        fs.Debugf(op.fs, "moving cached hash %s to %s (err: %v)", src, dst, err)
        return err
    }

    if !strings.HasSuffix(src, "/") {
        src += "/"
    }
    if !strings.HasSuffix(dst, "/") {
        dst += "/"
    }

    var items []string
    cur := b.Cursor()
    bkey, _ := cur.Seek([]byte(src))
    for bkey != nil {
        key := string(bkey)
        if !strings.HasPrefix(key, src) {
            break
        }
        items = append(items, key[len(src):])
        bkey, _ = cur.Next()
    }

    nerr := 0
    for _, suffix := range items {
        srcKey, dstKey := src+suffix, dst+suffix
        err := moveHash(b, srcKey, dstKey)
        fs.Debugf(op.fs, "Rename cache record %s -> %s (err: %v)", srcKey, dstKey, err)
        if err != nil {
            nerr++
        }
    }
    fs.Debugf(op.fs, "%d hashes moved, %d failed", len(items)-nerr, nerr)
    return nil
}

func moveHash(b kv.Bucket, src, dst string) error {
    data := b.Get([]byte(src))
    err := b.Delete([]byte(src))
    if err != nil || len(data) == 0 {
        return err
    }
    return b.Put([]byte(dst), data)
}

// kvGet: get single hash from database
type kvGet struct {
    key  string
    fp   string
    hash string
    val  string
    age  time.Duration
}

func (op *kvGet) Do(ctx context.Context, b kv.Bucket) error {
    data := b.Get([]byte(op.key))
    if len(data) == 0 {
        return errors.New("no record")
    }
    var r hashRecord
    if err := r.decode(op.key, data); err != nil {
        return errors.New("invalid record")
    }
    if !(r.Fp == anyFingerprint || op.fp == anyFingerprint || r.Fp == op.fp) {
        return errors.New("fingerprint changed")
    }
    if time.Since(r.Created) > op.age {
        return errors.New("record timed out")
    }
    if r.Hashes != nil {
        op.val = r.Hashes[op.hash]
    }
    return nil
}

// kvPut: set hashes for an object by key
type kvPut struct {
    key    string
    fp     string
    hashes operations.HashSums
    age    time.Duration
}

func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
    data := b.Get([]byte(op.key))
    var r hashRecord
    if len(data) > 0 {
        err = r.decode(op.key, data)
        if err != nil || r.Fp != op.fp || time.Since(r.Created) > op.age {
            r.Hashes = nil
        }
    }
    if len(r.Hashes) == 0 {
        r.Created = time.Now()
        r.Hashes = operations.HashSums{}
        r.Fp = op.fp
    }

    for hashType, hashVal := range op.hashes {
        r.Hashes[hashType] = hashVal
    }
    if data, err = r.encode(op.key); err != nil {
        return fmt.Errorf("marshal failed: %w", err)
    }
    if err = b.Put([]byte(op.key), data); err != nil {
        return fmt.Errorf("put failed: %w", err)
    }
    return err
}

// kvDump: dump the database.
// Note: long dump can cause concurrent operations to fail.
type kvDump struct {
    full  bool
    root  string
    path  string
    fs    *Fs
    num   int
    total int
}

func (op *kvDump) Do(ctx context.Context, b kv.Bucket) error {
    f, baseRoot, dbPath := op.fs, op.root, op.path

    if op.full {
        total := 0
        num := 0
        _ = b.ForEach(func(bkey, data []byte) error {
            total++
            key := string(bkey)
            include := (baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/"))
            var r hashRecord
            if err := r.decode(key, data); err != nil {
                fs.Errorf(nil, "%s: invalid record: %v", key, err)
                return nil
            }
            fmt.Println(f.dumpLine(&r, key, include, nil))
            if include {
                num++
            }
            return nil
        })
        fs.Infof(dbPath, "%d records out of %d", num, total)
        op.num, op.total = num, total // for unit tests
        return nil
    }

    num := 0
    cur := b.Cursor()
    var bkey, data []byte
    if baseRoot != "" {
        bkey, data = cur.Seek([]byte(baseRoot))
    } else {
        bkey, data = cur.First()
    }
    for bkey != nil {
        key := string(bkey)
        if !(baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/")) {
            break
        }
        var r hashRecord
        if err := r.decode(key, data); err != nil {
            fs.Errorf(nil, "%s: invalid record: %v", key, err)
            continue
        }
        if key = strings.TrimPrefix(key[len(baseRoot):], "/"); key == "" {
            key = "/"
        }
        fmt.Println(f.dumpLine(&r, key, true, nil))
        num++
        bkey, data = cur.Next()
    }
    fs.Infof(dbPath, "%d records", num)
    op.num = num // for unit tests
    return nil
}

func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) string {
    var status string
    switch {
    case !include:
        status = "ext"
    case err != nil:
        status = "bad"
    case r.Fp == anyFingerprint:
        status = "stk"
    default:
        status = "ok "
    }

    var hashes []string
    for _, hashType := range f.keepHashes.Array() {
        hashName := hashType.String()
        hashVal := r.Hashes[hashName]
        if hashVal == "" || err != nil {
            hashVal = "-"
        }
        hashVal = fmt.Sprintf("%-*s", hash.Width(hashType), hashVal)
        hashes = append(hashes, hashName+":"+hashVal)
    }
    hashesStr := strings.Join(hashes, " ")

    age := time.Since(r.Created).Round(time.Second)
    if age > 24*time.Hour {
        age = age.Round(time.Hour)
    }
    if err != nil {
        age = 0
    }
    ageStr := age.String()
    if strings.HasSuffix(ageStr, "h0m0s") {
        ageStr = strings.TrimSuffix(ageStr, "0m0s")
    }

    return fmt.Sprintf("%s %s %9s %s", status, hashesStr, ageStr, path)
}
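hashRecord.encode and decode above are a plain gob round trip, with the key used only for logging. A self-contained sketch of the same serialization, using a simplified record type whose field set mirrors hashRecord (the values are made up):

package main

import (
    "bytes"
    "encoding/gob"
    "fmt"
    "time"
)

type record struct {
    Fp      string            // fingerprint
    Hashes  map[string]string // hash name -> value
    Created time.Time
}

func main() {
    in := record{
        Fp:      "12,2001-02-03T04:05:06.499999999+0000,-",
        Hashes:  map[string]string{"md5": "d41d8cd98f00b204e9800998ecf8427e"},
        Created: time.Now(),
    }
    var buf bytes.Buffer
    if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
        panic(err)
    }
    var out record
    if err := gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(&out); err != nil {
        panic(err)
    }
    fmt.Printf("fingerprint %q, md5 %s\n", out.Fp, out.Hashes["md5"])
}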
305 backend/hasher/object.go Normal file
@@ -0,0 +1,305 @@
package hasher

import (
    "context"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "path"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
)

// obtain hash for an object
func (o *Object) getHash(ctx context.Context, hashType hash.Type) (string, error) {
    maxAge := time.Duration(o.f.opt.MaxAge)
    if maxAge <= 0 {
        return "", nil
    }
    fp := o.fingerprint(ctx)
    if fp == "" {
        return "", errors.New("fingerprint failed")
    }
    return o.f.getRawHash(ctx, hashType, o.Remote(), fp, maxAge)
}

// obtain hash for a path
func (f *Fs) getRawHash(ctx context.Context, hashType hash.Type, remote, fp string, age time.Duration) (string, error) {
    key := path.Join(f.Fs.Root(), remote)
    op := &kvGet{
        key:  key,
        fp:   fp,
        hash: hashType.String(),
        age:  age,
    }
    err := f.db.Do(false, op)
    return op.val, err
}

// put new hashes for an object
func (o *Object) putHashes(ctx context.Context, rawHashes hashMap) error {
    if o.f.opt.MaxAge <= 0 {
        return nil
    }
    fp := o.fingerprint(ctx)
    if fp == "" {
        return nil
    }
    key := path.Join(o.f.Fs.Root(), o.Remote())
    hashes := operations.HashSums{}
    for hashType, hashVal := range rawHashes {
        hashes[hashType.String()] = hashVal
    }
    return o.f.putRawHashes(ctx, key, fp, hashes)
}

// set hashes for a path without any validation
func (f *Fs) putRawHashes(ctx context.Context, key, fp string, hashes operations.HashSums) error {
    return f.db.Do(true, &kvPut{
        key:    key,
        fp:     fp,
        hashes: hashes,
        age:    time.Duration(f.opt.MaxAge),
    })
}

// Hash returns the selected checksum of the file or "" if unavailable.
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string, err error) {
    f := o.f
    if f.passHashes.Contains(hashType) {
        fs.Debugf(o, "pass %s", hashType)
        return o.Object.Hash(ctx, hashType)
    }
    if !f.suppHashes.Contains(hashType) {
        fs.Debugf(o, "unsupp %s", hashType)
        return "", hash.ErrUnsupported
    }
    if hashVal, err = o.getHash(ctx, hashType); err != nil {
        fs.Debugf(o, "getHash: %v", err)
        err = nil
        hashVal = ""
    }
    if hashVal != "" {
        fs.Debugf(o, "cached %s = %q", hashType, hashVal)
        return hashVal, nil
    }
    if f.slowHashes.Contains(hashType) {
        fs.Debugf(o, "slow %s", hashType)
        hashVal, err = o.Object.Hash(ctx, hashType)
        if err == nil && hashVal != "" && f.keepHashes.Contains(hashType) {
            if err = o.putHashes(ctx, hashMap{hashType: hashVal}); err != nil {
                fs.Debugf(o, "putHashes: %v", err)
                err = nil
            }
        }
        return hashVal, err
    }
    if f.autoHashes.Contains(hashType) && o.Size() < int64(f.opt.AutoSize) {
        _ = o.updateHashes(ctx)
        if hashVal, err = o.getHash(ctx, hashType); err != nil {
            fs.Debugf(o, "auto %s = %q (%v)", hashType, hashVal, err)
            err = nil
        }
    }
    return hashVal, err
}

// updateHashes performs implicit "rclone hashsum --download" and updates cache.
func (o *Object) updateHashes(ctx context.Context) error {
    r, err := o.Open(ctx)
    if err != nil {
        fs.Infof(o, "update failed (open): %v", err)
        return err
    }
    defer func() {
        _ = r.Close()
    }()
    if _, err = io.Copy(ioutil.Discard, r); err != nil {
        fs.Infof(o, "update failed (copy): %v", err)
        return err
    }
    return nil
}

// Update the object with the given data, time and size.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    _ = o.f.pruneHash(src.Remote())
    return o.Object.Update(ctx, in, src, options...)
}

// Remove an object.
func (o *Object) Remove(ctx context.Context) error {
    _ = o.f.pruneHash(o.Remote())
    return o.Object.Remove(ctx)
}

// SetModTime sets the modification time of the file.
// Also prunes the cache entry when modtime changes so that
// touching a file will trigger checksum recalculation even
// on backends that don't provide modTime with fingerprint.
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
    if mtime != o.Object.ModTime(ctx) {
        _ = o.f.pruneHash(o.Remote())
    }
    return o.Object.SetModTime(ctx, mtime)
}

// Open opens the file for read.
// Full reads will also update object hashes.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadCloser, err error) {
    size := o.Size()
    var offset, limit int64 = 0, -1
    for _, option := range options {
        switch opt := option.(type) {
        case *fs.SeekOption:
            offset = opt.Offset
        case *fs.RangeOption:
            offset, limit = opt.Decode(size)
        }
    }
    if offset < 0 {
        return nil, errors.New("invalid offset")
    }
    if limit < 0 {
        limit = size - offset
    }
    if r, err = o.Object.Open(ctx, options...); err != nil {
        return nil, err
    }
    if offset != 0 || limit < size {
        // It's a partial read
        return r, err
    }
    return o.f.newHashingReader(ctx, r, func(sums hashMap) {
        if err := o.putHashes(ctx, sums); err != nil {
            fs.Infof(o, "auto hashing error: %v", err)
        }
    })
}

// Put data into the remote path with given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    var (
        o      *Object
        common hash.Set
        rehash bool
        hashes hashMap
    )
    if fsrc := src.Fs(); fsrc != nil {
        common = fsrc.Hashes().Overlap(f.keepHashes)
        // Rehash if source does not have all required hashes or hashing is slow
        rehash = fsrc.Features().SlowHash || common != f.keepHashes
    }

    wrapIn := in
    if rehash {
        r, err := f.newHashingReader(ctx, in, func(sums hashMap) {
            hashes = sums
        })
        fs.Debugf(src, "Rehash in flight due to an incomplete or slow source hash set %v (err: %v)", common, err)
        if err == nil {
            wrapIn = r
        } else {
            rehash = false
        }
    }

    _ = f.pruneHash(src.Remote())
    oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
    o = f.wrapObject(oResult, err)
    if o == nil {
        return nil, err
    }

    if !rehash {
        hashes = hashMap{}
        for _, ht := range common.Array() {
            if h, e := src.Hash(ctx, ht); e == nil && h != "" {
                hashes[ht] = h
            }
        }
    }
    if len(hashes) > 0 {
        err := o.putHashes(ctx, hashes)
        fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
    }
    return o, err
}

type hashingReader struct {
    rd     io.Reader
    hasher *hash.MultiHasher
    fun    func(hashMap)
}

func (f *Fs) newHashingReader(ctx context.Context, rd io.Reader, fun func(hashMap)) (*hashingReader, error) {
    hasher, err := hash.NewMultiHasherTypes(f.keepHashes)
    if err != nil {
        return nil, err
    }
    hr := &hashingReader{
        rd:     rd,
        hasher: hasher,
        fun:    fun,
    }
    return hr, nil
}

func (r *hashingReader) Read(p []byte) (n int, err error) {
    n, err = r.rd.Read(p)
    if err != nil && err != io.EOF {
        r.hasher = nil
    }
    if r.hasher != nil {
        if _, errHash := r.hasher.Write(p[:n]); errHash != nil {
            r.hasher = nil
            err = errHash
        }
    }
    if err == io.EOF && r.hasher != nil {
        r.fun(r.hasher.Sums())
        r.hasher = nil
    }
    return
}

func (r *hashingReader) Close() error {
    if rc, ok := r.rd.(io.ReadCloser); ok {
        return rc.Close()
    }
    return nil
}

// Return object fingerprint or empty string in case of errors
//
// Note that we can't use the generic `fs.Fingerprint` here because
// this fingerprint is used to pick _derived hashes_ that are slow
// to calculate or completely unsupported by the base remote.
//
// The hasher fingerprint must be based on `fsHash`, the first _fast_
// hash supported _by the underlying remote_ (if there is one),
// while `fs.Fingerprint` would select a hash _produced by hasher_,
// creating an unresolvable fingerprint loop.
func (o *Object) fingerprint(ctx context.Context) string {
    size := o.Object.Size()
    timeStr := "-"
    if o.f.fpTime {
        timeStr = o.Object.ModTime(ctx).UTC().Format(timeFormat)
        if timeStr == "" {
            return ""
        }
    }
    hashStr := "-"
    if o.f.fpHash != hash.None {
        var err error
        hashStr, err = o.Object.Hash(ctx, o.f.fpHash)
        if hashStr == "" || err != nil {
            return ""
        }
    }
    return fmt.Sprintf("%d,%s,%s", size, timeStr, hashStr)
}
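hashingReader above computes checksums as a side effect of a full read and fires its callback only at EOF. The standard library expresses the same tee idea directly; a sketch with a single md5 as a stdlib stand-in for hash.MultiHasher (not the hasher types themselves):

package main

import (
    "crypto/md5"
    "fmt"
    "io"
    "strings"
)

func main() {
    src := strings.NewReader("doggy froggy")
    h := md5.New()
    r := io.TeeReader(src, h) // every byte read from src also goes into h
    if _, err := io.Copy(io.Discard, r); err != nil {
        panic(err)
    }
    // only meaningful once the stream is fully consumed,
    // just like the EOF callback in hashingReader
    fmt.Printf("md5 = %x\n", h.Sum(nil))
}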
321 backend/hdfs/fs.go Normal file
@@ -0,0 +1,321 @@
//go:build !plan9
// +build !plan9

package hdfs

import (
    "context"
    "fmt"
    "io"
    "os"
    "os/user"
    "path"
    "strings"
    "time"

    "github.com/colinmarc/hdfs/v2"
    krb "github.com/jcmturner/gokrb5/v8/client"
    "github.com/jcmturner/gokrb5/v8/config"
    "github.com/jcmturner/gokrb5/v8/credentials"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/hash"
)

// Fs represents an HDFS server
type Fs struct {
    name     string
    root     string
    features *fs.Features   // optional features
    opt      Options        // options for this backend
    ci       *fs.ConfigInfo // global config
    client   *hdfs.Client
}

// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
    configPath := os.Getenv("KRB5_CONFIG")
    if configPath == "" {
        configPath = "/etc/krb5.conf"
    }

    cfg, err := config.Load(configPath)
    if err != nil {
        return nil, err
    }

    // Determine the ccache location from the environment, falling back to the
    // default location.
    ccachePath := os.Getenv("KRB5CCNAME")
    if strings.Contains(ccachePath, ":") {
        if strings.HasPrefix(ccachePath, "FILE:") {
            ccachePath = strings.SplitN(ccachePath, ":", 2)[1]
        } else {
            return nil, fmt.Errorf("unusable ccache: %s", ccachePath)
        }
    } else if ccachePath == "" {
        u, err := user.Current()
        if err != nil {
            return nil, err
        }

        ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)
    }

    ccache, err := credentials.LoadCCache(ccachePath)
    if err != nil {
        return nil, err
    }

    client, err := krb.NewFromCCache(ccache, cfg)
    if err != nil {
        return nil, err
    }

    return client, nil
}

// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }

    options := hdfs.ClientOptions{
        Addresses:           []string{opt.Namenode},
        UseDatanodeHostname: false,
    }

    if opt.ServicePrincipalName != "" {
        options.KerberosClient, err = getKerberosClient()
        if err != nil {
            return nil, fmt.Errorf("problem with kerberos authentication: %w", err)
        }
        options.KerberosServicePrincipleName = opt.ServicePrincipalName

        if opt.DataTransferProtection != "" {
            options.DataTransferProtection = opt.DataTransferProtection
        }
    } else {
        options.User = opt.Username
    }

    client, err := hdfs.NewClient(options)
    if err != nil {
        return nil, err
    }

    f := &Fs{
        name:   name,
        root:   root,
        opt:    *opt,
        ci:     fs.GetConfig(ctx),
        client: client,
    }

    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
    }).Fill(ctx, f)

    info, err := f.client.Stat(f.realpath(""))
    if err == nil && !info.IsDir() {
        f.root = path.Dir(f.root)
        return f, fs.ErrorIsFile
    }

    return f, nil
}

// Name of this fs
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// String returns a description of the FS
func (f *Fs) String() string {
    return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
    return time.Second
}

// Hashes are not supported
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.None)
}

// NewObject finds file at remote or return fs.ErrorObjectNotFound
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    realpath := f.realpath(remote)
    fs.Debugf(f, "new [%s]", realpath)

    info, err := f.ensureFile(realpath)
    if err != nil {
        return nil, err
    }

    return &Object{
        fs:      f,
        remote:  remote,
        size:    info.Size(),
        modTime: info.ModTime(),
    }, nil
}

// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    realpath := f.realpath(dir)
    fs.Debugf(f, "list [%s]", realpath)

    err = f.ensureDirectory(realpath)
    if err != nil {
        return nil, err
    }

    list, err := f.client.ReadDir(realpath)
    if err != nil {
        return nil, err
    }
    for _, x := range list {
        stdName := f.opt.Enc.ToStandardName(x.Name())
        remote := path.Join(dir, stdName)
        if x.IsDir() {
            entries = append(entries, fs.NewDir(remote, x.ModTime()))
        } else {
            entries = append(entries, &Object{
                fs:      f,
                remote:  remote,
                size:    x.Size(),
                modTime: x.ModTime()})
        }
    }
    return entries, nil
}

// Put the object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    o := &Object{
        fs:     f,
        remote: src.Remote(),
    }
    err := o.Update(ctx, in, src, options...)
    return o, err
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    return f.Put(ctx, in, src, options...)
}

// Mkdir makes a directory
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    fs.Debugf(f, "mkdir [%s]", f.realpath(dir))
    return f.client.MkdirAll(f.realpath(dir), 0755)
}

// Rmdir deletes the directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    realpath := f.realpath(dir)
    fs.Debugf(f, "rmdir [%s]", realpath)

    err := f.ensureDirectory(realpath)
    if err != nil {
        return err
    }

    // do not remove a non-empty directory
    list, err := f.client.ReadDir(realpath)
    if err != nil {
        return err
    }
    if len(list) > 0 {
        return fs.ErrorDirectoryNotEmpty
    }

    return f.client.Remove(realpath)
}

// Purge deletes all the files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
    realpath := f.realpath(dir)
    fs.Debugf(f, "purge [%s]", realpath)

    err := f.ensureDirectory(realpath)
    if err != nil {
        return err
    }

    return f.client.RemoveAll(realpath)
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    info, err := f.client.StatFs()
    if err != nil {
        return nil, err
    }
    return &fs.Usage{
        Total: fs.NewUsageValue(int64(info.Capacity)),
        Used:  fs.NewUsageValue(int64(info.Used)),
        Free:  fs.NewUsageValue(int64(info.Remaining)),
    }, nil
}

func (f *Fs) ensureDirectory(realpath string) error {
    info, err := f.client.Stat(realpath)

    if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
        return fs.ErrorDirNotFound
    }
    if err != nil {
        return err
    }
    if !info.IsDir() {
        return fs.ErrorDirNotFound
    }

    return nil
}

func (f *Fs) ensureFile(realpath string) (os.FileInfo, error) {
    info, err := f.client.Stat(realpath)

    if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
        return nil, fs.ErrorObjectNotFound
    }
    if err != nil {
        return nil, err
    }
    if info.IsDir() {
        return nil, fs.ErrorObjectNotFound
    }

    return info, nil
}

func (f *Fs) realpath(dir string) string {
    return f.opt.Enc.FromStandardPath(xPath(f.Root(), dir))
}

// Check the interfaces are satisfied
var (
    _ fs.Fs          = (*Fs)(nil)
    _ fs.Purger      = (*Fs)(nil)
    _ fs.PutStreamer = (*Fs)(nil)
    _ fs.Abouter     = (*Fs)(nil)
)
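ensureFile and ensureDirectory above detect a missing path by type-asserting *os.PathError and comparing its Err field. A sketch of the equivalent check with errors.Is, which also matches wrapped errors; os.Stat stands in for the HDFS client here:

package main

import (
    "errors"
    "fmt"
    "os"
)

func main() {
    _, err := os.Stat("/no/such/path")
    if errors.Is(err, os.ErrNotExist) {
        fmt.Println("not found; would map to fs.ErrorDirNotFound / fs.ErrorObjectNotFound")
    }
}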
80 backend/hdfs/hdfs.go Normal file
@@ -0,0 +1,80 @@
//go:build !plan9
// +build !plan9

package hdfs

import (
    "path"
    "strings"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/lib/encoder"
)

func init() {
    fsi := &fs.RegInfo{
        Name:        "hdfs",
        Description: "Hadoop distributed file system",
        NewFs:       NewFs,
        Options: []fs.Option{{
            Name:     "namenode",
            Help:     "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
            Required: true,
        }, {
            Name:     "username",
            Help:     "Hadoop user name.",
            Required: false,
            Examples: []fs.OptionExample{{
                Value: "root",
                Help:  "Connect to hdfs as root.",
            }},
        }, {
            Name: "service_principal_name",
            Help: `Kerberos service principal name for the namenode.

Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. "hdfs/namenode.hadoop.docker"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
            Required: false,
            Advanced: true,
        }, {
            Name: "data_transfer_protection",
            Help: `Kerberos data transfer protection: authentication|integrity|privacy.

Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
            Required: false,
            Examples: []fs.OptionExample{{
                Value: "privacy",
                Help:  "Ensure authentication, integrity and encryption enabled.",
            }},
            Advanced: true,
        }, {
            Name:     config.ConfigEncoding,
            Help:     config.ConfigEncodingHelp,
            Advanced: true,
            Default:  (encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeColon),
        }},
    }
    fs.Register(fsi)
}

// Options for this backend
type Options struct {
    Namenode               string               `config:"namenode"`
    Username               string               `config:"username"`
    ServicePrincipalName   string               `config:"service_principal_name"`
    DataTransferProtection string               `config:"data_transfer_protection"`
    Enc                    encoder.MultiEncoder `config:"encoding"`
}

// xPath makes a correct file path with a leading '/'
func xPath(root string, tail string) string {
    if !strings.HasPrefix(root, "/") {
        root = "/" + root
    }
    return path.Join(root, tail)
}
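xPath above only guarantees a leading slash and then defers to path.Join, which also cleans the result. A quick usage sketch; the function body is copied verbatim from above:

package main

import (
    "fmt"
    "path"
    "strings"
)

// xPath makes a correct file path with a leading '/'
func xPath(root string, tail string) string {
    if !strings.HasPrefix(root, "/") {
        root = "/" + root
    }
    return path.Join(root, tail)
}

func main() {
    fmt.Println(xPath("user/rclone", "dir/file.txt")) // /user/rclone/dir/file.txt
    fmt.Println(xPath("/", "a/../b"))                 // /b (path.Join cleans)
}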
21 backend/hdfs/hdfs_test.go Normal file
@@ -0,0 +1,21 @@
// Test HDFS filesystem interface

//go:build !plan9
// +build !plan9

package hdfs_test

import (
    "testing"

    "github.com/rclone/rclone/backend/hdfs"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestHdfs:",
        NilObject:  (*hdfs.Object)(nil),
    })
}
7 backend/hdfs/hdfs_unsupported.go Normal file
@@ -0,0 +1,7 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files"

//go:build plan9
// +build plan9

package hdfs
178 backend/hdfs/object.go Normal file
@@ -0,0 +1,178 @@
//go:build !plan9
// +build !plan9

package hdfs

import (
    "context"
    "io"
    "path"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/readers"
)

// Object describes an HDFS file
type Object struct {
    fs      *Fs
    remote  string
    size    int64
    modTime time.Time
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
    return o.size
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
    return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    realpath := o.fs.realpath(o.Remote())
    err := o.fs.client.Chtimes(realpath, modTime, modTime)
    if err != nil {
        return err
    }
    o.modTime = modTime
    return nil
}

// Storable returns whether this object is storable
func (o *Object) Storable() bool {
    return true
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.Remote()
}

// Hash is not supported
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
    return "", hash.ErrUnsupported
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    realpath := o.realpath()
    fs.Debugf(o.fs, "open [%s]", realpath)
    f, err := o.fs.client.Open(realpath)
    if err != nil {
        return nil, err
    }

    var offset, limit int64 = 0, -1
    for _, option := range options {
        switch x := option.(type) {
        case *fs.SeekOption:
            offset = x.Offset
        case *fs.RangeOption:
            offset, limit = x.Decode(o.Size())
        }
    }

    _, err = f.Seek(offset, io.SeekStart)
    if err != nil {
        return nil, err
    }

    if limit != -1 {
        in = readers.NewLimitedReadCloser(f, limit)
    } else {
        in = f
    }

    return in, err
}

// Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    realpath := o.fs.realpath(src.Remote())
    dirname := path.Dir(realpath)
    fs.Debugf(o.fs, "update [%s]", realpath)

    err := o.fs.client.MkdirAll(dirname, 0755)
    if err != nil {
        return err
    }

    info, err := o.fs.client.Stat(realpath)
    if err == nil {
        err = o.fs.client.Remove(realpath)
        if err != nil {
            return err
        }
    }

    out, err := o.fs.client.Create(realpath)
    if err != nil {
        return err
    }

    cleanup := func() {
        rerr := o.fs.client.Remove(realpath)
        if rerr != nil {
            fs.Errorf(o.fs, "failed to remove [%v]: %v", realpath, rerr)
        }
    }

    _, err = io.Copy(out, in)
    if err != nil {
        cleanup()
        return err
    }

    err = out.Close()
    if err != nil {
        cleanup()
        return err
    }

    info, err = o.fs.client.Stat(realpath)
    if err != nil {
        return err
    }

    err = o.SetModTime(ctx, src.ModTime(ctx))
    if err != nil {
        return err
    }
    o.size = info.Size()

    return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
    realpath := o.fs.realpath(o.remote)
    fs.Debugf(o.fs, "remove [%s]", realpath)
    return o.fs.client.Remove(realpath)
}

func (o *Object) realpath() string {
    return o.fs.opt.Enc.FromStandardPath(xPath(o.Fs().Root(), o.remote))
}

// Check the interfaces are satisfied
var (
    _ fs.Object = (*Object)(nil)
)
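Both Open implementations in this change decode fs.OpenOption values into an offset and a limit, seek, then cap the reader. The core mechanics in stdlib terms, as a simplified stand-in for the option loop and readers.NewLimitedReadCloser:

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    data := strings.NewReader("0123456789")
    var offset, limit int64 = 3, 4 // e.g. decoded from a RangeOption
    if _, err := data.Seek(offset, io.SeekStart); err != nil {
        panic(err)
    }
    out, err := io.ReadAll(io.LimitReader(data, limit))
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", out) // 3456
}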
@@ -6,6 +6,8 @@ package http

 import (
 	"context"
+	"errors"
+	"fmt"
 	"io"
 	"mime"
 	"net/http"
@@ -16,7 +18,6 @@ import (
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
@@ -38,33 +39,26 @@ func init() {
 		NewFs: NewFs,
 		Options: []fs.Option{{
 			Name:     "url",
-			Help:     "URL of http host to connect to",
+			Help:     "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
 			Required: true,
-			Examples: []fs.OptionExample{{
-				Value: "https://example.com",
-				Help:  "Connect to example.com",
-			}, {
-				Value: "https://user:pass@example.com",
-				Help:  "Connect to example.com using a username and password",
-			}},
 		}, {
 			Name: "headers",
-			Help: `Set HTTP headers for all transactions
+			Help: `Set HTTP headers for all transactions.

-Use this to set additional HTTP headers for all transactions
+Use this to set additional HTTP headers for all transactions.

 The input format is comma separated list of key,value pairs. Standard
 [CSV encoding](https://godoc.org/encoding/csv) may be used.

-For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
+For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

-You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
+You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
 `,
 			Default:  fs.CommaSepList{},
 			Advanced: true,
 		}, {
 			Name: "no_slash",
-			Help: `Set this if the site doesn't end directories with /
+			Help: `Set this if the site doesn't end directories with /.

 Use this if your target website does not use / on the end of
 directories.
@@ -80,7 +74,7 @@ directories.`,
 			Advanced: true,
 		}, {
 			Name: "no_head",
-			Help: `Don't use HEAD requests to find file sizes in dir listing
+			Help: `Don't use HEAD requests to find file sizes in dir listing.

 If your site is being very slow to load then you can try this option.
 Normally rclone does a HEAD request for each potential file in a
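The `headers` option above takes a CSV-encoded list of key,value pairs. As a rough illustration of that format (this is a minimal sketch, not rclone's actual parser), the following splits such a list with encoding/csv and applies the pairs to a request:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"net/http"
	"strings"
)

// applyHeaders parses a comma-separated key,value list such as
// `"Cookie","name=value","Authorization","xxx"` and sets each pair
// on the request. An odd number of fields is an error.
func applyHeaders(req *http.Request, headerList string) error {
	fields, err := csv.NewReader(strings.NewReader(headerList)).Read()
	if err != nil {
		return err
	}
	if len(fields)%2 != 0 {
		return fmt.Errorf("odd number of fields in header list: %d", len(fields))
	}
	for i := 0; i < len(fields); i += 2 {
		req.Header.Set(fields[i], fields[i+1])
	}
	return nil
}

func main() {
	req, _ := http.NewRequest("GET", "https://example.com", nil)
	if err := applyHeaders(req, `"Cookie","name=value","Authorization","xxx"`); err != nil {
		panic(err)
	}
	fmt.Println(req.Header)
}
```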
@@ -115,8 +109,9 @@ type Options struct {
 type Fs struct {
 	name        string
 	root        string
-	features    *fs.Features // optional features
-	opt         Options      // options for this backend
+	features    *fs.Features   // optional features
+	opt         Options        // options for this backend
+	ci          *fs.ConfigInfo // global config
 	endpoint    *url.URL
 	endpointURL string // endpoint as a string
 	httpClient  *http.Client
@@ -138,15 +133,14 @@ func statusError(res *http.Response, err error) error {
 	}
 	if res.StatusCode < 200 || res.StatusCode > 299 {
 		_ = res.Body.Close()
-		return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
+		return fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
 	}
 	return nil
 }

 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.TODO()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -172,7 +166,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}

-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(ctx)

 	var isFile = false
 	if !strings.HasSuffix(u.String(), "/") {
@@ -183,9 +177,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return http.ErrUseLastResponse
 	}
 	// check to see if points to a file
-	req, err := http.NewRequest("HEAD", u.String(), nil)
+	req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
 	if err == nil {
-		req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 		addHeaders(req, opt)
 		res, err := noRedir.Do(req)
 		err = statusError(res, err)
@@ -210,17 +203,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}

+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
+		ci:          ci,
 		httpClient:  client,
 		endpoint:    u,
 		endpointURL: u.String(),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	if isFile {
 		return f, fs.ErrorIsFile
 	}
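Most of the churn in these hunks is the move from github.com/pkg/errors to standard library error wrapping with %w. A small self-contained sketch of that pattern, showing that the wrapped error still matches with errors.Is (the names here are invented for the example):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found")

// stat pretends the lookup failed, wrapping the sentinel with %w.
func stat(path string) error {
	return fmt.Errorf("failed to stat %q: %w", path, errNotFound)
}

func main() {
	err := stat("dir/file.txt")
	fmt.Println(err)                         // failed to stat "dir/file.txt": object not found
	fmt.Println(errors.Is(err, errNotFound)) // true: %w preserves the error chain
}
```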
@@ -383,17 +378,16 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
 	URL := f.url(dir)
 	u, err := url.Parse(URL)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to readDir")
+		return nil, fmt.Errorf("failed to readDir: %w", err)
 	}
 	if !strings.HasSuffix(URL, "/") {
-		return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
+		return nil, fmt.Errorf("internal error: readDir URL %q didn't end in /", URL)
 	}
 	// Do the request
-	req, err := http.NewRequest("GET", URL, nil)
+	req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
 	if err != nil {
-		return nil, errors.Wrap(err, "readDir failed")
+		return nil, fmt.Errorf("readDir failed: %w", err)
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	f.addHeaders(req)
 	res, err := f.httpClient.Do(req)
 	if err == nil {
@@ -404,7 +398,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
 	}
 	err = statusError(res, err)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to readDir")
+		return nil, fmt.Errorf("failed to readDir: %w", err)
 	}

 	contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
@@ -412,10 +406,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
 	case "text/html":
 		names, err = parse(u, res.Body)
 		if err != nil {
-			return nil, errors.Wrap(err, "readDir")
+			return nil, fmt.Errorf("readDir: %w", err)
 		}
 	default:
-		return nil, errors.Errorf("Can't parse content type %q", contentType)
+		return nil, fmt.Errorf("Can't parse content type %q", contentType)
 	}
 	return names, nil
 }
@@ -435,19 +429,20 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}
 	names, err := f.readDir(ctx, dir)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error listing %q", dir)
+		return nil, fmt.Errorf("error listing %q: %w", dir, err)
 	}
 	var (
 		entriesMu sync.Mutex // to protect entries
 		wg        sync.WaitGroup
-		in        = make(chan string, fs.Config.Checkers)
+		checkers  = f.ci.Checkers
+		in        = make(chan string, checkers)
 	)
 	add := func(entry fs.DirEntry) {
 		entriesMu.Lock()
 		entries = append(entries, entry)
 		entriesMu.Unlock()
 	}
-	for i := 0; i < fs.Config.Checkers; i++ {
+	for i := 0; i < checkers; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
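The List hunk above caps concurrency with a pool of checker goroutines fed from a channel, guarding the shared slice with a mutex. A minimal sketch of that pattern with the rclone-specific types stripped out (the checkers count and work items are placeholders):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const checkers = 4 // placeholder for f.ci.Checkers
	var (
		resultsMu sync.Mutex // protects results, as entriesMu protects entries
		results   []string
		wg        sync.WaitGroup
		in        = make(chan string, checkers)
	)
	add := func(r string) {
		resultsMu.Lock()
		results = append(results, r)
		resultsMu.Unlock()
	}
	// Fixed-size worker pool: each goroutine drains the channel.
	for i := 0; i < checkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for name := range in {
				add("checked:" + name) // stand-in for the per-entry HEAD request
			}
		}()
	}
	for _, name := range []string{"a", "b", "c"} {
		in <- name
	}
	close(in)
	wg.Wait()
	fmt.Println(results)
}
```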
@@ -544,11 +539,10 @@ func (o *Object) stat(ctx context.Context) error {
 		return nil
 	}
 	url := o.url()
-	req, err := http.NewRequest("HEAD", url, nil)
+	req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
 	if err != nil {
-		return errors.Wrap(err, "stat failed")
+		return fmt.Errorf("stat failed: %w", err)
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	o.fs.addHeaders(req)
 	res, err := o.fs.httpClient.Do(req)
 	if err == nil && res.StatusCode == http.StatusNotFound {
@@ -556,7 +550,7 @@ func (o *Object) stat(ctx context.Context) error {
 	}
 	err = statusError(res, err)
 	if err != nil {
-		return errors.Wrap(err, "failed to stat")
+		return fmt.Errorf("failed to stat: %w", err)
 	}
 	t, err := http.ParseTime(res.Header.Get("Last-Modified"))
 	if err != nil {
@@ -569,7 +563,7 @@ func (o *Object) stat(ctx context.Context) error {
 	if o.fs.opt.NoSlash {
 		mediaType, _, err := mime.ParseMediaType(o.contentType)
 		if err != nil {
-			return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
+			return fmt.Errorf("failed to parse Content-Type: %q: %w", o.contentType, err)
 		}
 		if mediaType == "text/html" {
 			return fs.ErrorNotAFile
@@ -585,7 +579,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	return errorReadOnly
 }

-// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
+// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
 func (o *Object) Storable() bool {
 	return true
 }
@@ -593,11 +587,10 @@ func (o *Object) Storable() bool {
 // Open a remote http file object for reading. Seek is supported
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	url := o.url()
-	req, err := http.NewRequest("GET", url, nil)
+	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
 	if err != nil {
-		return nil, errors.Wrap(err, "Open failed")
+		return nil, fmt.Errorf("Open failed: %w", err)
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

 	// Add optional headers
 	for k, v := range fs.OpenOptionHeaders(options) {
@@ -609,7 +602,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	res, err := o.fs.httpClient.Do(req)
 	err = statusError(res, err)
 	if err != nil {
-		return nil, errors.Wrap(err, "Open failed")
+		return nil, fmt.Errorf("Open failed: %w", err)
 	}
 	return res.Body, nil
 }
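With these changes, Open reduces to a plain NewRequestWithContext call plus per-request headers. Here is a standalone sketch of the same shape, fetching a byte range with a cancellable context; the URL, range, and function name are illustrative only:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"
)

// openRange fetches bytes [start, end] of url, honouring ctx cancellation.
func openRange(ctx context.Context, url string, start, end int64) (io.ReadCloser, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, fmt.Errorf("open failed: %w", err)
	}
	// Ask the server for just the window we want, much as
	// fs.OpenOptionHeaders turns seek options into a Range header.
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("open failed: %w", err)
	}
	if res.StatusCode < 200 || res.StatusCode > 299 {
		_ = res.Body.Close()
		return nil, fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
	}
	return res.Body, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	body, err := openRange(ctx, "https://example.com/file.bin", 0, 1023)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer body.Close()
	n, _ := io.Copy(io.Discard, body)
	fmt.Println("read", n, "bytes")
}
```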
@@ -15,7 +15,7 @@ import (
 	"time"

 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configfile"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/lib/rest"
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
 	ts := httptest.NewServer(handler)

 	// Configure the remote
-	config.LoadConfig()
+	configfile.Install()
 	// fs.Config.LogLevel = fs.LogLevelDebug
 	// fs.Config.DumpHeaders = true
 	// fs.Config.DumpBodies = true
@@ -69,7 +69,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
 	m, tidy := prepareServer(t)

 	// Instantiate it
-	f, err := NewFs(remoteName, "", m)
+	f, err := NewFs(context.Background(), remoteName, "", m)
 	require.NoError(t, err)

 	return f, tidy
@@ -214,7 +214,7 @@ func TestIsAFileRoot(t *testing.T) {
 	m, tidy := prepareServer(t)
 	defer tidy()

-	f, err := NewFs(remoteName, "one%.txt", m)
+	f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)

 	testListRoot(t, f, false)
@@ -224,7 +224,7 @@ func TestIsAFileSubDir(t *testing.T) {
 	m, tidy := prepareServer(t)
 	defer tidy()

-	f, err := NewFs(remoteName, "three/underthree.txt", m)
+	f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)

 	entries, err := f.List(context.Background(), "")
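The test hunks keep the same httptest pattern and only thread context.Background() into NewFs. A minimal standalone example of the underlying technique, serving a canned response the way these tests serve a fixture directory (a real test would use http.FileServer and a *testing.T):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello from the test server")
	})
	// httptest.NewServer picks a free port and starts listening immediately.
	ts := httptest.NewServer(handler)
	defer ts.Close()

	res, err := http.Get(ts.URL)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)
	fmt.Print(string(body))
}
```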
@@ -5,7 +5,7 @@ import (
 	"net/http"
 	"time"

-	"github.com/ncw/swift"
+	"github.com/ncw/swift/v2"
 	"github.com/rclone/rclone/fs"
 )
@@ -24,7 +24,7 @@ func newAuth(f *Fs) *auth {
 // Request constructs an http.Request for authentication
 //
 // returns nil for not needed
-func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
+func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
 	const retries = 10
 	for try := 1; try <= retries; try++ {
 		err = a.f.getCredentials(context.TODO())
@@ -38,7 +38,7 @@ func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
 	}

 // Response parses the result of an http request
-func (a *auth) Response(resp *http.Response) error {
+func (a *auth) Response(ctx context.Context, resp *http.Response) error {
 	return nil
 }
@@ -4,20 +4,19 @@ package hubic

 // This uses the normal swift mechanism to update the credentials and
 // ignores the expires field returned by the Hubic API. This may need
-// to be revisted after some actual experience.
+// to be revisited after some actual experience.

 import (
 	"context"
 	"encoding/json"
+	"errors"
+	"fmt"
 	"io/ioutil"
-	"log"
 	"net/http"
 	"strings"
 	"time"

-	swiftLib "github.com/ncw/swift"
-	"github.com/pkg/errors"
+	swiftLib "github.com/ncw/swift/v2"
 	"github.com/rclone/rclone/backend/swift"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -56,11 +55,10 @@ func init() {
 		Name:        "hubic",
 		Description: "Hubic",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
-			err := oauthutil.Config("hubic", name, m, oauthConfig, nil)
-			if err != nil {
-				log.Fatalf("Failed to configure token: %v", err)
-			}
+		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+			return oauthutil.ConfigOut("", &oauthutil.Options{
+				OAuth2Config: oauthConfig,
+			})
 		},
 		Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
 	})
@@ -71,7 +69,7 @@ func init() {
 type credentials struct {
 	Token    string `json:"token"`    // OpenStack token
 	Endpoint string `json:"endpoint"` // OpenStack endpoint
-	Expires  string `json:"expires"`  // Expires date - eg "2015-11-09T14:24:56+01:00"
+	Expires  string `json:"expires"`  // Expires date - e.g. "2015-11-09T14:24:56+01:00"
 }

 // Fs represents a remote hubic
@@ -110,11 +108,10 @@ func (f *Fs) String() string {
 //
 // The credentials are read into the Fs
 func (f *Fs) getCredentials(ctx context.Context) (err error) {
-	req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
+	req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
 	if err != nil {
 		return err
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	resp, err := f.client.Do(req)
 	if err != nil {
 		return err
@@ -123,7 +120,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
 	if resp.StatusCode < 200 || resp.StatusCode > 299 {
 		body, _ := ioutil.ReadAll(resp.Body)
 		bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
-		return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
+		return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
 	}
 	decoder := json.NewDecoder(resp.Body)
 	var result credentials
@@ -146,10 +143,10 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
 }

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	client, _, err := oauthutil.NewClient(name, m, oauthConfig)
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+	client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure Hubic")
+		return nil, fmt.Errorf("failed to configure Hubic: %w", err)
 	}

 	f := &Fs{
@@ -157,15 +154,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}

 	// Make the swift Connection
+	ci := fs.GetConfig(ctx)
 	c := &swiftLib.Connection{
 		Auth:           newAuth(f),
-		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
-		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
-		Transport:      fshttp.NewTransport(fs.Config),
+		ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
+		Timeout:        10 * ci.Timeout,        // Use the timeouts in the transport
+		Transport:      fshttp.NewTransport(ctx),
 	}
-	err = c.Authenticate()
+	err = c.Authenticate(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "error authenticating swift connection")
+		return nil, fmt.Errorf("error authenticating swift connection: %w", err)
 	}

 	// Parse config into swift.Options struct
@@ -176,7 +174,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}

 	// Make inner swift Fs from the connection
-	swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
+	swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
 	if err != nil && err != fs.ErrorIsFile {
 		return nil, err
 	}
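The hubic changes move to v2 of the ncw/swift library, whose calls take a context; that is what forces the new signatures in the auth shim above. A rough sketch of opening a v2 connection directly, where the credentials and auth URL are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"time"

	swift "github.com/ncw/swift/v2"
)

func main() {
	ctx := context.Background()
	c := &swift.Connection{
		UserName:       "user",                          // placeholder
		ApiKey:         "key",                           // placeholder
		AuthUrl:        "https://auth.example.com/v1.0", // placeholder
		ConnectTimeout: 10 * time.Second,
		Timeout:        60 * time.Second,
	}
	// In v2 Authenticate takes a context, unlike v1's no-argument form.
	if err := c.Authenticate(ctx); err != nil {
		fmt.Println("auth failed:", err)
		return
	}
	containers, err := c.ContainerNames(ctx, nil)
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}
	fmt.Println(containers)
}
```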
@@ -2,10 +2,9 @@ package api

 import (
 	"encoding/xml"
+	"errors"
 	"fmt"
 	"time"

-	"github.com/pkg/errors"
 )

 const (
@@ -153,9 +152,9 @@ type CustomerInfo struct {
 	AccountType      string      `json:"account_type"`
 	SubscriptionType string      `json:"subscription_type"`
 	Usage            int64       `json:"usage"`
-	Qouta            int64       `json:"quota"`
+	Quota            int64       `json:"quota"`
 	BusinessUsage    int64       `json:"business_usage"`
-	BusinessQouta    int64       `json:"business_quota"`
+	BusinessQuota    int64       `json:"business_quota"`
 	WriteLocked      bool        `json:"write_locked"`
 	ReadLocked       bool        `json:"read_locked"`
 	LockedCause      interface{} `json:"locked_cause"`
@@ -368,6 +367,7 @@ type JottaFile struct {
 	XMLName         xml.Name
 	Name            string `xml:"name,attr"`
 	Deleted         Flag   `xml:"deleted,attr"`
+	PublicURI       string `xml:"publicURI"`
 	PublicSharePath string `xml:"publicSharePath"`
 	State           string `xml:"currentRevision>state"`
 	CreatedAt       Time   `xml:"currentRevision>created"`
@@ -386,7 +386,7 @@ type Error struct {
 	Cause string `xml:"cause"`
 }

-// Error returns a string for the error and statistifes the error interface
+// Error returns a string for the error and satisfies the error interface
 func (e *Error) Error() string {
 	out := fmt.Sprintf("error %d", e.StatusCode)
 	if e.Message != "" {
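The JottaFile struct above uses encoding/xml's `a>b` tag paths to pull fields out of nested elements such as currentRevision. A small self-contained sketch of that technique; the XML document and field names here are invented for the example:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

type File struct {
	XMLName xml.Name `xml:"file"`
	Name    string   `xml:"name,attr"`
	// The a>b path reaches into the nested <currentRevision> element.
	State string `xml:"currentRevision>state"`
	Size  int64  `xml:"currentRevision>size"`
}

func main() {
	doc := `<file name="report.txt">
		<currentRevision>
			<state>COMPLETED</state>
			<size>12345</size>
		</currentRevision>
	</file>`
	var f File
	if err := xml.Unmarshal([]byte(doc), &f); err != nil {
		panic(err)
	}
	fmt.Printf("%s: state=%s size=%d\n", f.Name, f.State, f.Size)
}
```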
(File diff suppressed because it is too large)
@@ -32,29 +32,29 @@ func init() {
 		NewFs: NewFs,
 		Options: []fs.Option{{
 			Name:     "endpoint",
-			Help:     "The Koofr API endpoint to use",
+			Help:     "The Koofr API endpoint to use.",
 			Default:  "https://app.koofr.net",
 			Required: true,
 			Advanced: true,
 		}, {
 			Name:     "mountid",
-			Help:     "Mount ID of the mount to use. If omitted, the primary mount is used.",
+			Help:     "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
 			Required: false,
 			Default:  "",
 			Advanced: true,
 		}, {
 			Name:     "setmtime",
-			Help:     "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
+			Help:     "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
 			Default:  true,
 			Required: true,
 			Advanced: true,
 		}, {
 			Name:     "user",
-			Help:     "Your Koofr user name",
+			Help:     "Your Koofr user name.",
 			Required: true,
 		}, {
 			Name:       "password",
-			Help:       "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
+			Help:       "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
 			IsPassword: true,
 			Required:   true,
 		}, {
@@ -256,7 +256,7 @@ func (f *Fs) fullPath(part string) string {
 }

 // NewFs constructs a new filesystem given a root path and configuration options
-func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 	opt := new(Options)
 	err = configstruct.Set(m, opt)
 	if err != nil {
@@ -267,7 +267,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 		return nil, err
 	}
 	httpClient := httpclient.New()
-	httpClient.Client = fshttp.NewClient(fs.Config)
+	httpClient.Client = fshttp.NewClient(ctx)
 	client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
 	basicAuth := fmt.Sprintf("Basic %s",
 		base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
@@ -287,7 +287,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 		DuplicateFiles:          false,
 		BucketBased:             false,
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	for _, m := range mounts {
 		if opt.MountID != "" {
 			if m.Id == opt.MountID {
@@ -344,7 +344,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err e
 		return nil, translateErrorsObject(err)
 	}
 	if info.Type == "dir" {
-		return nil, fs.ErrorNotAFile
+		return nil, fs.ErrorIsDir
 	}
 	return &Object{
 		fs: f,
@@ -534,7 +534,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	return nil
 }

-// About reports space usage (with a MB precision)
+// About reports space usage (with a MiB precision)
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	mount, err := f.client.MountsDetails(f.mountID)
 	if err != nil {
@@ -608,5 +608,25 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	if err != nil {
 		return "", translateErrorsDir(err)
 	}
-	return linkData.ShortURL, nil
+
+	// The URL returned by the API looks like the following:
+	//
+	// https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6
+	//
+	// The direct URL looks like the following:
+	//
+	// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
+	//
+	// I am not sure about the meaning of the "path" parameter; in my experiments
+	// it is always "%2F", and omitting it or putting any other value
+	// results in 404.
+	//
+	// There is one more quirk: a direct link to a file in / returns that file,
+	// while a direct link to a file elsewhere in the hierarchy returns a zip
+	// archive with one member.
+	link := linkData.URL
+	link = strings.ReplaceAll(link, "/links", "/content/links")
+	link += "/files/get?path=%2F"
+
+	return link, nil
 }
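The PublicLink change above rewrites the share URL into a direct content URL instead of returning the short link. The same rewrite as a tiny standalone helper, where the input URL and helper name are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

// directLink converts a Koofr share link into a direct download link,
// mirroring the string rewrite in the hunk above.
func directLink(shareURL string) string {
	link := strings.ReplaceAll(shareURL, "/links", "/content/links")
	return link + "/files/get?path=%2F"
}

func main() {
	fmt.Println(directLink("https://app.koofr.net/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6"))
	// https://app.koofr.net/content/links/35d9fb92-74a3-4930-b4ed-57f123bfb1a6/files/get?path=%2F
}
```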
@@ -1,13 +1,14 @@
+//go:build darwin || dragonfly || freebsd || linux
 // +build darwin dragonfly freebsd linux

 package local

 import (
 	"context"
+	"fmt"
 	"os"
 	"syscall"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 )
@@ -19,7 +20,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 		if os.IsNotExist(err) {
 			return nil, fs.ErrorDirNotFound
 		}
-		return nil, errors.Wrap(err, "failed to read disk usage")
+		return nil, fmt.Errorf("failed to read disk usage: %w", err)
 	}
 	bs := int64(s.Bsize) // nolint: unconvert
 	usage := &fs.Usage{
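The local backend's About builds its usage numbers from syscall.Statfs, multiplying the filesystem block size by block counts. A minimal Unix-only sketch of that calculation; the path is illustrative, and the exact Statfs_t field types vary by platform, hence the explicit conversions:

```go
//go:build darwin || dragonfly || freebsd || linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var s syscall.Statfs_t
	if err := syscall.Statfs("/", &s); err != nil {
		fmt.Println("failed to read disk usage:", err)
		return
	}
	bs := int64(s.Bsize)         // filesystem block size in bytes
	total := bs * int64(s.Blocks)
	free := bs * int64(s.Bavail) // blocks available to unprivileged users
	fmt.Printf("total=%d bytes free=%d bytes used=%d bytes\n",
		total, free, total-free)
}
```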
(Some files were not shown because too many files have changed in this diff)