mirror of
https://github.com/rclone/rclone.git
synced 2026-02-24 00:22:49 +00:00
Compare commits
1011 Commits
fix-4293-v
...
fix-sftp-d
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
97ade36d8c | ||
|
|
6545755758 | ||
|
|
c86a55c798 | ||
|
|
1d280081d4 | ||
|
|
f48cb5985f | ||
|
|
55e766f4e8 | ||
|
|
63a24255f8 | ||
|
|
bc74f0621e | ||
|
|
f39a08c9d7 | ||
|
|
675548070d | ||
|
|
37ff05a5fa | ||
|
|
c67c1ab4ee | ||
|
|
76f8095bc5 | ||
|
|
f646cd0a2a | ||
|
|
d38f6bb0ab | ||
|
|
11d86c74b2 | ||
|
|
feb6046a8a | ||
|
|
807102ada2 | ||
|
|
770b3496a1 | ||
|
|
da36ce08e4 | ||
|
|
8652cfe575 | ||
|
|
94b1439299 | ||
|
|
97c9e55ddb | ||
|
|
c0b2832509 | ||
|
|
7436768d62 | ||
|
|
55153403aa | ||
|
|
daf449b5f2 | ||
|
|
221dfc3882 | ||
|
|
aab29353d1 | ||
|
|
c24504b793 | ||
|
|
6338d0026e | ||
|
|
ba836d45ff | ||
|
|
367cf984af | ||
|
|
6b7d7d0441 | ||
|
|
cf19073ac9 | ||
|
|
ba5c559fec | ||
|
|
abb8fe8ba1 | ||
|
|
765af387e6 | ||
|
|
d05cf6aba8 | ||
|
|
76a3fef24d | ||
|
|
b40d9bd4c4 | ||
|
|
4680c0776d | ||
|
|
fb305b5976 | ||
|
|
5e91b93e59 | ||
|
|
58c99427b3 | ||
|
|
fee0abf513 | ||
|
|
40024990b7 | ||
|
|
04aa6969a4 | ||
|
|
d2050523de | ||
|
|
1cc6dd349e | ||
|
|
721bae11c3 | ||
|
|
b439199578 | ||
|
|
0bfd6f793b | ||
|
|
76ea716abf | ||
|
|
e635f4c0be | ||
|
|
0cb973f127 | ||
|
|
96ace599a8 | ||
|
|
80bccacd83 | ||
|
|
3349b055f5 | ||
|
|
bef0c23e00 | ||
|
|
84201ed891 | ||
|
|
04608428bf | ||
|
|
6aaa06d7be | ||
|
|
e53bad5353 | ||
|
|
f5397246eb | ||
|
|
b8b73f2656 | ||
|
|
96b67ce0ec | ||
|
|
e2beeffd76 | ||
|
|
30b949642d | ||
|
|
92b3518c78 | ||
|
|
062919e08c | ||
|
|
654f5309b0 | ||
|
|
318fa4472b | ||
|
|
5104e24153 | ||
|
|
9d87a5192d | ||
|
|
29f967dba3 | ||
|
|
1f846c18d4 | ||
|
|
41f561bf26 | ||
|
|
df60e6323c | ||
|
|
58006a925a | ||
|
|
ee2fac1855 | ||
|
|
2188fe38e5 | ||
|
|
b5f8f0973b | ||
|
|
85b8ba9469 | ||
|
|
04a1f673f0 | ||
|
|
0574ebf44a | ||
|
|
22e86ce335 | ||
|
|
c9fce20249 | ||
|
|
5b6f637461 | ||
|
|
07f2f3a62e | ||
|
|
6dc190ec93 | ||
|
|
71f75a1d95 | ||
|
|
1b44035e45 | ||
|
|
054b467f32 | ||
|
|
23da913d03 | ||
|
|
c0cda087a8 | ||
|
|
1773717a47 | ||
|
|
04308dcaa1 | ||
|
|
06f27384dd | ||
|
|
82f1f7d2c4 | ||
|
|
6555d3eb33 | ||
|
|
03229cf394 | ||
|
|
f572bf7829 | ||
|
|
f593558dc2 | ||
|
|
08040a57b0 | ||
|
|
2fa7a3c0fb | ||
|
|
798d1293df | ||
|
|
75c417ad93 | ||
|
|
5ee646f264 | ||
|
|
4a4aca4da7 | ||
|
|
2e4b65f888 | ||
|
|
77cda6773c | ||
|
|
dbc5167281 | ||
|
|
635d1e10ae | ||
|
|
296ceadda6 | ||
|
|
7ae2891252 | ||
|
|
99caf79ffe | ||
|
|
095cf9e4be | ||
|
|
e57553930f | ||
|
|
f122808d86 | ||
|
|
94dbfa4ea6 | ||
|
|
6f2e525821 | ||
|
|
119bddc10b | ||
|
|
28e9fd45cc | ||
|
|
326f3b35ff | ||
|
|
ce83228cb2 | ||
|
|
732bc08ced | ||
|
|
6ef7178ee4 | ||
|
|
9ff6f48d74 | ||
|
|
532af77fd1 | ||
|
|
ab7dfe0c87 | ||
|
|
e489a101f6 | ||
|
|
35a86193b7 | ||
|
|
2833941da8 | ||
|
|
9e6c23d9af | ||
|
|
8bef972262 | ||
|
|
0a968818f6 | ||
|
|
c2ac353183 | ||
|
|
773da395fb | ||
|
|
9e8cd6bff9 | ||
|
|
5d2e327b6f | ||
|
|
77221d7528 | ||
|
|
1971c1ef87 | ||
|
|
7e7dbe16c2 | ||
|
|
002d323c94 | ||
|
|
4ad62ec016 | ||
|
|
95ee14bb2c | ||
|
|
88aabd1f71 | ||
|
|
34627c5c7e | ||
|
|
e33303df94 | ||
|
|
665eceaec3 | ||
|
|
ba09ee18bb | ||
|
|
62bf63d36f | ||
|
|
f38c262471 | ||
|
|
5db88fed2b | ||
|
|
316e65589b | ||
|
|
4401d180aa | ||
|
|
9ccd870267 | ||
|
|
16d1da2c1e | ||
|
|
00a0ee1899 | ||
|
|
b78c9a65fa | ||
|
|
ef3c350686 | ||
|
|
742af80972 | ||
|
|
08a2df51be | ||
|
|
2925e1384c | ||
|
|
2ec0c8d45f | ||
|
|
98579608ec | ||
|
|
a1a41aa0c1 | ||
|
|
f8d56bebaf | ||
|
|
5d799431a7 | ||
|
|
8f23cae1c0 | ||
|
|
964088affa | ||
|
|
f4068d406b | ||
|
|
7511b6f4f1 | ||
|
|
e618ea83dd | ||
|
|
34dc257c55 | ||
|
|
4cacf5d30c | ||
|
|
0537791d14 | ||
|
|
4b1d28550a | ||
|
|
d27c35ee4a | ||
|
|
ffec0d4f03 | ||
|
|
89daa9efd1 | ||
|
|
ee502a757f | ||
|
|
386acaa110 | ||
|
|
efdee3a5fe | ||
|
|
5d85e6bc9c | ||
|
|
4a9469a3dc | ||
|
|
f8884a7200 | ||
|
|
2a40f00077 | ||
|
|
9799fdbae2 | ||
|
|
492504a601 | ||
|
|
0c03a7fead | ||
|
|
7afb4487ef | ||
|
|
b9d0ed4f5c | ||
|
|
baa4c039a0 | ||
|
|
31a8211afa | ||
|
|
3544e09e95 | ||
|
|
b456be4303 | ||
|
|
3e96752079 | ||
|
|
4a5cbf2a19 | ||
|
|
dcd4edc9f5 | ||
|
|
7f5e347d94 | ||
|
|
040677ab5b | ||
|
|
6366d3dfc5 | ||
|
|
60d376c323 | ||
|
|
7b1ca716bf | ||
|
|
d8711cf7f9 | ||
|
|
cd69f9e6e8 | ||
|
|
a737ff21af | ||
|
|
ad9aa693a3 | ||
|
|
964c3e0732 | ||
|
|
a46a3c0811 | ||
|
|
60dcafe04d | ||
|
|
813bf029d4 | ||
|
|
f2d3264054 | ||
|
|
23a0d4a1e6 | ||
|
|
b96ebfc40b | ||
|
|
3fe2aaf96c | ||
|
|
c163e6b250 | ||
|
|
c1492cfa28 | ||
|
|
38a8071a58 | ||
|
|
8c68a76a4a | ||
|
|
e7b736f8ca | ||
|
|
cb30a8c80e | ||
|
|
629a3eeca2 | ||
|
|
f52ae75a51 | ||
|
|
9d5c5bf7ab | ||
|
|
53573b4a09 | ||
|
|
3622e064f5 | ||
|
|
6d28ea7ab5 | ||
|
|
b9fd02039b | ||
|
|
1a41c930f3 | ||
|
|
ddb7eb6e0a | ||
|
|
c114695a66 | ||
|
|
fcba51557f | ||
|
|
9393225a1d | ||
|
|
3d3ff61f74 | ||
|
|
d98f192425 | ||
|
|
54771e4402 | ||
|
|
dc286529bc | ||
|
|
7dc7c021db | ||
|
|
fe1aa13069 | ||
|
|
5fa8e7d957 | ||
|
|
9db7c51eaa | ||
|
|
3859fe2f52 | ||
|
|
0caf417779 | ||
|
|
9eab258ffb | ||
|
|
7df57cd625 | ||
|
|
1fd9b483c8 | ||
|
|
93353c431b | ||
|
|
886dfd23e2 | ||
|
|
116a8021bb | ||
|
|
9e2fbe0f1a | ||
|
|
6d65d116df | ||
|
|
edaeb51ea9 | ||
|
|
6e2e2d9eb2 | ||
|
|
20e15e52a9 | ||
|
|
d0f8b4f479 | ||
|
|
58d82a5c73 | ||
|
|
c0c74003f2 | ||
|
|
60bc7a079a | ||
|
|
20c5ca08fb | ||
|
|
fc57648b75 | ||
|
|
8c5c91e68f | ||
|
|
9dd39e8524 | ||
|
|
9c9186183d | ||
|
|
2ccf416e83 | ||
|
|
5577c7b760 | ||
|
|
f6dbb98a1d | ||
|
|
d042f3194f | ||
|
|
524cd327e6 | ||
|
|
b8c1cf7451 | ||
|
|
0fa68bda02 | ||
|
|
1378bfee63 | ||
|
|
d6870473a1 | ||
|
|
12cd322643 | ||
|
|
1406b6c3c9 | ||
|
|
088a83872d | ||
|
|
cb46092883 | ||
|
|
a2cd5d8fa3 | ||
|
|
1fe2460e38 | ||
|
|
ef5c212f9b | ||
|
|
268a7ff7b8 | ||
|
|
b47d6001a9 | ||
|
|
a4c4ddf052 | ||
|
|
4cc2a7f342 | ||
|
|
c72d2c67ed | ||
|
|
9deab5a563 | ||
|
|
da5b0cb611 | ||
|
|
0187bc494a | ||
|
|
2bdbf00fa3 | ||
|
|
9ee3ad70e9 | ||
|
|
ce182adf46 | ||
|
|
97fc3b9046 | ||
|
|
e59acd16c6 | ||
|
|
acfd7e2403 | ||
|
|
f47893873d | ||
|
|
b9a015e5b9 | ||
|
|
d72d9e591a | ||
|
|
df451e1e70 | ||
|
|
d9959b0271 | ||
|
|
f2c0f82fc6 | ||
|
|
f76c6cc893 | ||
|
|
5e95877840 | ||
|
|
8b491f7f3d | ||
|
|
aea8776a43 | ||
|
|
c387eb8c09 | ||
|
|
a12b2746b4 | ||
|
|
3dbef2b2fd | ||
|
|
f111e0eaf8 | ||
|
|
96207f342c | ||
|
|
e25ac4dcf0 | ||
|
|
28f6efe955 | ||
|
|
f17d7c0012 | ||
|
|
3761cf68b4 | ||
|
|
71554c1371 | ||
|
|
8a46dd1b57 | ||
|
|
3b21857097 | ||
|
|
a10fbf16ea | ||
|
|
f4750928ee | ||
|
|
657be2ace5 | ||
|
|
feaaca4987 | ||
|
|
ebd9462ea6 | ||
|
|
6b9e4f939d | ||
|
|
687a3b1832 | ||
|
|
25d5ed763c | ||
|
|
5e038a5e1e | ||
|
|
4b4e531846 | ||
|
|
89e8fb4818 | ||
|
|
b9bf91c510 | ||
|
|
40b58d59ad | ||
|
|
4fbb50422c | ||
|
|
8d847a4e94 | ||
|
|
e3e08a48cb | ||
|
|
ff6868900d | ||
|
|
aab076029f | ||
|
|
294f090361 | ||
|
|
301e1ad982 | ||
|
|
3cf6ea848b | ||
|
|
bb0b6432ae | ||
|
|
46078d391f | ||
|
|
849bf20598 | ||
|
|
e91f2e342a | ||
|
|
713f8f357d | ||
|
|
83368998be | ||
|
|
4013bc4a4c | ||
|
|
32925dae1f | ||
|
|
6cc70997ba | ||
|
|
d260e3824e | ||
|
|
a5bd26395e | ||
|
|
6fa74340a0 | ||
|
|
4d8ef7bca7 | ||
|
|
6a9ae32012 | ||
|
|
a7fd65bf2d | ||
|
|
1fed2d910c | ||
|
|
c95b580478 | ||
|
|
2be310cd6e | ||
|
|
02a5d350f9 | ||
|
|
18cd2064ec | ||
|
|
59ed70ca91 | ||
|
|
6df56c55b0 | ||
|
|
94e34cb783 | ||
|
|
c3e2392f2b | ||
|
|
f7e3115955 | ||
|
|
e01e8010a0 | ||
|
|
75056dc9b2 | ||
|
|
7aa7acd926 | ||
|
|
0ad38dd6fa | ||
|
|
9cc8ff4dd4 | ||
|
|
b029fb591f | ||
|
|
95e9c4e7f1 | ||
|
|
c40bafb72c | ||
|
|
eac77b06ab | ||
|
|
0355d6daf2 | ||
|
|
c4b8df6903 | ||
|
|
0dd3ae5e0d | ||
|
|
e5aa92c922 | ||
|
|
f6265fbeff | ||
|
|
1397b85214 | ||
|
|
86a0dae632 | ||
|
|
076ff96f6b | ||
|
|
985011e73b | ||
|
|
9ca6bf59c6 | ||
|
|
e5d5ae9ab7 | ||
|
|
ac6bb222f9 | ||
|
|
62d5876eb4 | ||
|
|
9808a53416 | ||
|
|
cc08f66dc1 | ||
|
|
6b8da24eb8 | ||
|
|
333faa6c68 | ||
|
|
1b92e4636e | ||
|
|
c5a299d5b1 | ||
|
|
04a8859d29 | ||
|
|
4b5fe3adad | ||
|
|
7db68b72f1 | ||
|
|
9c667be2a1 | ||
|
|
c0cf54067a | ||
|
|
297ca23abd | ||
|
|
d809930e1d | ||
|
|
fdc0528bd5 | ||
|
|
a0320d6e94 | ||
|
|
89bf036e15 | ||
|
|
1605f9e14d | ||
|
|
cd6fd4be4b | ||
|
|
4ea7c7aa47 | ||
|
|
5834020316 | ||
|
|
f5066a09cd | ||
|
|
863bd93c30 | ||
|
|
d96af3b005 | ||
|
|
3280ceee3b | ||
|
|
930bca2478 | ||
|
|
23b12c39bd | ||
|
|
9d37c208b7 | ||
|
|
c81311722e | ||
|
|
843ddd9136 | ||
|
|
a3fcadddc8 | ||
|
|
a63e1f1383 | ||
|
|
5b84adf3b9 | ||
|
|
f890965020 | ||
|
|
f88a5542cf | ||
|
|
fd94b3a473 | ||
|
|
2aebeb6061 | ||
|
|
e779cacc82 | ||
|
|
37e630178e | ||
|
|
2cdc071b85 | ||
|
|
496e32fd8a | ||
|
|
bf3ba50a0f | ||
|
|
22c226b152 | ||
|
|
5ca7f1fe87 | ||
|
|
f14220ef1e | ||
|
|
424aaac2e1 | ||
|
|
47b69d6300 | ||
|
|
c0c2505977 | ||
|
|
2d7afe8690 | ||
|
|
92187a3b33 | ||
|
|
53aa4b87fd | ||
|
|
edfe183ba2 | ||
|
|
dfc63eb8f1 | ||
|
|
f21f2529a3 | ||
|
|
1efb543ad8 | ||
|
|
92e36fcfc5 | ||
|
|
bf8542c670 | ||
|
|
cc5a1e90d8 | ||
|
|
b39fa54ab2 | ||
|
|
f1147fe1dd | ||
|
|
8897377a54 | ||
|
|
f50b4e51ed | ||
|
|
f135acbdfb | ||
|
|
cdd99a6f39 | ||
|
|
6ecb5794bc | ||
|
|
9a21aff4ed | ||
|
|
8574a7bd67 | ||
|
|
a0fc10e41a | ||
|
|
ae3963e4b4 | ||
|
|
e32f08f37b | ||
|
|
fea4b753b2 | ||
|
|
b2b5b7598c | ||
|
|
5f943aabc8 | ||
|
|
84c785bc36 | ||
|
|
993146375e | ||
|
|
bbe791a886 | ||
|
|
1545ace8f2 | ||
|
|
bcac8fdc83 | ||
|
|
15e1a6bee7 | ||
|
|
9710ded60f | ||
|
|
5f3672102c | ||
|
|
644cc69108 | ||
|
|
1415666074 | ||
|
|
bae550c71e | ||
|
|
beff081abb | ||
|
|
7f5ee5d81f | ||
|
|
8b41dfa50a | ||
|
|
0d8bcc08da | ||
|
|
d3b7f14b66 | ||
|
|
f66928a846 | ||
|
|
3b1122c888 | ||
|
|
463a18aa07 | ||
|
|
0a932dc1f2 | ||
|
|
8856e0e559 | ||
|
|
3b6df71838 | ||
|
|
31de631b22 | ||
|
|
189ef5f257 | ||
|
|
2f67681e3b | ||
|
|
41127965b0 | ||
|
|
8171671d82 | ||
|
|
75617c0c3b | ||
|
|
8b9d23916b | ||
|
|
e43b79e33d | ||
|
|
459cc70a50 | ||
|
|
20578f3f89 | ||
|
|
15da53696e | ||
|
|
2bddba118e | ||
|
|
c7e5976e11 | ||
|
|
f0bf9cfda1 | ||
|
|
671dd047f7 | ||
|
|
6272ca74bc | ||
|
|
f5af761466 | ||
|
|
06f1c0c61c | ||
|
|
e6a9f005d6 | ||
|
|
8f6f4b053c | ||
|
|
fe15a2eeeb | ||
|
|
019667170f | ||
|
|
7a496752f3 | ||
|
|
b569dc11a0 | ||
|
|
df4e6079f1 | ||
|
|
6156f90601 | ||
|
|
cdaea62932 | ||
|
|
78afe01d15 | ||
|
|
4eac88babf | ||
|
|
b4217fabd3 | ||
|
|
92b9dabf3c | ||
|
|
4323ff8a63 | ||
|
|
3e188495f5 | ||
|
|
acb9e17eb3 | ||
|
|
c8ab4f1d02 | ||
|
|
e776a1b122 | ||
|
|
c57af26de9 | ||
|
|
7d89912666 | ||
|
|
cd075f1703 | ||
|
|
35b2ca642c | ||
|
|
127f48e8ad | ||
|
|
3e986cdf54 | ||
|
|
b80d498304 | ||
|
|
757e696a6b | ||
|
|
e3979131f2 | ||
|
|
a774f6bfdb | ||
|
|
d7cd35e2ca | ||
|
|
38e70f1797 | ||
|
|
3b49440c25 | ||
|
|
7c0287b824 | ||
|
|
f97c2c85bd | ||
|
|
14c0d8a93e | ||
|
|
768ad4de2a | ||
|
|
817987dfc4 | ||
|
|
eb090d3544 | ||
|
|
4daf8b7083 | ||
|
|
0be69018b8 | ||
|
|
9b9ab5f3e8 | ||
|
|
072464cbdb | ||
|
|
b0491dec88 | ||
|
|
ccfefedb47 | ||
|
|
2fffcf9e7f | ||
|
|
a39a5d261c | ||
|
|
45b57822d5 | ||
|
|
d8984cd37f | ||
|
|
80e63af470 | ||
|
|
db2c38b21b | ||
|
|
cef51d58ac | ||
|
|
e0b5a13a13 | ||
|
|
de21356154 | ||
|
|
35a4de2030 | ||
|
|
847625822f | ||
|
|
3877df4e62 | ||
|
|
ec73d2fb9a | ||
|
|
a7689d7023 | ||
|
|
847a44e7ad | ||
|
|
b3710c962e | ||
|
|
35ccfe1721 | ||
|
|
ef2bfb9718 | ||
|
|
a97effa27c | ||
|
|
01adee7554 | ||
|
|
78a76b0d29 | ||
|
|
e775328523 | ||
|
|
50344e7792 | ||
|
|
d58fdb10db | ||
|
|
feaacfd226 | ||
|
|
e3c238ac95 | ||
|
|
752997c5e8 | ||
|
|
71edc75ca6 | ||
|
|
768e4c4735 | ||
|
|
c553ad5158 | ||
|
|
c66b901320 | ||
|
|
dd67a3d5f5 | ||
|
|
e972f2c98a | ||
|
|
acbcb1ea9d | ||
|
|
d4444375ac | ||
|
|
00bf40a8ef | ||
|
|
5d1f947f32 | ||
|
|
b594cb9430 | ||
|
|
add7a35e55 | ||
|
|
2af7b61fc3 | ||
|
|
cb97c2b0d3 | ||
|
|
35da38e93f | ||
|
|
963c0f28b9 | ||
|
|
b3815dc0c2 | ||
|
|
8053fc4e16 | ||
|
|
66c3f2f31f | ||
|
|
62c9074132 | ||
|
|
a854cb9617 | ||
|
|
fbf9942fe7 | ||
|
|
f425950a52 | ||
|
|
1d40bc1901 | ||
|
|
ba51409c3c | ||
|
|
a64fc05385 | ||
|
|
4d54454900 | ||
|
|
5601652d65 | ||
|
|
b218bc5bed | ||
|
|
65eee674b9 | ||
|
|
72eb74e94a | ||
|
|
6bfec25165 | ||
|
|
1c61d51448 | ||
|
|
f7fe1d766b | ||
|
|
55aec19389 | ||
|
|
9db51117dc | ||
|
|
a9c9467210 | ||
|
|
f50e15c77c | ||
|
|
e3191d096f | ||
|
|
07c40780b3 | ||
|
|
67b82b4a28 | ||
|
|
5f47e1e034 | ||
|
|
e92cb9e8f8 | ||
|
|
9ea990d5a2 | ||
|
|
08b9ede217 | ||
|
|
6342499c47 | ||
|
|
f347a198f7 | ||
|
|
060642ad14 | ||
|
|
629c0d0f65 | ||
|
|
f7404f52e7 | ||
|
|
74a321e156 | ||
|
|
fce885c0cd | ||
|
|
4028a245b0 | ||
|
|
c5b07a6714 | ||
|
|
b0965bf34f | ||
|
|
1eaca9fb45 | ||
|
|
d833e49db9 | ||
|
|
3aee544cee | ||
|
|
9e87f5090f | ||
|
|
c8cfa43ccc | ||
|
|
ed7af3f370 | ||
|
|
be19d6a403 | ||
|
|
46858ee6fe | ||
|
|
a94e4d803b | ||
|
|
dcbe62ab0a | ||
|
|
121b981b49 | ||
|
|
73bb9322f5 | ||
|
|
bdc2278a30 | ||
|
|
ea8d13d841 | ||
|
|
e45716cac2 | ||
|
|
c98dd8755c | ||
|
|
5ae5e1dd56 | ||
|
|
4f8ee736b1 | ||
|
|
816e68a274 | ||
|
|
6ab6c8eefa | ||
|
|
cb16f42075 | ||
|
|
7ae84a3c91 | ||
|
|
2fd543c989 | ||
|
|
50cf97fc72 | ||
|
|
4acd68188b | ||
|
|
b81b6da3fc | ||
|
|
56ad6aac4d | ||
|
|
1efb8ea280 | ||
|
|
9cfc01f791 | ||
|
|
86014cebd7 | ||
|
|
507f861c67 | ||
|
|
e073720a8f | ||
|
|
ce7cdadb71 | ||
|
|
a223b78872 | ||
|
|
d5181118cc | ||
|
|
886b3abac1 | ||
|
|
250f8d9371 | ||
|
|
8a429d12cf | ||
|
|
8bf4697dc2 | ||
|
|
584523672c | ||
|
|
a9585efd64 | ||
|
|
f6b1f05e0f | ||
|
|
cc8538e0d1 | ||
|
|
f7d9b15707 | ||
|
|
83406bc473 | ||
|
|
1cfce703b2 | ||
|
|
3b24a4cada | ||
|
|
135adb426e | ||
|
|
987dac9fe5 | ||
|
|
7fde48a805 | ||
|
|
ce9028bb5b | ||
|
|
52688a63c6 | ||
|
|
8904e81cdf | ||
|
|
bcbe393af3 | ||
|
|
47aada16a0 | ||
|
|
c22d04aa30 | ||
|
|
354b4f19ec | ||
|
|
0ed1857fa9 | ||
|
|
dfadd98969 | ||
|
|
19a8b66cee | ||
|
|
07dee18d6b | ||
|
|
70e8b11805 | ||
|
|
9d574c0d63 | ||
|
|
2e21c58e6a | ||
|
|
506342317b | ||
|
|
979bb07c86 | ||
|
|
dfeae0e70a | ||
|
|
f43a9ac17e | ||
|
|
c3ac9319f4 | ||
|
|
76ee3060d1 | ||
|
|
4bb241c435 | ||
|
|
a06f4c2514 | ||
|
|
53aa03cc44 | ||
|
|
1ce0b45965 | ||
|
|
7078311a84 | ||
|
|
ef9b717961 | ||
|
|
09246ed9d5 | ||
|
|
33ea55efed | ||
|
|
79474b2e4c | ||
|
|
fb001b6c01 | ||
|
|
2896f51a22 | ||
|
|
5b9115d87a | ||
|
|
211b08f771 | ||
|
|
f0905499e3 | ||
|
|
7985df3768 | ||
|
|
095c7bd801 | ||
|
|
23469c9c7c | ||
|
|
2347762b0d | ||
|
|
636fb5344a | ||
|
|
aaa8b7738a | ||
|
|
bc4282e49e | ||
|
|
2812816142 | ||
|
|
ceeac84cfe | ||
|
|
83d48f65b6 | ||
|
|
95d0410baa | ||
|
|
2708a7569e | ||
|
|
45e8bea8d0 | ||
|
|
f980f230c5 | ||
|
|
e204f89685 | ||
|
|
f7efce594b | ||
|
|
1fb6ad700f | ||
|
|
e3fe31f7cb | ||
|
|
8b96933e58 | ||
|
|
d69b96a94c | ||
|
|
d846210978 | ||
|
|
30c8b1b84f | ||
|
|
43e0929339 | ||
|
|
6c70c42577 | ||
|
|
cd2c06f2a7 | ||
|
|
af55a74bd2 | ||
|
|
d00c126cef | ||
|
|
bedf6e90d2 | ||
|
|
e8c84d8b53 | ||
|
|
f89ff3872d | ||
|
|
127f0fc64c | ||
|
|
0cfa89f316 | ||
|
|
bfcd4113c3 | ||
|
|
0e7fc7613f | ||
|
|
8ac2f52b6e | ||
|
|
1973fc1ecc | ||
|
|
7c39a13281 | ||
|
|
c5c503cbbe | ||
|
|
d09488b829 | ||
|
|
0a6196716c | ||
|
|
8bc9b2b883 | ||
|
|
a15f50254a | ||
|
|
5d4f77a022 | ||
|
|
a089de0964 | ||
|
|
3068ae8447 | ||
|
|
67ff153b0c | ||
|
|
3e1cb8302a | ||
|
|
e4a87f772f | ||
|
|
d4f38d45a5 | ||
|
|
bbe7eb35f1 | ||
|
|
87e54f2dde | ||
|
|
3f3afe489f | ||
|
|
70b21d9c87 | ||
|
|
e00bf3d723 | ||
|
|
605f2b819a | ||
|
|
bf2b975359 | ||
|
|
00a5086ff2 | ||
|
|
be6a888e50 | ||
|
|
dad8447423 | ||
|
|
65ff109065 | ||
|
|
b7253fc1c1 | ||
|
|
d143f576c6 | ||
|
|
a152351a71 | ||
|
|
a2fa1370c5 | ||
|
|
bed83b0b64 | ||
|
|
cf0bdad5de | ||
|
|
85d35ef03c | ||
|
|
514d10b314 | ||
|
|
5164c3d2d0 | ||
|
|
ffdd0719e7 | ||
|
|
4e2b5389d7 | ||
|
|
dc4e63631f | ||
|
|
275bf456d3 | ||
|
|
7dfa871095 | ||
|
|
70cc88de22 | ||
|
|
4bc0f46955 | ||
|
|
5b09599a23 | ||
|
|
f4dd8e3fe8 | ||
|
|
d0888edc0a | ||
|
|
51a230d7fd | ||
|
|
fc5b14b620 | ||
|
|
bbddadbd04 | ||
|
|
7428e47ebc | ||
|
|
72083c65ad | ||
|
|
70f92fd6b3 | ||
|
|
a86cedbc24 | ||
|
|
0906f8dd3b | ||
|
|
664213cedb | ||
|
|
75a7226174 | ||
|
|
9e925becb6 | ||
|
|
e3a5bb9b48 | ||
|
|
b7eeb0e260 | ||
|
|
84d64ddabc | ||
|
|
6c9f92aee6 | ||
|
|
893297760b | ||
|
|
c5c56cda02 | ||
|
|
2295123cad | ||
|
|
ff0280c0cb | ||
|
|
64d736a57b | ||
|
|
5f1d5a1897 | ||
|
|
aac2406e19 | ||
|
|
6dc28ef50a | ||
|
|
66def93373 | ||
|
|
c58023a9ba | ||
|
|
3edc9ff0b0 | ||
|
|
8e8ae1edc7 | ||
|
|
20b00db390 | ||
|
|
db4bbf9521 | ||
|
|
2b7994e739 | ||
|
|
e7fbdac8e0 | ||
|
|
41ec712aa9 | ||
|
|
17acae2b00 | ||
|
|
57261c7e97 | ||
|
|
d8239e0194 | ||
|
|
004c3796de | ||
|
|
18c7549770 | ||
|
|
e5190f14ce | ||
|
|
433b73a5a8 | ||
|
|
ab88a3341f | ||
|
|
181da3ce9b | ||
|
|
b14a58c9b8 | ||
|
|
60cc2cba1f | ||
|
|
c797494d88 | ||
|
|
e2a57182be | ||
|
|
8928441466 | ||
|
|
0e8965060f | ||
|
|
f3cf6fcdd7 | ||
|
|
18ccf0f871 | ||
|
|
313647bcf3 | ||
|
|
61fe068c90 | ||
|
|
5c49096e11 | ||
|
|
a73c78545d | ||
|
|
e0fd560711 | ||
|
|
6a56ac1032 | ||
|
|
96299629b4 | ||
|
|
75de30cfa8 | ||
|
|
233bed6a73 | ||
|
|
b3964efe4d | ||
|
|
575f061629 | ||
|
|
640d7d3b4e | ||
|
|
e92294b482 | ||
|
|
22937e8982 | ||
|
|
c3d1474eb9 | ||
|
|
e2426ea87b | ||
|
|
e58a61175f | ||
|
|
05bea46c3e | ||
|
|
c8a719ae0d | ||
|
|
c3884aafd9 | ||
|
|
0a9785a4ff | ||
|
|
8140f67092 | ||
|
|
4a001b8a02 | ||
|
|
525433e6dd | ||
|
|
f71f6c57d7 | ||
|
|
e35623c72e | ||
|
|
344bce7e2a | ||
|
|
3a4322a7ba | ||
|
|
27b9ae4fc3 | ||
|
|
7e2488af10 | ||
|
|
41ecb586c4 | ||
|
|
510ac341e1 | ||
|
|
358e2b2665 | ||
|
|
3305079a03 | ||
|
|
6ed8471a37 | ||
|
|
dc7ce37c32 | ||
|
|
57c10babfe | ||
|
|
23b2c58018 | ||
|
|
78abd21eec | ||
|
|
841edc729c | ||
|
|
b03fcbcc12 | ||
|
|
b60ac7b66a | ||
|
|
725ae91387 | ||
|
|
b7dd3ce608 | ||
|
|
70c8566cb8 | ||
|
|
0d066bdf46 | ||
|
|
3affc2e066 | ||
|
|
23c826db52 | ||
|
|
1ae36a4e32 | ||
|
|
bc969ad244 | ||
|
|
d7ac1f5b0e | ||
|
|
5bf53fe3ac | ||
|
|
9cc17cec9a | ||
|
|
e2816629d0 | ||
|
|
3f0d54daae | ||
|
|
7dcbebf9bc | ||
|
|
c31defbbd3 | ||
|
|
e54ce35019 | ||
|
|
75d54d720c | ||
|
|
cc0421cb9e | ||
|
|
9c01ac9894 | ||
|
|
20300d1f61 | ||
|
|
6231beefc5 | ||
|
|
068cfdaa00 | ||
|
|
7d62d1fc97 | ||
|
|
e13ac28b8d | ||
|
|
b30ee57cd9 | ||
|
|
921e384c4d | ||
|
|
bf685f600e | ||
|
|
b6d3cad70e | ||
|
|
c665201b85 | ||
|
|
d6996e3347 | ||
|
|
dffcc99373 | ||
|
|
09b79679cd | ||
|
|
cf68e61f40 | ||
|
|
22674d1146 | ||
|
|
f9ee0dc3f2 | ||
|
|
65fa6a946a | ||
|
|
4cf82118d9 | ||
|
|
5f56611a76 | ||
|
|
0f7a2f0f3c | ||
|
|
be2b310ace | ||
|
|
45afe97e8e | ||
|
|
fee8f21ce1 | ||
|
|
1abc252ed3 | ||
|
|
801a820c54 | ||
|
|
2bcc66c805 | ||
|
|
b5ba077a2f | ||
|
|
0931b84940 | ||
|
|
94a0991584 | ||
|
|
9d3d397f50 | ||
|
|
38e8415e77 | ||
|
|
fb9edbe34e | ||
|
|
85f9bd1abf | ||
|
|
63e4d2952b | ||
|
|
52247e9a9f | ||
|
|
d2ad293fae | ||
|
|
6082096f7e | ||
|
|
9a6fcd035b | ||
|
|
47d08ac1f1 | ||
|
|
c4c6a1ee7d | ||
|
|
29d6358f34 | ||
|
|
6308153ae7 | ||
|
|
a9713cd0ed | ||
|
|
1cae4152f9 | ||
|
|
4884bee8ba | ||
|
|
54fc2821cd | ||
|
|
5549fd25fc | ||
|
|
3d5a63607e | ||
|
|
cb7534dcdf | ||
|
|
770a6f2cad | ||
|
|
aab9aa8a2e | ||
|
|
3a14b1d5a9 | ||
|
|
ac044b1c54 | ||
|
|
61c7ea4085 | ||
|
|
01280798e9 | ||
|
|
db56d30078 | ||
|
|
a00274d2ab | ||
|
|
82975109af | ||
|
|
30eb094f28 | ||
|
|
b401a727f7 | ||
|
|
8eb16ce89c | ||
|
|
8e7eb37456 | ||
|
|
4d7f91309b | ||
|
|
109b695621 | ||
|
|
177d2f2f79 | ||
|
|
f5439ddc54 | ||
|
|
324077fb48 | ||
|
|
f50ab981f7 | ||
|
|
0c620ad076 | ||
|
|
49cf2eb7e4 | ||
|
|
a2afa9aadd | ||
|
|
c2f3949ded | ||
|
|
bf355c4527 | ||
|
|
3daa63cae8 | ||
|
|
4441e012cf | ||
|
|
122a47fba6 | ||
|
|
421585dd72 | ||
|
|
0bab9903ee | ||
|
|
700deb0a81 | ||
|
|
1222b78ec4 | ||
|
|
0ee16b51c4 | ||
|
|
26001d520a | ||
|
|
8bf265c775 | ||
|
|
62f0bbb598 | ||
|
|
d5f4c74697 | ||
|
|
8f42532b6d | ||
|
|
2288a5c617 | ||
|
|
957311f479 | ||
|
|
2cc381b91d | ||
|
|
f406dbbb4d | ||
|
|
3b2322285a | ||
|
|
47d093e863 | ||
|
|
b2ae94de5b | ||
|
|
4afea1ebaf | ||
|
|
711736054f | ||
|
|
d64212d902 | ||
|
|
8913679d88 | ||
|
|
4f9a80e2d3 | ||
|
|
aa93b39d9b | ||
|
|
101f82c6b3 | ||
|
|
d35673efc6 | ||
|
|
3286d1992b | ||
|
|
4ac662d144 | ||
|
|
d73a418a55 | ||
|
|
306a3e0cd7 | ||
|
|
975a53c9e3 | ||
|
|
78fdc5805b | ||
|
|
8f9d5af26d | ||
|
|
6ff5787b40 | ||
|
|
3c1c6d2f01 | ||
|
|
0272a7f405 | ||
|
|
e1d34ef427 | ||
|
|
26b4698212 | ||
|
|
2871268505 | ||
|
|
744828a4de |
46
.github/ISSUE_TEMPLATE/Bug.md
vendored
46
.github/ISSUE_TEMPLATE/Bug.md
vendored
@@ -5,19 +5,31 @@ about: Report a problem with rclone
|
|||||||
|
|
||||||
<!--
|
<!--
|
||||||
|
|
||||||
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
|
We understand you are having a problem with rclone; we want to help you with that!
|
||||||
|
|
||||||
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
|
**STOP and READ**
|
||||||
|
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
|
||||||
|
Please show the effort you've put in to solving the problem and please be specific.
|
||||||
|
People are volunteering their time to help! Low effort posts are not likely to get good answers!
|
||||||
|
|
||||||
|
If you think you might have found a bug, try to replicate it with the latest beta (or stable).
|
||||||
|
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
|
||||||
|
|
||||||
|
If you can still replicate it or just got a question then please use the rclone forum:
|
||||||
|
|
||||||
https://forum.rclone.org/
|
https://forum.rclone.org/
|
||||||
|
|
||||||
instead of filing an issue for a quick response.
|
for a quick response instead of filing an issue on this repo.
|
||||||
|
|
||||||
If you think you might have found a bug, please can you try to replicate it with the latest beta?
|
If nothing else helps, then please fill in the info below which helps us help you.
|
||||||
|
|
||||||
https://beta.rclone.org/
|
**DO NOT REDACT** any information except passwords/keys/personal info.
|
||||||
|
|
||||||
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
|
You should use 3 backticks to begin and end your paste to make it readable.
|
||||||
|
|
||||||
|
Make sure to include a log obtained with '-vv'.
|
||||||
|
|
||||||
|
You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/
|
||||||
|
|
||||||
Thank you
|
Thank you
|
||||||
|
|
||||||
@@ -25,6 +37,10 @@ The Rclone Developers
|
|||||||
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
|
#### The associated forum post URL from `https://forum.rclone.org`
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### What is the problem you are having with rclone?
|
#### What is the problem you are having with rclone?
|
||||||
|
|
||||||
|
|
||||||
@@ -33,18 +49,26 @@ The Rclone Developers
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
|
#### Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### Which cloud storage system are you using? (eg Google Drive)
|
#### Which cloud storage system are you using? (e.g. Google Drive)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
|
#### The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
|
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
<!--- Please keep the note below for others who read your bug report. -->
|
||||||
|
|
||||||
|
#### How to use GitHub
|
||||||
|
|
||||||
|
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
|
||||||
|
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
|
||||||
|
* Subscribe to receive notifications on status change and new comments.
|
||||||
|
|||||||
23
.github/ISSUE_TEMPLATE/Feature.md
vendored
23
.github/ISSUE_TEMPLATE/Feature.md
vendored
@@ -7,12 +7,16 @@ about: Suggest a new feature or enhancement for rclone
|
|||||||
|
|
||||||
Welcome :-)
|
Welcome :-)
|
||||||
|
|
||||||
So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
|
So you've got an idea to improve rclone? We love that!
|
||||||
|
You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
|
||||||
|
|
||||||
Here is a checklist of things to do:
|
Probably the latest beta (or stable) release has your feature, so try to update your rclone.
|
||||||
|
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
|
||||||
|
|
||||||
1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
|
If it still isn't there, here is a checklist of things to do:
|
||||||
2. Discuss on the forum first: https://forum.rclone.org/
|
|
||||||
|
1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
|
||||||
|
2. Discuss on the forum: https://forum.rclone.org/
|
||||||
3. Make a feature request issue (this is the right place!).
|
3. Make a feature request issue (this is the right place!).
|
||||||
4. Be prepared to get involved making the feature :-)
|
4. Be prepared to get involved making the feature :-)
|
||||||
|
|
||||||
@@ -22,6 +26,9 @@ The Rclone Developers
|
|||||||
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
|
#### The associated forum post URL from `https://forum.rclone.org`
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### What is your current rclone version (output from `rclone version`)?
|
#### What is your current rclone version (output from `rclone version`)?
|
||||||
|
|
||||||
@@ -34,3 +41,11 @@ The Rclone Developers
|
|||||||
#### How do you think rclone should be changed to solve that?
|
#### How do you think rclone should be changed to solve that?
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
<!--- Please keep the note below for others who read your feature request. -->
|
||||||
|
|
||||||
|
#### How to use GitHub
|
||||||
|
|
||||||
|
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
|
||||||
|
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
|
||||||
|
* Subscribe to receive notifications on status change and new comments.
|
||||||
|
|||||||
262
.github/workflows/build.yml
vendored
262
.github/workflows/build.yml
vendored
@@ -12,116 +12,118 @@ on:
|
|||||||
tags:
|
tags:
|
||||||
- '*'
|
- '*'
|
||||||
pull_request:
|
pull_request:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
manual:
|
||||||
|
required: true
|
||||||
|
default: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
|
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'race', 'go1.11', 'go1.12', 'go1.13']
|
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
|
||||||
|
|
||||||
include:
|
include:
|
||||||
- job_name: linux
|
- job_name: linux
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
go: '1.14.x'
|
go: '1.16.x'
|
||||||
modules: 'on'
|
|
||||||
gotags: cmount
|
gotags: cmount
|
||||||
build_flags: '-include "^linux/"'
|
build_flags: '-include "^linux/"'
|
||||||
check: true
|
check: true
|
||||||
quicktest: true
|
quicktest: true
|
||||||
|
racequicktest: true
|
||||||
|
librclonetest: true
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
- job_name: mac
|
- job_name: mac_amd64
|
||||||
os: macOS-latest
|
os: macOS-latest
|
||||||
go: '1.14.x'
|
go: '1.16.x'
|
||||||
modules: 'on'
|
gotags: 'cmount'
|
||||||
gotags: '' # cmount doesn't work on osx travis for some reason
|
|
||||||
build_flags: '-include "^darwin/amd64" -cgo'
|
build_flags: '-include "^darwin/amd64" -cgo'
|
||||||
quicktest: true
|
quicktest: true
|
||||||
racequicktest: true
|
racequicktest: true
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
|
- job_name: mac_arm64
|
||||||
|
os: macOS-latest
|
||||||
|
go: '1.16.x'
|
||||||
|
gotags: 'cmount'
|
||||||
|
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
||||||
|
deploy: true
|
||||||
|
|
||||||
- job_name: windows_amd64
|
- job_name: windows_amd64
|
||||||
os: windows-latest
|
os: windows-latest
|
||||||
go: '1.14.x'
|
go: '1.16.x'
|
||||||
modules: 'on'
|
|
||||||
gotags: cmount
|
gotags: cmount
|
||||||
build_flags: '-include "^windows/amd64" -cgo'
|
build_flags: '-include "^windows/amd64" -cgo'
|
||||||
|
build_args: '-buildmode exe'
|
||||||
quicktest: true
|
quicktest: true
|
||||||
racequicktest: true
|
racequicktest: true
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
- job_name: windows_386
|
- job_name: windows_386
|
||||||
os: windows-latest
|
os: windows-latest
|
||||||
go: '1.14.x'
|
go: '1.16.x'
|
||||||
modules: 'on'
|
|
||||||
gotags: cmount
|
gotags: cmount
|
||||||
goarch: '386'
|
goarch: '386'
|
||||||
cgo: '1'
|
cgo: '1'
|
||||||
build_flags: '-include "^windows/386" -cgo'
|
build_flags: '-include "^windows/386" -cgo'
|
||||||
|
build_args: '-buildmode exe'
|
||||||
quicktest: true
|
quicktest: true
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
- job_name: other_os
|
- job_name: other_os
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
go: '1.14.x'
|
go: '1.16.x'
|
||||||
modules: 'on'
|
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
|
||||||
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
|
|
||||||
compile_all: true
|
compile_all: true
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
- job_name: race
|
|
||||||
os: ubuntu-latest
|
|
||||||
go: '1.14.x'
|
|
||||||
modules: 'on'
|
|
||||||
quicktest: true
|
|
||||||
racequicktest: true
|
|
||||||
|
|
||||||
- job_name: go1.11
|
|
||||||
os: ubuntu-latest
|
|
||||||
go: '1.11.x'
|
|
||||||
modules: 'on'
|
|
||||||
quicktest: true
|
|
||||||
|
|
||||||
- job_name: go1.12
|
|
||||||
os: ubuntu-latest
|
|
||||||
go: '1.12.x'
|
|
||||||
modules: 'on'
|
|
||||||
quicktest: true
|
|
||||||
|
|
||||||
- job_name: go1.13
|
- job_name: go1.13
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
go: '1.13.x'
|
go: '1.13.x'
|
||||||
modules: 'on'
|
|
||||||
quicktest: true
|
quicktest: true
|
||||||
|
|
||||||
|
- job_name: go1.14
|
||||||
|
os: ubuntu-latest
|
||||||
|
go: '1.14.x'
|
||||||
|
quicktest: true
|
||||||
|
racequicktest: true
|
||||||
|
|
||||||
|
- job_name: go1.15
|
||||||
|
os: ubuntu-latest
|
||||||
|
go: '1.15.x'
|
||||||
|
quicktest: true
|
||||||
|
racequicktest: true
|
||||||
|
|
||||||
name: ${{ matrix.job_name }}
|
name: ${{ matrix.job_name }}
|
||||||
|
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v1
|
uses: actions/checkout@v2
|
||||||
with:
|
with:
|
||||||
# Checkout into a fixed path to avoid import path problems on go < 1.11
|
fetch-depth: 0
|
||||||
path: ./src/github.com/rclone/rclone
|
|
||||||
|
|
||||||
- name: Install Go
|
- name: Install Go
|
||||||
uses: actions/setup-go@v1
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
|
stable: 'false'
|
||||||
go-version: ${{ matrix.go }}
|
go-version: ${{ matrix.go }}
|
||||||
|
|
||||||
- name: Set environment variables
|
- name: Set environment variables
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
echo '::set-env name=GOPATH::${{ runner.workspace }}'
|
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
|
||||||
echo '::add-path::${{ runner.workspace }}/bin'
|
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
|
||||||
echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
|
echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
|
||||||
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
|
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
|
||||||
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
|
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
|
||||||
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
|
|
||||||
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
|
|
||||||
|
|
||||||
- name: Install Libraries on Linux
|
- name: Install Libraries on Linux
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -136,7 +138,7 @@ jobs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
brew update
|
brew update
|
||||||
brew cask install osxfuse
|
brew install --cask macfuse
|
||||||
if: matrix.os == 'macOS-latest'
|
if: matrix.os == 'macOS-latest'
|
||||||
|
|
||||||
- name: Install Libraries on Windows
|
- name: Install Libraries on Windows
|
||||||
@@ -144,10 +146,10 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
$ProgressPreference = 'SilentlyContinue'
|
$ProgressPreference = 'SilentlyContinue'
|
||||||
choco install -y winfsp zip
|
choco install -y winfsp zip
|
||||||
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
|
echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
|
||||||
if ($env:GOARCH -eq "386") {
|
if ($env:GOARCH -eq "386") {
|
||||||
choco install -y mingw --forcex86 --force
|
choco install -y mingw --forcex86 --force
|
||||||
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
|
echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
||||||
}
|
}
|
||||||
# Copy mingw32-make.exe to make.exe so the same command line
|
# Copy mingw32-make.exe to make.exe so the same command line
|
||||||
# can be used on Windows as on macOS and Linux
|
# can be used on Windows as on macOS and Linux
|
||||||
@@ -167,10 +169,22 @@ jobs:
|
|||||||
printf "\n\nSystem environment:\n\n"
|
printf "\n\nSystem environment:\n\n"
|
||||||
env
|
env
|
||||||
|
|
||||||
- name: Run tests
|
- name: Go module cache
|
||||||
|
uses: actions/cache@v2
|
||||||
|
with:
|
||||||
|
path: ~/go/pkg/mod
|
||||||
|
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-go-
|
||||||
|
|
||||||
|
- name: Build rclone
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
make
|
make
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
make quicktest
|
make quicktest
|
||||||
if: matrix.quicktest
|
if: matrix.quicktest
|
||||||
|
|
||||||
@@ -180,6 +194,14 @@ jobs:
|
|||||||
make racequicktest
|
make racequicktest
|
||||||
if: matrix.racequicktest
|
if: matrix.racequicktest
|
||||||
|
|
||||||
|
- name: Run librclone tests
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
make -C librclone/ctest test
|
||||||
|
make -C librclone/ctest clean
|
||||||
|
librclone/python/test_rclone.py
|
||||||
|
if: matrix.librclonetest
|
||||||
|
|
||||||
- name: Code quality test
|
- name: Code quality test
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
@@ -206,50 +228,110 @@ jobs:
|
|||||||
# Deploy binaries if enabled in config && not a PR && not a fork
|
# Deploy binaries if enabled in config && not a PR && not a fork
|
||||||
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
|
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
|
||||||
|
|
||||||
xgo:
|
android:
|
||||||
timeout-minutes: 60
|
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
|
||||||
name: "xgo cross compile"
|
timeout-minutes: 30
|
||||||
runs-on: ubuntu-latest
|
name: "android-all"
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
steps:
|
# Upgrade together with NDK version
|
||||||
|
- name: Set up Go 1.14
|
||||||
|
uses: actions/setup-go@v1
|
||||||
|
with:
|
||||||
|
go-version: 1.14
|
||||||
|
|
||||||
- name: Checkout
|
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
|
||||||
uses: actions/checkout@v1
|
- name: Force NDK version
|
||||||
with:
|
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
|
||||||
# Checkout into a fixed path to avoid import path problems on go < 1.11
|
|
||||||
path: ./src/github.com/rclone/rclone
|
|
||||||
|
|
||||||
- name: Set environment variables
|
- name: Go module cache
|
||||||
shell: bash
|
uses: actions/cache@v2
|
||||||
run: |
|
with:
|
||||||
echo '::set-env name=GOPATH::${{ runner.workspace }}'
|
path: ~/go/pkg/mod
|
||||||
echo '::add-path::${{ runner.workspace }}/bin'
|
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-go-
|
||||||
|
|
||||||
- name: Cross-compile rclone
|
- name: Set global environment variables
|
||||||
run: |
|
shell: bash
|
||||||
docker pull billziss/xgo-cgofuse
|
run: |
|
||||||
GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
|
echo "VERSION=$(make version)" >> $GITHUB_ENV
|
||||||
# xgo \
|
|
||||||
# -image=billziss/xgo-cgofuse \
|
|
||||||
# -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
|
|
||||||
# -tags cmount \
|
|
||||||
# -dest build \
|
|
||||||
# .
|
|
||||||
xgo \
|
|
||||||
-image=billziss/xgo-cgofuse \
|
|
||||||
-targets=android/*,ios/* \
|
|
||||||
-dest build \
|
|
||||||
.
|
|
||||||
|
|
||||||
- name: Build rclone
|
- name: build native rclone
|
||||||
run: |
|
run: |
|
||||||
docker pull golang
|
make
|
||||||
docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=mod -v
|
|
||||||
|
|
||||||
- name: Upload artifacts
|
- name: install gomobile
|
||||||
run: |
|
run: |
|
||||||
make ci_upload
|
go get golang.org/x/mobile/cmd/gobind
|
||||||
env:
|
go get golang.org/x/mobile/cmd/gomobile
|
||||||
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
|
env PATH=$PATH:~/go/bin gomobile init
|
||||||
# Upload artifacts if not a PR && not a fork
|
|
||||||
if: github.head_ref == '' && github.repository == 'rclone/rclone'
|
- name: arm-v7a gomobile build
|
||||||
|
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
|
||||||
|
|
||||||
|
- name: arm-v7a Set environment variables
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
|
||||||
|
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||||
|
echo 'GOOS=android' >> $GITHUB_ENV
|
||||||
|
echo 'GOARCH=arm' >> $GITHUB_ENV
|
||||||
|
echo 'GOARM=7' >> $GITHUB_ENV
|
||||||
|
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
|
||||||
|
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
|
||||||
|
- name: arm-v7a build
|
||||||
|
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
|
||||||
|
|
||||||
|
- name: arm64-v8a Set environment variables
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
|
||||||
|
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||||
|
echo 'GOOS=android' >> $GITHUB_ENV
|
||||||
|
echo 'GOARCH=arm64' >> $GITHUB_ENV
|
||||||
|
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
|
||||||
|
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: arm64-v8a build
|
||||||
|
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
|
||||||
|
|
||||||
|
- name: x86 Set environment variables
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
|
||||||
|
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||||
|
echo 'GOOS=android' >> $GITHUB_ENV
|
||||||
|
echo 'GOARCH=386' >> $GITHUB_ENV
|
||||||
|
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
|
||||||
|
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: x86 build
|
||||||
|
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
|
||||||
|
|
||||||
|
- name: x64 Set environment variables
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
|
||||||
|
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||||
|
echo 'GOOS=android' >> $GITHUB_ENV
|
||||||
|
echo 'GOARCH=amd64' >> $GITHUB_ENV
|
||||||
|
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
|
||||||
|
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: x64 build
|
||||||
|
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
|
||||||
|
|
||||||
|
- name: Upload artifacts
|
||||||
|
run: |
|
||||||
|
make ci_upload
|
||||||
|
env:
|
||||||
|
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
|
||||||
|
# Upload artifacts if not a PR && not a fork
|
||||||
|
if: github.head_ref == '' && github.repository == 'rclone/rclone'
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
|
if: github.repository == 'rclone/rclone'
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Build image job
|
name: Build image job
|
||||||
steps:
|
steps:
|
||||||
@@ -15,7 +16,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- name: Build and publish image
|
- name: Build and publish image
|
||||||
uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
|
uses: ilteoood/docker_buildx@1.1.0
|
||||||
with:
|
with:
|
||||||
tag: beta
|
tag: beta
|
||||||
imageName: rclone/rclone
|
imageName: rclone/rclone
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
|
if: github.repository == 'rclone/rclone'
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Build image job
|
name: Build image job
|
||||||
steps:
|
steps:
|
||||||
@@ -23,7 +24,7 @@ jobs:
|
|||||||
id: actual_major_version
|
id: actual_major_version
|
||||||
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
|
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
|
||||||
- name: Build and publish image
|
- name: Build and publish image
|
||||||
uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
|
uses: ilteoood/docker_buildx@1.1.0
|
||||||
with:
|
with:
|
||||||
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
|
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
|
||||||
imageName: rclone/rclone
|
imageName: rclone/rclone
|
||||||
@@ -31,3 +32,40 @@ jobs:
|
|||||||
publish: true
|
publish: true
|
||||||
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
|
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
|
||||||
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||||
|
|
||||||
|
build_docker_volume_plugin:
|
||||||
|
if: github.repository == 'rclone/rclone'
|
||||||
|
needs: build
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Build and publish docker volume plugin
|
||||||
|
steps:
|
||||||
|
- name: Checkout master
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
- name: Set plugin parameters
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
GITHUB_REF=${{ github.ref }}
|
||||||
|
|
||||||
|
PLUGIN_IMAGE_USER=rclone
|
||||||
|
PLUGIN_IMAGE_NAME=docker-volume-rclone
|
||||||
|
PLUGIN_IMAGE_TAG=${GITHUB_REF#refs/tags/}
|
||||||
|
PLUGIN_IMAGE=${PLUGIN_IMAGE_USER}/${PLUGIN_IMAGE_NAME}:${PLUGIN_IMAGE_TAG}
|
||||||
|
PLUGIN_IMAGE_LATEST=${PLUGIN_IMAGE_USER}/${PLUGIN_IMAGE_NAME}:latest
|
||||||
|
|
||||||
|
echo "PLUGIN_IMAGE_USER=${PLUGIN_IMAGE_USER}" >> $GITHUB_ENV
|
||||||
|
echo "PLUGIN_IMAGE_NAME=${PLUGIN_IMAGE_NAME}" >> $GITHUB_ENV
|
||||||
|
echo "PLUGIN_IMAGE_TAG=${PLUGIN_IMAGE_TAG}" >> $GITHUB_ENV
|
||||||
|
echo "PLUGIN_IMAGE=${PLUGIN_IMAGE}" >> $GITHUB_ENV
|
||||||
|
echo "PLUGIN_IMAGE_LATEST=${PLUGIN_IMAGE_LATEST}" >> $GITHUB_ENV
|
||||||
|
- name: Build image
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
make docker-plugin
|
||||||
|
- name: Push image
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
docker login -u ${{ secrets.DOCKER_HUB_USER }} -p ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||||
|
make docker-plugin-push PLUGIN_IMAGE=${PLUGIN_IMAGE}
|
||||||
|
make docker-plugin-push PLUGIN_IMAGE=${PLUGIN_IMAGE_LATEST}
|
||||||
|
|||||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -1,6 +1,7 @@
|
|||||||
*~
|
*~
|
||||||
_junk/
|
_junk/
|
||||||
rclone
|
rclone
|
||||||
|
rclone.exe
|
||||||
build
|
build
|
||||||
docs/public
|
docs/public
|
||||||
rclone.iml
|
rclone.iml
|
||||||
@@ -9,3 +10,7 @@ rclone.iml
|
|||||||
*.test
|
*.test
|
||||||
*.log
|
*.log
|
||||||
*.iml
|
*.iml
|
||||||
|
fuzz-build.zip
|
||||||
|
*.orig
|
||||||
|
*.rej
|
||||||
|
Thumbs.db
|
||||||
|
|||||||
192
CONTRIBUTING.md
192
CONTRIBUTING.md
@@ -12,94 +12,162 @@ When filing an issue, please include the following information if
|
|||||||
possible as well as a description of the problem. Make sure you test
|
possible as well as a description of the problem. Make sure you test
|
||||||
with the [latest beta of rclone](https://beta.rclone.org/):
|
with the [latest beta of rclone](https://beta.rclone.org/):
|
||||||
|
|
||||||
* Rclone version (eg output from `rclone -V`)
|
* Rclone version (e.g. output from `rclone version`)
|
||||||
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
|
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
|
||||||
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
|
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
|
||||||
* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
|
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
||||||
* if the log contains secrets then edit the file with a text editor first to obscure them
|
* if the log contains secrets then edit the file with a text editor first to obscure them
|
||||||
|
|
||||||
## Submitting a pull request ##
|
## Submitting a new feature or bug fix ##
|
||||||
|
|
||||||
If you find a bug that you'd like to fix, or a new feature that you'd
|
If you find a bug that you'd like to fix, or a new feature that you'd
|
||||||
like to implement then please submit a pull request via GitHub.
|
like to implement then please submit a pull request via GitHub.
|
||||||
|
|
||||||
If it is a big feature then make an issue first so it can be discussed.
|
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
|
||||||
|
|
||||||
You'll need a Go environment set up with GOPATH set. See [the Go
|
To prepare your pull request first press the fork button on [rclone's GitHub
|
||||||
getting started docs](https://golang.org/doc/install) for more info.
|
|
||||||
|
|
||||||
First in your web browser press the fork button on [rclone's GitHub
|
|
||||||
page](https://github.com/rclone/rclone).
|
page](https://github.com/rclone/rclone).
|
||||||
|
|
||||||
Now in your terminal
|
Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
|
||||||
|
|
||||||
go get -u github.com/rclone/rclone
|
Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
|
||||||
cd $GOPATH/src/github.com/rclone/rclone
|
|
||||||
|
git clone https://github.com/rclone/rclone.git
|
||||||
|
cd rclone
|
||||||
git remote rename origin upstream
|
git remote rename origin upstream
|
||||||
|
# if you have SSH keys setup in your GitHub account:
|
||||||
git remote add origin git@github.com:YOURUSER/rclone.git
|
git remote add origin git@github.com:YOURUSER/rclone.git
|
||||||
|
# otherwise:
|
||||||
|
git remote add origin https://github.com/YOURUSER/rclone.git
|
||||||
|
|
||||||
Make a branch to add your new feature
|
Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
|
||||||
|
|
||||||
|
Now [install Go](https://golang.org/doc/install) and verify your installation:
|
||||||
|
|
||||||
|
go version
|
||||||
|
|
||||||
|
Great, you can now compile and execute your own version of rclone:
|
||||||
|
|
||||||
|
go build
|
||||||
|
./rclone version
|
||||||
|
|
||||||
|
Finally make a branch to add your new feature
|
||||||
|
|
||||||
git checkout -b my-new-feature
|
git checkout -b my-new-feature
|
||||||
|
|
||||||
And get hacking.
|
And get hacking.
|
||||||
|
|
||||||
When ready - run the unit tests for the code you changed
|
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
|
||||||
|
|
||||||
|
When ready - test the affected functionality and run the unit tests for the code you changed
|
||||||
|
|
||||||
|
cd folder/with/changed/files
|
||||||
go test -v
|
go test -v
|
||||||
|
|
||||||
Note that you may need to make a test remote, eg `TestSwift` for some
|
Note that you may need to make a test remote, e.g. `TestSwift` for some
|
||||||
of the unit tests.
|
of the unit tests.
|
||||||
|
|
||||||
Note the top level Makefile targets
|
This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
|
||||||
|
|
||||||
* make check
|
|
||||||
* make test
|
|
||||||
|
|
||||||
Both of these will be run by Travis when you make a pull request but
|
|
||||||
you can do this yourself locally too. These require some extra go
|
|
||||||
packages which you can install with
|
|
||||||
|
|
||||||
* make build_dep
|
|
||||||
|
|
||||||
Make sure you
|
Make sure you
|
||||||
|
|
||||||
|
* Add [unit tests](#testing) for a new feature.
|
||||||
* Add [documentation](#writing-documentation) for a new feature.
|
* Add [documentation](#writing-documentation) for a new feature.
|
||||||
* Follow the [commit message guidelines](#commit-messages).
|
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
|
||||||
* Add [unit tests](#testing) for a new feature
|
|
||||||
* squash commits down to one per feature
|
|
||||||
* rebase to master with `git rebase master`
|
|
||||||
|
|
||||||
When you are done with that
|
When you are done with that push your changes to Github:
|
||||||
|
|
||||||
git push origin my-new-feature
|
git push -u origin my-new-feature
|
||||||
|
|
||||||
Go to the GitHub website and click [Create pull
|
and open the GitHub website to [create your pull
|
||||||
request](https://help.github.com/articles/creating-a-pull-request/).
|
request](https://help.github.com/articles/creating-a-pull-request/).
|
||||||
|
|
||||||
You patch will get reviewed and you might get asked to fix some stuff.
|
Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
|
||||||
|
|
||||||
If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
|
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
|
||||||
```
|
|
||||||
git log # See how many commits you want to squash
|
|
||||||
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
|
|
||||||
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
|
|
||||||
git commit # Add a new commit message.
|
|
||||||
git push --force # Push the squashed commit to your GitHub repo.
|
|
||||||
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
|
|
||||||
```
|
|
||||||
|
|
||||||
## CI for your fork ##
|
## Using Git and Github ##
|
||||||
|
|
||||||
|
### Committing your changes ###
|
||||||
|
|
||||||
|
Follow the guideline for [commit messages](#commit-messages) and then:
|
||||||
|
|
||||||
|
git checkout my-new-feature # To switch to your branch
|
||||||
|
git status # To see the new and changed files
|
||||||
|
git add FILENAME # To select FILENAME for the commit
|
||||||
|
git status # To verify the changes to be committed
|
||||||
|
git commit # To do the commit
|
||||||
|
git log # To verify the commit. Use q to quit the log
|
||||||
|
|
||||||
|
You can modify the message or changes in the latest commit using:
|
||||||
|
|
||||||
|
git commit --amend
|
||||||
|
|
||||||
|
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||||
|
|
||||||
|
### Replacing your previously pushed commits ###
|
||||||
|
|
||||||
|
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
|
||||||
|
|
||||||
|
Your previously pushed commits are replaced by:
|
||||||
|
|
||||||
|
git push --force origin my-new-feature
|
||||||
|
|
||||||
|
### Basing your changes on the latest master ###
|
||||||
|
|
||||||
|
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
|
||||||
|
|
||||||
|
git checkout master
|
||||||
|
git fetch upstream
|
||||||
|
git merge --ff-only
|
||||||
|
git push origin --follow-tags # optional update of your fork in GitHub
|
||||||
|
git checkout my-new-feature
|
||||||
|
git rebase master
|
||||||
|
|
||||||
|
If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||||
|
|
||||||
|
### Squashing your commits ###
|
||||||
|
|
||||||
|
To combine your commits into one commit:
|
||||||
|
|
||||||
|
git log # To count the commits to squash, e.g. the last 2
|
||||||
|
git reset --soft HEAD~2 # To undo the 2 latest commits
|
||||||
|
git status # To check everything is as expected
|
||||||
|
|
||||||
|
If everything is fine, then make the new combined commit:
|
||||||
|
|
||||||
|
git commit # To commit the undone commits as one
|
||||||
|
|
||||||
|
otherwise, you may roll back using:
|
||||||
|
|
||||||
|
git reflog # To check that HEAD{1} is your previous state
|
||||||
|
git reset --soft 'HEAD@{1}' # To roll back to your previous state
|
||||||
|
|
||||||
|
If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
|
||||||
|
|
||||||
|
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
|
||||||
|
|
||||||
|
### GitHub Continuous Integration ###
|
||||||
|
|
||||||
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
|
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
|
||||||
|
|
||||||
## Testing ##
|
## Testing ##
|
||||||
|
|
||||||
|
### Quick testing ###
|
||||||
|
|
||||||
rclone's tests are run from the go testing framework, so at the top
|
rclone's tests are run from the go testing framework, so at the top
|
||||||
level you can run this to run all the tests.
|
level you can run this to run all the tests.
|
||||||
|
|
||||||
go test -v ./...
|
go test -v ./...
|
||||||
|
|
||||||
|
You can also use `make`, if supported by your platform
|
||||||
|
|
||||||
|
make quicktest
|
||||||
|
|
||||||
|
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
|
||||||
|
|
||||||
|
### Backend testing ###
|
||||||
|
|
||||||
rclone contains a mixture of unit tests and integration tests.
|
rclone contains a mixture of unit tests and integration tests.
|
||||||
Because it is difficult (and in some respects pointless) to test cloud
|
Because it is difficult (and in some respects pointless) to test cloud
|
||||||
storage systems by mocking all their interfaces, rclone unit tests can
|
storage systems by mocking all their interfaces, rclone unit tests can
|
||||||
@@ -115,8 +183,8 @@ are skipped if `TestDrive:` isn't defined.
|
|||||||
cd backend/drive
|
cd backend/drive
|
||||||
go test -v
|
go test -v
|
||||||
|
|
||||||
You can then run the integration tests which tests all of rclone's
|
You can then run the integration tests which test all of rclone's
|
||||||
operations. Normally these get run against the local filing system,
|
operations. Normally these get run against the local file system,
|
||||||
but they can be run against any of the remotes.
|
but they can be run against any of the remotes.
|
||||||
|
|
||||||
cd fs/sync
|
cd fs/sync
|
||||||
@@ -127,18 +195,25 @@ but they can be run against any of the remotes.
|
|||||||
go test -v -remote TestDrive:
|
go test -v -remote TestDrive:
|
||||||
|
|
||||||
If you want to use the integration test framework to run these tests
|
If you want to use the integration test framework to run these tests
|
||||||
all together with an HTML report and test retries then from the
|
altogether with an HTML report and test retries then from the
|
||||||
project root:
|
project root:
|
||||||
|
|
||||||
go install github.com/rclone/rclone/fstest/test_all
|
go install github.com/rclone/rclone/fstest/test_all
|
||||||
test_all -backend drive
|
test_all -backend drive
|
||||||
|
|
||||||
|
### Full integration testing ###
|
||||||
|
|
||||||
If you want to run all the integration tests against all the remotes,
|
If you want to run all the integration tests against all the remotes,
|
||||||
then change into the project root and run
|
then change into the project root and run
|
||||||
|
|
||||||
|
make check
|
||||||
make test
|
make test
|
||||||
|
|
||||||
This command is run daily on the integration test server. You can
|
The commands may require some extra go packages which you can install with
|
||||||
|
|
||||||
|
make build_dep
|
||||||
|
|
||||||
|
The full integration tests are run daily on the integration test server. You can
|
||||||
find the results at https://pub.rclone.org/integration-tests/
|
find the results at https://pub.rclone.org/integration-tests/
|
||||||
|
|
||||||
## Code Organisation ##
|
## Code Organisation ##
|
||||||
@@ -153,6 +228,7 @@ with modules beneath.
|
|||||||
* cmd - the rclone commands
|
* cmd - the rclone commands
|
||||||
* all - import this to load all the commands
|
* all - import this to load all the commands
|
||||||
* ...commands
|
* ...commands
|
||||||
|
* cmdtest - end-to-end tests of commands, flags, environment variables,...
|
||||||
* docs - the documentation and website
|
* docs - the documentation and website
|
||||||
* content - adjust these docs only - everything else is autogenerated
|
* content - adjust these docs only - everything else is autogenerated
|
||||||
* command - these are auto generated - edit the corresponding .go file
|
* command - these are auto generated - edit the corresponding .go file
|
||||||
@@ -170,7 +246,7 @@ with modules beneath.
|
|||||||
* log - logging facilities
|
* log - logging facilities
|
||||||
* march - iterates directories in lock step
|
* march - iterates directories in lock step
|
||||||
* object - in memory Fs objects
|
* object - in memory Fs objects
|
||||||
* operations - primitives for sync, eg Copy, Move
|
* operations - primitives for sync, e.g. Copy, Move
|
||||||
* sync - sync directories
|
* sync - sync directories
|
||||||
* walk - walk a directory
|
* walk - walk a directory
|
||||||
* fstest - provides integration test framework
|
* fstest - provides integration test framework
|
||||||
@@ -178,7 +254,7 @@ with modules beneath.
|
|||||||
* mockdir - mocks an fs.Directory
|
* mockdir - mocks an fs.Directory
|
||||||
* mockobject - mocks an fs.Object
|
* mockobject - mocks an fs.Object
|
||||||
* test_all - Runs integration tests for everything
|
* test_all - Runs integration tests for everything
|
||||||
* graphics - the images used in the website etc
|
* graphics - the images used in the website, etc.
|
||||||
* lib - libraries used by the backend
|
* lib - libraries used by the backend
|
||||||
* atexit - register functions to run when rclone exits
|
* atexit - register functions to run when rclone exits
|
||||||
* dircache - directory ID to name caching
|
* dircache - directory ID to name caching
|
||||||
@@ -202,12 +278,12 @@ for the flag help, the remainder is shown to the user in `rclone
|
|||||||
config` and is added to the docs with `make backenddocs`.
|
config` and is added to the docs with `make backenddocs`.
|
||||||
|
|
||||||
The only documentation you need to edit are the `docs/content/*.md`
|
The only documentation you need to edit are the `docs/content/*.md`
|
||||||
files. The MANUAL.*, rclone.1, web site etc are all auto generated
|
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
|
||||||
from those during the release process. See the `make doc` and `make
|
from those during the release process. See the `make doc` and `make
|
||||||
website` targets in the Makefile if you are interested in how. You
|
website` targets in the Makefile if you are interested in how. You
|
||||||
don't need to run these when adding a feature.
|
don't need to run these when adding a feature.
|
||||||
|
|
||||||
Documentation for rclone sub commands is with their code, eg
|
Documentation for rclone sub commands is with their code, e.g.
|
||||||
`cmd/ls/ls.go`.
|
`cmd/ls/ls.go`.
|
||||||
|
|
||||||
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
|
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
|
||||||
@@ -265,7 +341,7 @@ rclone uses the [go
|
|||||||
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
|
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
|
||||||
support in go1.11 and later to manage its dependencies.
|
support in go1.11 and later to manage its dependencies.
|
||||||
|
|
||||||
rclone can be built with modules outside of the GOPATH
|
rclone can be built with modules outside of the `GOPATH`.
|
||||||
|
|
||||||
To add a dependency `github.com/ncw/new_dependency` see the
|
To add a dependency `github.com/ncw/new_dependency` see the
|
||||||
instructions below. These will fetch the dependency and add it to
|
instructions below. These will fetch the dependency and add it to
|
||||||
@@ -333,8 +409,8 @@ Getting going
|
|||||||
* Try to implement as many optional methods as possible as it makes the remote more usable.
|
* Try to implement as many optional methods as possible as it makes the remote more usable.
|
||||||
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
|
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
|
||||||
* `rclone purge -v TestRemote:rclone-info`
|
* `rclone purge -v TestRemote:rclone-info`
|
||||||
* `rclone info --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
||||||
* `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
|
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
|
||||||
* open `remote.csv` in a spreadsheet and examine
|
* open `remote.csv` in a spreadsheet and examine
|
||||||
|
|
||||||
Unit tests
|
Unit tests
|
||||||
@@ -364,7 +440,7 @@ See the [testing](#testing) section for more information on integration tests.
|
|||||||
|
|
||||||
Add your fs to the docs - you'll need to pick an icon for it from
|
Add your fs to the docs - you'll need to pick an icon for it from
|
||||||
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
|
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
|
||||||
alphabetical order of full name of remote (eg `drive` is ordered as
|
alphabetical order of full name of remote (e.g. `drive` is ordered as
|
||||||
`Google Drive`) but with the local file system last.
|
`Google Drive`) but with the local file system last.
|
||||||
|
|
||||||
* `README.md` - main GitHub page
|
* `README.md` - main GitHub page
|
||||||
@@ -400,7 +476,7 @@ Usage
|
|||||||
- If this variable doesn't exist, plugin support is disabled.
|
- If this variable doesn't exist, plugin support is disabled.
|
||||||
- Plugins must be compiled against the exact version of rclone to work.
|
- Plugins must be compiled against the exact version of rclone to work.
|
||||||
(The rclone used during building the plugin must be the same as the source of rclone)
|
(The rclone used during building the plugin must be the same as the source of rclone)
|
||||||
|
|
||||||
Building
|
Building
|
||||||
|
|
||||||
To turn your existing additions into a Go plugin, move them to an external repository
|
To turn your existing additions into a Go plugin, move them to an external repository
|
||||||
|
|||||||
@@ -16,6 +16,8 @@ RUN apk --no-cache add ca-certificates fuse tzdata && \
|
|||||||
|
|
||||||
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
|
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
|
||||||
|
|
||||||
|
RUN addgroup -g 1009 rclone && adduser -u 1009 -Ds /bin/sh -G rclone rclone
|
||||||
|
|
||||||
ENTRYPOINT [ "rclone" ]
|
ENTRYPOINT [ "rclone" ]
|
||||||
|
|
||||||
WORKDIR /data
|
WORKDIR /data
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ Current active maintainers of rclone are:
|
|||||||
| Fabian Möller | @B4dM4n | |
|
| Fabian Möller | @B4dM4n | |
|
||||||
| Alex Chen | @Cnly | onedrive backend |
|
| Alex Chen | @Cnly | onedrive backend |
|
||||||
| Sandeep Ummadi | @sandeepkru | azureblob backend |
|
| Sandeep Ummadi | @sandeepkru | azureblob backend |
|
||||||
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
|
| Sebastian Bünger | @buengese | jottacloud, yandex & compress backends |
|
||||||
| Ivan Andreev | @ivandeex | chunker & mailru backends |
|
| Ivan Andreev | @ivandeex | chunker & mailru backends |
|
||||||
| Max Sum | @Max-Sum | union backend |
|
| Max Sum | @Max-Sum | union backend |
|
||||||
| Fred | @creativeprojects | seafile backend |
|
| Fred | @creativeprojects | seafile backend |
|
||||||
@@ -37,7 +37,7 @@ Rclone uses the labels like this:
|
|||||||
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
|
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
|
||||||
* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
|
* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
|
||||||
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
|
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
|
||||||
* `maintenance` - internal enhancement, code re-organisation etc
|
* `maintenance` - internal enhancement, code re-organisation, etc.
|
||||||
* `Needs Go 1.XX` - waiting for that version of Go to be released
|
* `Needs Go 1.XX` - waiting for that version of Go to be released
|
||||||
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
|
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
|
||||||
* `Remote: XXX` - which rclone backend this affects
|
* `Remote: XXX` - which rclone backend this affects
|
||||||
@@ -45,7 +45,7 @@ Rclone uses the labels like this:
|
|||||||
|
|
||||||
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
|
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
|
||||||
|
|
||||||
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).
|
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
|
||||||
|
|
||||||
The milestones have these meanings:
|
The milestones have these meanings:
|
||||||
|
|
||||||
|
|||||||
12814
MANUAL.html
generated
12814
MANUAL.html
generated
File diff suppressed because it is too large
Load Diff
16645
MANUAL.txt
generated
16645
MANUAL.txt
generated
File diff suppressed because it is too large
Load Diff
115
Makefile
115
Makefile
@@ -7,27 +7,29 @@ RELEASE_TAG := $(shell git tag -l --points-at HEAD)
|
|||||||
VERSION := $(shell cat VERSION)
|
VERSION := $(shell cat VERSION)
|
||||||
# Last tag on this branch
|
# Last tag on this branch
|
||||||
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
||||||
|
# Next version
|
||||||
|
NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
|
||||||
|
NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
|
||||||
# If we are working on a release, override branch to master
|
# If we are working on a release, override branch to master
|
||||||
ifdef RELEASE_TAG
|
ifdef RELEASE_TAG
|
||||||
BRANCH := master
|
BRANCH := master
|
||||||
|
LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^)
|
||||||
endif
|
endif
|
||||||
TAG_BRANCH := -$(BRANCH)
|
TAG_BRANCH := .$(BRANCH)
|
||||||
BRANCH_PATH := branch/
|
BRANCH_PATH := branch/$(BRANCH)/
|
||||||
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
|
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
|
||||||
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
|
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
|
||||||
TAG_BRANCH :=
|
TAG_BRANCH :=
|
||||||
BRANCH_PATH :=
|
BRANCH_PATH :=
|
||||||
endif
|
endif
|
||||||
# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank
|
# Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit)
|
||||||
VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
|
VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD)
|
||||||
# TAG is current version + number of commits since last release + branch
|
# TAG is current version + commit number + commit + branch
|
||||||
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
|
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
|
||||||
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
|
ifdef RELEASE_TAG
|
||||||
ifndef RELEASE_TAG
|
TAG := $(RELEASE_TAG)
|
||||||
TAG := $(TAG)-beta
|
|
||||||
endif
|
endif
|
||||||
GO_VERSION := $(shell go version)
|
GO_VERSION := $(shell go version)
|
||||||
GO_FILES := $(shell go list ./... )
|
|
||||||
ifdef BETA_SUBDIR
|
ifdef BETA_SUBDIR
|
||||||
BETA_SUBDIR := /$(BETA_SUBDIR)
|
BETA_SUBDIR := /$(BETA_SUBDIR)
|
||||||
endif
|
endif
|
||||||
@@ -44,20 +46,19 @@ endif
|
|||||||
.PHONY: rclone test_all vars version
|
.PHONY: rclone test_all vars version
|
||||||
|
|
||||||
rclone:
|
rclone:
|
||||||
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
|
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
|
||||||
mkdir -p `go env GOPATH`/bin/
|
mkdir -p `go env GOPATH`/bin/
|
||||||
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
|
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
|
||||||
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
|
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
|
||||||
|
|
||||||
test_all:
|
test_all:
|
||||||
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
|
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
|
||||||
|
|
||||||
vars:
|
vars:
|
||||||
@echo SHELL="'$(SHELL)'"
|
@echo SHELL="'$(SHELL)'"
|
||||||
@echo BRANCH="'$(BRANCH)'"
|
@echo BRANCH="'$(BRANCH)'"
|
||||||
@echo TAG="'$(TAG)'"
|
@echo TAG="'$(TAG)'"
|
||||||
@echo VERSION="'$(VERSION)'"
|
@echo VERSION="'$(VERSION)'"
|
||||||
@echo NEXT_VERSION="'$(NEXT_VERSION)'"
|
|
||||||
@echo GO_VERSION="'$(GO_VERSION)'"
|
@echo GO_VERSION="'$(GO_VERSION)'"
|
||||||
@echo BETA_URL="'$(BETA_URL)'"
|
@echo BETA_URL="'$(BETA_URL)'"
|
||||||
|
|
||||||
@@ -75,10 +76,10 @@ test: rclone test_all
|
|||||||
|
|
||||||
# Quick test
|
# Quick test
|
||||||
quicktest:
|
quicktest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
|
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
|
||||||
|
|
||||||
racequicktest:
|
racequicktest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
|
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
|
||||||
|
|
||||||
# Do source code quality checks
|
# Do source code quality checks
|
||||||
check: rclone
|
check: rclone
|
||||||
@@ -92,8 +93,7 @@ build_dep:
|
|||||||
|
|
||||||
# Get the release dependencies we only install on linux
|
# Get the release dependencies we only install on linux
|
||||||
release_dep_linux:
|
release_dep_linux:
|
||||||
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
|
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
|
||||||
go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
|
|
||||||
|
|
||||||
# Get the release dependencies we only install on Windows
|
# Get the release dependencies we only install on Windows
|
||||||
release_dep_windows:
|
release_dep_windows:
|
||||||
@@ -119,7 +119,7 @@ doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
|
|||||||
rclone.1: MANUAL.md
|
rclone.1: MANUAL.md
|
||||||
pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1
|
pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1
|
||||||
|
|
||||||
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
|
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs rcdocs
|
||||||
./bin/make_manual.py
|
./bin/make_manual.py
|
||||||
|
|
||||||
MANUAL.html: MANUAL.md
|
MANUAL.html: MANUAL.md
|
||||||
@@ -164,6 +164,11 @@ validate_website: website
|
|||||||
tarball:
|
tarball:
|
||||||
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
|
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
|
||||||
|
|
||||||
|
vendorball:
|
||||||
|
go mod vendor
|
||||||
|
tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
|
||||||
|
rm -rf vendor
|
||||||
|
|
||||||
sign_upload:
|
sign_upload:
|
||||||
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
|
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
|
||||||
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
|
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
|
||||||
@@ -182,10 +187,10 @@ upload_github:
|
|||||||
./bin/upload-github $(TAG)
|
./bin/upload-github $(TAG)
|
||||||
|
|
||||||
cross: doc
|
cross: doc
|
||||||
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
|
go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
||||||
|
|
||||||
beta:
|
beta:
|
||||||
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
|
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
||||||
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
|
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
|
||||||
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
|
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
|
||||||
|
|
||||||
@@ -193,23 +198,23 @@ log_since_last_release:
|
|||||||
git log $(LAST_TAG)..
|
git log $(LAST_TAG)..
|
||||||
|
|
||||||
compile_all:
|
compile_all:
|
||||||
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
|
go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
||||||
|
|
||||||
ci_upload:
|
ci_upload:
|
||||||
sudo chown -R $$USER build
|
sudo chown -R $$USER build
|
||||||
find build -type l -delete
|
find build -type l -delete
|
||||||
gzip -r9v build
|
gzip -r9v build
|
||||||
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
|
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
|
||||||
ifndef BRANCH_PATH
|
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
||||||
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
|
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
|
||||||
endif
|
endif
|
||||||
@echo Beta release ready at $(BETA_URL)/testbuilds
|
@echo Beta release ready at $(BETA_URL)/testbuilds
|
||||||
|
|
||||||
ci_beta:
|
ci_beta:
|
||||||
git log $(LAST_TAG).. > /tmp/git-log.txt
|
git log $(LAST_TAG).. > /tmp/git-log.txt
|
||||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
|
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
||||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||||
ifndef BRANCH_PATH
|
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
||||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
|
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
|
||||||
endif
|
endif
|
||||||
@echo Beta release ready at $(BETA_URL)
|
@echo Beta release ready at $(BETA_URL)
|
||||||
@@ -221,26 +226,66 @@ fetch_binaries:
|
|||||||
serve: website
|
serve: website
|
||||||
cd docs && hugo server -v -w --disableFastRender
|
cd docs && hugo server -v -w --disableFastRender
|
||||||
|
|
||||||
tag: doc
|
tag: retag doc
|
||||||
@echo "Old tag is $(VERSION)"
|
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
|
||||||
@echo "New tag is $(NEXT_VERSION)"
|
|
||||||
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
|
|
||||||
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
|
|
||||||
echo "$(NEXT_VERSION)" > VERSION
|
|
||||||
git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
|
|
||||||
bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
|
|
||||||
mv docs/content/changelog.md.new docs/content/changelog.md
|
mv docs/content/changelog.md.new docs/content/changelog.md
|
||||||
@echo "Edit the new changelog in docs/content/changelog.md"
|
@echo "Edit the new changelog in docs/content/changelog.md"
|
||||||
@echo "Then commit all the changes"
|
@echo "Then commit all the changes"
|
||||||
@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
|
@echo git commit -m \"Version $(VERSION)\" -a -v
|
||||||
@echo "And finally run make retag before make cross etc"
|
@echo "And finally run make retag before make cross, etc."
|
||||||
|
|
||||||
retag:
|
retag:
|
||||||
|
@echo "Version is $(VERSION)"
|
||||||
git tag -f -s -m "Version $(VERSION)" $(VERSION)
|
git tag -f -s -m "Version $(VERSION)" $(VERSION)
|
||||||
|
|
||||||
startdev:
|
startdev:
|
||||||
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
|
@echo "Version is $(VERSION)"
|
||||||
git commit -m "Start $(VERSION)-DEV development" fs/version.go
|
@echo "Next version is $(NEXT_VERSION)"
|
||||||
|
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
|
||||||
|
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
|
||||||
|
echo "$(NEXT_VERSION)" > VERSION
|
||||||
|
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
|
||||||
|
|
||||||
|
startstable:
|
||||||
|
@echo "Version is $(VERSION)"
|
||||||
|
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
|
||||||
|
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
|
||||||
|
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
|
||||||
|
echo "$(NEXT_PATCH_VERSION)" > VERSION
|
||||||
|
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
|
||||||
|
|
||||||
winzip:
|
winzip:
|
||||||
zip -9 rclone-$(TAG).zip rclone.exe
|
zip -9 rclone-$(TAG).zip rclone.exe
|
||||||
|
|
||||||
|
# docker volume plugin
|
||||||
|
PLUGIN_IMAGE_USER ?= rclone
|
||||||
|
PLUGIN_IMAGE_TAG ?= latest
|
||||||
|
PLUGIN_IMAGE_NAME ?= docker-volume-rclone
|
||||||
|
PLUGIN_IMAGE ?= $(PLUGIN_IMAGE_USER)/$(PLUGIN_IMAGE_NAME):$(PLUGIN_IMAGE_TAG)
|
||||||
|
|
||||||
|
PLUGIN_BASE_IMAGE := rclone/rclone:latest
|
||||||
|
PLUGIN_BUILD_DIR := ./build/docker-plugin
|
||||||
|
PLUGIN_CONTRIB_DIR := ./cmd/serve/docker/contrib/plugin
|
||||||
|
PLUGIN_CONFIG := $(PLUGIN_CONTRIB_DIR)/config.json
|
||||||
|
PLUGIN_DOCKERFILE := $(PLUGIN_CONTRIB_DIR)/Dockerfile
|
||||||
|
PLUGIN_CONTAINER := docker-volume-rclone-dev-$(shell date +'%Y%m%d-%H%M%S')
|
||||||
|
|
||||||
|
docker-plugin: docker-plugin-rootfs docker-plugin-create
|
||||||
|
|
||||||
|
docker-plugin-image: rclone
|
||||||
|
docker build --no-cache --pull --build-arg BASE_IMAGE=${PLUGIN_BASE_IMAGE} -t ${PLUGIN_IMAGE} -f ${PLUGIN_DOCKERFILE} .
|
||||||
|
|
||||||
|
docker-plugin-rootfs: docker-plugin-image
|
||||||
|
mkdir -p ${PLUGIN_BUILD_DIR}/rootfs
|
||||||
|
docker create --name ${PLUGIN_CONTAINER} ${PLUGIN_IMAGE}
|
||||||
|
docker export ${PLUGIN_CONTAINER} | tar -x -C ${PLUGIN_BUILD_DIR}/rootfs
|
||||||
|
docker rm -vf ${PLUGIN_CONTAINER}
|
||||||
|
cp ${PLUGIN_CONFIG} ${PLUGIN_BUILD_DIR}/config.json
|
||||||
|
|
||||||
|
docker-plugin-create:
|
||||||
|
docker plugin rm -f ${PLUGIN_IMAGE} 2>/dev/null || true
|
||||||
|
docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
|
||||||
|
|
||||||
|
docker-plugin-push: docker-plugin-create
|
||||||
|
docker plugin push ${PLUGIN_IMAGE}
|
||||||
|
docker plugin rm ${PLUGIN_IMAGE}
|
||||||
|
|||||||
@@ -30,11 +30,13 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
|||||||
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
||||||
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
||||||
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||||
|
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||||
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||||
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
|
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||||
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||||
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||||
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||||
|
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
||||||
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||||
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
|
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
|
||||||
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||||
@@ -60,13 +62,16 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
|||||||
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||||
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||||
|
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||||
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||||
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||||
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
|
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
|
||||||
|
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||||
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||||
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||||
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
||||||
|
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
|
||||||
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
|
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
|
||||||
|
|
||||||
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
|
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
|
||||||
@@ -81,8 +86,8 @@ Please see [the full list of all storage providers and their features](https://r
|
|||||||
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
||||||
* Can sync to and from network, e.g. two different cloud accounts
|
* Can sync to and from network, e.g. two different cloud accounts
|
||||||
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
||||||
|
* Optional transparent compression ([Compress](https://rclone.org/compress/))
|
||||||
* Optional encryption ([Crypt](https://rclone.org/crypt/))
|
* Optional encryption ([Crypt](https://rclone.org/crypt/))
|
||||||
* Optional cache ([Cache](https://rclone.org/cache/))
|
|
||||||
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
|
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
|
||||||
* Multi-threaded downloads to local disk
|
* Multi-threaded downloads to local disk
|
||||||
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
|
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
|
||||||
|
|||||||
79
RELEASE.md
79
RELEASE.md
@@ -4,12 +4,12 @@ This file describes how to make the various kinds of releases
|
|||||||
|
|
||||||
## Extra required software for making a release
|
## Extra required software for making a release
|
||||||
|
|
||||||
* [github-release](https://github.com/aktau/github-release) for uploading packages
|
* [gh the github cli](https://github.com/cli/cli) for uploading packages
|
||||||
* pandoc for making the html and man pages
|
* pandoc for making the html and man pages
|
||||||
|
|
||||||
## Making a release
|
## Making a release
|
||||||
|
|
||||||
* git checkout master
|
* git checkout master # see below for stable branch
|
||||||
* git pull
|
* git pull
|
||||||
* git status - make sure everything is checked in
|
* git status - make sure everything is checked in
|
||||||
* Check GitHub actions build for master is Green
|
* Check GitHub actions build for master is Green
|
||||||
@@ -21,16 +21,17 @@ This file describes how to make the various kinds of releases
|
|||||||
* git status - to check for new man pages - git add them
|
* git status - to check for new man pages - git add them
|
||||||
* git commit -a -v -m "Version v1.XX.0"
|
* git commit -a -v -m "Version v1.XX.0"
|
||||||
* make retag
|
* make retag
|
||||||
* git push --tags origin master
|
* git push --follow-tags origin
|
||||||
* # Wait for the GitHub builds to complete then...
|
* # Wait for the GitHub builds to complete then...
|
||||||
* make fetch_binaries
|
* make fetch_binaries
|
||||||
* make tarball
|
* make tarball
|
||||||
|
* make vendorball
|
||||||
* make sign_upload
|
* make sign_upload
|
||||||
* make check_sign
|
* make check_sign
|
||||||
* make upload
|
* make upload
|
||||||
* make upload_website
|
* make upload_website
|
||||||
* make upload_github
|
* make upload_github
|
||||||
* make startdev
|
* make startdev # make startstable for stable branch
|
||||||
* # announce with forum post, twitter post, patreon post
|
* # announce with forum post, twitter post, patreon post
|
||||||
|
|
||||||
Early in the next release cycle update the dependencies
|
Early in the next release cycle update the dependencies
|
||||||
@@ -41,66 +42,58 @@ Early in the next release cycle update the dependencies
|
|||||||
* git add new files
|
* git add new files
|
||||||
* git commit -a -v
|
* git commit -a -v
|
||||||
|
|
||||||
If `make update` fails with errors like this:
|
|
||||||
|
|
||||||
```
|
|
||||||
# github.com/cpuguy83/go-md2man/md2man
|
|
||||||
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
|
|
||||||
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
|
|
||||||
```
|
|
||||||
|
|
||||||
Can be fixed with
|
|
||||||
|
|
||||||
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
|
|
||||||
* GO111MODULE=on go mod tidy
|
|
||||||
|
|
||||||
|
|
||||||
## Making a point release
|
## Making a point release
|
||||||
|
|
||||||
If rclone needs a point release due to some horrendous bug:
|
If rclone needs a point release due to some horrendous bug:
|
||||||
|
|
||||||
|
Set vars
|
||||||
|
|
||||||
|
* BASE_TAG=v1.XX # e.g. v1.52
|
||||||
|
* NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
|
||||||
|
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
|
||||||
|
|
||||||
First make the release branch. If this is a second point release then
|
First make the release branch. If this is a second point release then
|
||||||
this will be done already.
|
this will be done already.
|
||||||
|
|
||||||
* BASE_TAG=v1.XX # eg v1.52
|
|
||||||
* NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
|
|
||||||
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
|
|
||||||
* git branch ${BASE_TAG} ${BASE_TAG}-stable
|
* git branch ${BASE_TAG} ${BASE_TAG}-stable
|
||||||
|
* git co ${BASE_TAG}-stable
|
||||||
|
* make startstable
|
||||||
|
|
||||||
Now
|
Now
|
||||||
|
|
||||||
* git co ${BASE_TAG}-stable
|
* git co ${BASE_TAG}-stable
|
||||||
* git cherry-pick any fixes
|
* git cherry-pick any fixes
|
||||||
* Test (see above)
|
* Do the steps as above
|
||||||
* make NEXT_VERSION=${NEW_TAG} tag
|
* make startstable
|
||||||
* edit docs/content/changelog.md
|
|
||||||
* make TAG=${NEW_TAG} doc
|
|
||||||
* git commit -a -v -m "Version ${NEW_TAG}"
|
|
||||||
* git tag -d ${NEW_TAG}
|
|
||||||
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
|
|
||||||
* git push --tags -u origin ${BASE_TAG}-stable
|
|
||||||
* Wait for builds to complete
|
|
||||||
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
|
|
||||||
* make TAG=${NEW_TAG} tarball
|
|
||||||
* make TAG=${NEW_TAG} sign_upload
|
|
||||||
* make TAG=${NEW_TAG} check_sign
|
|
||||||
* make TAG=${NEW_TAG} upload
|
|
||||||
* make TAG=${NEW_TAG} upload_website
|
|
||||||
* make TAG=${NEW_TAG} upload_github
|
|
||||||
* NB this overwrites the current beta so we need to do this
|
|
||||||
* git co master
|
* git co master
|
||||||
* make VERSION=${NEW_TAG} startdev
|
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
|
||||||
* # cherry pick the changes to the changelog and VERSION
|
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
|
||||||
* git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
|
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
|
||||||
* git commit --amend
|
|
||||||
* git push
|
* git push
|
||||||
* Announce!
|
|
||||||
|
|
||||||
## Making a manual build of docker
|
## Making a manual build of docker
|
||||||
|
|
||||||
The rclone docker image should autobuild on via GitHub actions. If it doesn't
|
The rclone docker image should autobuild on via GitHub actions. If it doesn't
|
||||||
or needs to be updated then rebuild like this.
|
or needs to be updated then rebuild like this.
|
||||||
|
|
||||||
|
See: https://github.com/ilteoood/docker_buildx/issues/19
|
||||||
|
See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
|
||||||
|
|
||||||
|
```
|
||||||
|
git co v1.54.1
|
||||||
|
docker pull golang
|
||||||
|
export DOCKER_CLI_EXPERIMENTAL=enabled
|
||||||
|
docker buildx create --name actions_builder --use
|
||||||
|
docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
|
||||||
|
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
|
||||||
|
SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
|
||||||
|
echo "Supported platforms: $SUPPORTED_PLATFORMS"
|
||||||
|
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
|
||||||
|
docker buildx stop actions_builder
|
||||||
|
```
|
||||||
|
|
||||||
|
### Old build for linux/amd64 only
|
||||||
|
|
||||||
```
|
```
|
||||||
docker pull golang
|
docker pull golang
|
||||||
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
|
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
|
||||||
|
|||||||
@@ -1,10 +1,12 @@
|
|||||||
package alias
|
package alias
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/cache"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/fspath"
|
"github.com/rclone/rclone/fs/fspath"
|
||||||
@@ -33,7 +35,7 @@ type Options struct {
|
|||||||
// NewFs constructs an Fs from the path.
|
// NewFs constructs an Fs from the path.
|
||||||
//
|
//
|
||||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -46,9 +48,5 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
if strings.HasPrefix(opt.Remote, name+":") {
|
if strings.HasPrefix(opt.Remote, name+":") {
|
||||||
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
|
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
|
||||||
}
|
}
|
||||||
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
|
return cache.Get(ctx, fspath.JoinRootPath(opt.Remote, root))
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
_ "github.com/rclone/rclone/backend/local" // pull in test backend
|
_ "github.com/rclone/rclone/backend/local" // pull in test backend
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/fs/config/configfile"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -19,7 +20,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func prepare(t *testing.T, root string) {
|
func prepare(t *testing.T, root string) {
|
||||||
config.LoadConfig()
|
configfile.Install()
|
||||||
|
|
||||||
// Configure the remote
|
// Configure the remote
|
||||||
config.FileSet(remoteName, "type", "alias")
|
config.FileSet(remoteName, "type", "alias")
|
||||||
@@ -54,21 +55,22 @@ func TestNewFS(t *testing.T) {
|
|||||||
{"four/under four.txt", 9, false},
|
{"four/under four.txt", 9, false},
|
||||||
}},
|
}},
|
||||||
{"four", "..", "", true, []testEntry{
|
{"four", "..", "", true, []testEntry{
|
||||||
{"four", -1, true},
|
{"five", -1, true},
|
||||||
{"one%.txt", 6, false},
|
{"under four.txt", 9, false},
|
||||||
{"three", -1, true},
|
|
||||||
{"two.html", 7, false},
|
|
||||||
}},
|
}},
|
||||||
{"four", "../three", "", true, []testEntry{
|
{"", "../../three", "", true, []testEntry{
|
||||||
{"underthree.txt", 9, false},
|
{"underthree.txt", 9, false},
|
||||||
}},
|
}},
|
||||||
|
{"four", "../../five", "", true, []testEntry{
|
||||||
|
{"underfive.txt", 6, false},
|
||||||
|
}},
|
||||||
} {
|
} {
|
||||||
what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)
|
what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)
|
||||||
|
|
||||||
remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
|
remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
|
||||||
require.NoError(t, err, what)
|
require.NoError(t, err, what)
|
||||||
prepare(t, remoteRoot)
|
prepare(t, remoteRoot)
|
||||||
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
|
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
|
||||||
require.NoError(t, err, what)
|
require.NoError(t, err, what)
|
||||||
gotEntries, err := f.List(context.Background(), test.fsList)
|
gotEntries, err := f.List(context.Background(), test.fsList)
|
||||||
require.NoError(t, err, what)
|
require.NoError(t, err, what)
|
||||||
@@ -90,7 +92,7 @@ func TestNewFS(t *testing.T) {
|
|||||||
|
|
||||||
func TestNewFSNoRemote(t *testing.T) {
|
func TestNewFSNoRemote(t *testing.T) {
|
||||||
prepare(t, "")
|
prepare(t, "")
|
||||||
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
|
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
|
||||||
|
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
require.Nil(t, f)
|
require.Nil(t, f)
|
||||||
@@ -98,7 +100,7 @@ func TestNewFSNoRemote(t *testing.T) {
|
|||||||
|
|
||||||
func TestNewFSInvalidRemote(t *testing.T) {
|
func TestNewFSInvalidRemote(t *testing.T) {
|
||||||
prepare(t, "not_existing_test_remote:")
|
prepare(t, "not_existing_test_remote:")
|
||||||
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
|
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
|
||||||
|
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
require.Nil(t, f)
|
require.Nil(t, f)
|
||||||
|
|||||||
@@ -9,13 +9,16 @@ import (
|
|||||||
_ "github.com/rclone/rclone/backend/box"
|
_ "github.com/rclone/rclone/backend/box"
|
||||||
_ "github.com/rclone/rclone/backend/cache"
|
_ "github.com/rclone/rclone/backend/cache"
|
||||||
_ "github.com/rclone/rclone/backend/chunker"
|
_ "github.com/rclone/rclone/backend/chunker"
|
||||||
|
_ "github.com/rclone/rclone/backend/compress"
|
||||||
_ "github.com/rclone/rclone/backend/crypt"
|
_ "github.com/rclone/rclone/backend/crypt"
|
||||||
_ "github.com/rclone/rclone/backend/drive"
|
_ "github.com/rclone/rclone/backend/drive"
|
||||||
_ "github.com/rclone/rclone/backend/dropbox"
|
_ "github.com/rclone/rclone/backend/dropbox"
|
||||||
_ "github.com/rclone/rclone/backend/fichier"
|
_ "github.com/rclone/rclone/backend/fichier"
|
||||||
|
_ "github.com/rclone/rclone/backend/filefabric"
|
||||||
_ "github.com/rclone/rclone/backend/ftp"
|
_ "github.com/rclone/rclone/backend/ftp"
|
||||||
_ "github.com/rclone/rclone/backend/googlecloudstorage"
|
_ "github.com/rclone/rclone/backend/googlecloudstorage"
|
||||||
_ "github.com/rclone/rclone/backend/googlephotos"
|
_ "github.com/rclone/rclone/backend/googlephotos"
|
||||||
|
_ "github.com/rclone/rclone/backend/hdfs"
|
||||||
_ "github.com/rclone/rclone/backend/http"
|
_ "github.com/rclone/rclone/backend/http"
|
||||||
_ "github.com/rclone/rclone/backend/hubic"
|
_ "github.com/rclone/rclone/backend/hubic"
|
||||||
_ "github.com/rclone/rclone/backend/jottacloud"
|
_ "github.com/rclone/rclone/backend/jottacloud"
|
||||||
@@ -38,6 +41,8 @@ import (
|
|||||||
_ "github.com/rclone/rclone/backend/swift"
|
_ "github.com/rclone/rclone/backend/swift"
|
||||||
_ "github.com/rclone/rclone/backend/tardigrade"
|
_ "github.com/rclone/rclone/backend/tardigrade"
|
||||||
_ "github.com/rclone/rclone/backend/union"
|
_ "github.com/rclone/rclone/backend/union"
|
||||||
|
_ "github.com/rclone/rclone/backend/uptobox"
|
||||||
_ "github.com/rclone/rclone/backend/webdav"
|
_ "github.com/rclone/rclone/backend/webdav"
|
||||||
_ "github.com/rclone/rclone/backend/yandex"
|
_ "github.com/rclone/rclone/backend/yandex"
|
||||||
|
_ "github.com/rclone/rclone/backend/zoho"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -16,7 +16,6 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -70,45 +69,28 @@ func init() {
|
|||||||
Prefix: "acd",
|
Prefix: "acd",
|
||||||
Description: "Amazon Drive",
|
Description: "Amazon Drive",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(name string, m configmap.Mapper) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||||
err := oauthutil.Config("amazon cloud drive", name, m, acdConfig, nil)
|
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||||
if err != nil {
|
OAuth2Config: acdConfig,
|
||||||
log.Fatalf("Failed to configure token: %v", err)
|
})
|
||||||
}
|
|
||||||
},
|
},
|
||||||
Options: []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: config.ConfigClientID,
|
|
||||||
Help: "Amazon Application Client ID.",
|
|
||||||
Required: true,
|
|
||||||
}, {
|
|
||||||
Name: config.ConfigClientSecret,
|
|
||||||
Help: "Amazon Application Client Secret.",
|
|
||||||
Required: true,
|
|
||||||
}, {
|
|
||||||
Name: config.ConfigAuthURL,
|
|
||||||
Help: "Auth server URL.\nLeave blank to use Amazon's.",
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: config.ConfigTokenURL,
|
|
||||||
Help: "Token server url.\nleave blank to use Amazon's.",
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "checkpoint",
|
Name: "checkpoint",
|
||||||
Help: "Checkpoint for internal polling (debug).",
|
Help: "Checkpoint for internal polling (debug).",
|
||||||
Hide: fs.OptionHideBoth,
|
Hide: fs.OptionHideBoth,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "upload_wait_per_gb",
|
Name: "upload_wait_per_gb",
|
||||||
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
|
Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.
|
||||||
|
|
||||||
Sometimes Amazon Drive gives an error when a file has been fully
|
Sometimes Amazon Drive gives an error when a file has been fully
|
||||||
uploaded but the file appears anyway after a little while. This
|
uploaded but the file appears anyway after a little while. This
|
||||||
happens sometimes for files over 1GB in size and nearly every time for
|
happens sometimes for files over 1 GiB in size and nearly every time for
|
||||||
files bigger than 10GB. This parameter controls the time rclone waits
|
files bigger than 10 GiB. This parameter controls the time rclone waits
|
||||||
for the file to appear.
|
for the file to appear.
|
||||||
|
|
||||||
The default value for this parameter is 3 minutes per GB, so by
|
The default value for this parameter is 3 minutes per GiB, so by
|
||||||
default it will wait 3 minutes for every GB uploaded to see if the
|
default it will wait 3 minutes for every GiB uploaded to see if the
|
||||||
file appears.
|
file appears.
|
||||||
|
|
||||||
You can disable this feature by setting it to 0. This may cause
|
You can disable this feature by setting it to 0. This may cause
|
||||||
@@ -128,7 +110,7 @@ in this situation.`,
|
|||||||
|
|
||||||
Files this size or more will be downloaded via their "tempLink". This
|
Files this size or more will be downloaded via their "tempLink". This
|
||||||
is to work around a problem with Amazon Drive which blocks downloads
|
is to work around a problem with Amazon Drive which blocks downloads
|
||||||
of files bigger than about 10GB. The default for this is 9GB which
|
of files bigger than about 10 GiB. The default for this is 9 GiB which
|
||||||
shouldn't need to be changed.
|
shouldn't need to be changed.
|
||||||
|
|
||||||
To download files above this threshold, rclone requests a "tempLink"
|
To download files above this threshold, rclone requests a "tempLink"
|
||||||
@@ -143,7 +125,7 @@ underlying S3 storage.`,
|
|||||||
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
|
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
|
||||||
Default: (encoder.Base |
|
Default: (encoder.Base |
|
||||||
encoder.EncodeInvalidUtf8),
|
encoder.EncodeInvalidUtf8),
|
||||||
}},
|
}}...),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -160,6 +142,7 @@ type Fs struct {
|
|||||||
name string // name of this remote
|
name string // name of this remote
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
opt Options // options for this Fs
|
opt Options // options for this Fs
|
||||||
|
ci *fs.ConfigInfo // global config
|
||||||
c *acd.Client // the connection to the acd server
|
c *acd.Client // the connection to the acd server
|
||||||
noAuthClient *http.Client // unauthenticated http client
|
noAuthClient *http.Client // unauthenticated http client
|
||||||
root string // the path we are working on
|
root string // the path we are working on
|
||||||
@@ -220,7 +203,10 @@ var retryErrorCodes = []int{
|
|||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this resp and err
|
// shouldRetry returns a boolean as to whether this resp and err
|
||||||
// deserve to be retried. It returns the err as a convenience
|
// deserve to be retried. It returns the err as a convenience
|
||||||
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
|
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||||
|
if fserrors.ContextError(ctx, &err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
if resp.StatusCode == 401 {
|
if resp.StatusCode == 401 {
|
||||||
f.tokenRenewer.Invalidate()
|
f.tokenRenewer.Invalidate()
|
||||||
@@ -255,8 +241,7 @@ func filterRequest(req *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
ctx := context.Background()
|
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -264,7 +249,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
root = parsePath(root)
|
root = parsePath(root)
|
||||||
baseClient := fshttp.NewClient(fs.Config)
|
baseClient := fshttp.NewClient(ctx)
|
||||||
if do, ok := baseClient.Transport.(interface {
|
if do, ok := baseClient.Transport.(interface {
|
||||||
SetRequestFilter(f func(req *http.Request))
|
SetRequestFilter(f func(req *http.Request))
|
||||||
}); ok {
|
}); ok {
|
||||||
@@ -272,29 +257,31 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
} else {
|
} else {
|
||||||
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
|
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
|
||||||
}
|
}
|
||||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
|
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
|
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
|
||||||
}
|
}
|
||||||
|
|
||||||
c := acd.NewClient(oAuthClient)
|
c := acd.NewClient(oAuthClient)
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
root: root,
|
root: root,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
|
ci: ci,
|
||||||
c: c,
|
c: c,
|
||||||
pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
|
pacer: fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
|
||||||
noAuthClient: fshttp.NewClient(fs.Config),
|
noAuthClient: fshttp.NewClient(ctx),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: true,
|
CaseInsensitive: true,
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
|
|
||||||
// Renew the token in the background
|
// Renew the token in the background
|
||||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||||
_, err := f.getRootInfo()
|
_, err := f.getRootInfo(ctx)
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -302,14 +289,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
_, resp, err = f.c.Account.GetEndpoints()
|
_, resp, err = f.c.Account.GetEndpoints()
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to get endpoints")
|
return nil, errors.Wrap(err, "failed to get endpoints")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get rootID
|
// Get rootID
|
||||||
rootInfo, err := f.getRootInfo()
|
rootInfo, err := f.getRootInfo(ctx)
|
||||||
if err != nil || rootInfo.Id == nil {
|
if err != nil || rootInfo.Id == nil {
|
||||||
return nil, errors.Wrap(err, "failed to get root")
|
return nil, errors.Wrap(err, "failed to get root")
|
||||||
}
|
}
|
||||||
@@ -351,11 +338,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// getRootInfo gets the root folder info
|
// getRootInfo gets the root folder info
|
||||||
func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
|
func (f *Fs) getRootInfo(ctx context.Context) (rootInfo *acd.Folder, err error) {
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
rootInfo, resp, err = f.c.Nodes.GetRoot()
|
rootInfo, resp, err = f.c.Nodes.GetRoot()
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
return rootInfo, err
|
return rootInfo, err
|
||||||
}
|
}
|
||||||
@@ -394,7 +381,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
|
|||||||
var subFolder *acd.Folder
|
var subFolder *acd.Folder
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
|
subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == acd.ErrorNodeNotFound {
|
if err == acd.ErrorNodeNotFound {
|
||||||
@@ -421,7 +408,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
|||||||
var info *acd.Folder
|
var info *acd.Folder
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
|
info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
//fmt.Printf("...Error %v\n", err)
|
//fmt.Printf("...Error %v\n", err)
|
||||||
@@ -442,7 +429,7 @@ type listAllFn func(*acd.Node) bool
|
|||||||
// Lists the directory required calling the user function on each item found
|
// Lists the directory required calling the user function on each item found
|
||||||
//
|
//
|
||||||
// If the user fn ever returns true then it early exits with found = true
|
// If the user fn ever returns true then it early exits with found = true
|
||||||
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
func (f *Fs) listAll(ctx context.Context, dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||||
query := "parents:" + dirID
|
query := "parents:" + dirID
|
||||||
if directoriesOnly {
|
if directoriesOnly {
|
||||||
query += " AND kind:" + folderKind
|
query += " AND kind:" + folderKind
|
||||||
@@ -463,7 +450,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||||
nodes, resp, err = f.c.Nodes.GetNodes(&opts)
|
nodes, resp, err = f.c.Nodes.GetNodes(&opts)
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
@@ -518,11 +505,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
maxTries := fs.Config.LowLevelRetries
|
maxTries := f.ci.LowLevelRetries
|
||||||
var iErr error
|
var iErr error
|
||||||
for tries := 1; tries <= maxTries; tries++ {
|
for tries := 1; tries <= maxTries; tries++ {
|
||||||
entries = nil
|
entries = nil
|
||||||
_, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool {
|
_, err = f.listAll(ctx, directoryID, "", false, false, func(node *acd.Node) bool {
|
||||||
remote := path.Join(dir, *node.Name)
|
remote := path.Join(dir, *node.Name)
|
||||||
switch *node.Kind {
|
switch *node.Kind {
|
||||||
case folderKind:
|
case folderKind:
|
||||||
@@ -539,7 +526,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
}
|
}
|
||||||
entries = append(entries, o)
|
entries = append(entries, o)
|
||||||
default:
|
default:
|
||||||
// ignore ASSET etc
|
// ignore ASSET, etc.
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
})
|
})
|
||||||
@@ -681,7 +668,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
|||||||
if ok {
|
if ok {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -696,7 +683,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move src to this remote using server side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -722,7 +709,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
err = f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
|
err = f.moveNode(ctx, srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -733,7 +720,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
dstObj fs.Object
|
dstObj fs.Object
|
||||||
srcErr, dstErr error
|
srcErr, dstErr error
|
||||||
)
|
)
|
||||||
for i := 1; i <= fs.Config.LowLevelRetries; i++ {
|
for i := 1; i <= f.ci.LowLevelRetries; i++ {
|
||||||
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
|
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
|
||||||
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
|
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
|
||||||
// exit if error on source
|
// exit if error on source
|
||||||
@@ -748,7 +735,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
// finished if src not found and dst found
|
// finished if src not found and dst found
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
|
fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
}
|
}
|
||||||
return dstObj, dstErr
|
return dstObj, dstErr
|
||||||
@@ -761,7 +748,7 @@ func (f *Fs) DirCacheFlush() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
// using server side move operations.
|
// using server-side move operations.
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -817,7 +804,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
var jsonStr string
|
var jsonStr string
|
||||||
err = srcFs.pacer.Call(func() (bool, error) {
|
err = srcFs.pacer.Call(func() (bool, error) {
|
||||||
jsonStr, err = srcInfo.GetMetadata()
|
jsonStr, err = srcInfo.GetMetadata()
|
||||||
return srcFs.shouldRetry(nil, err)
|
return srcFs.shouldRetry(ctx, nil, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
|
fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
|
||||||
@@ -829,7 +816,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = f.moveNode(srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
|
err = f.moveNode(ctx, srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -854,7 +841,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
|||||||
if check {
|
if check {
|
||||||
// check directory is empty
|
// check directory is empty
|
||||||
empty := true
|
empty := true
|
||||||
_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
|
_, err = f.listAll(ctx, rootID, "", false, false, func(node *acd.Node) bool {
|
||||||
switch *node.Kind {
|
switch *node.Kind {
|
||||||
case folderKind:
|
case folderKind:
|
||||||
empty = false
|
empty = false
|
||||||
@@ -879,7 +866,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = node.Trash()
|
resp, err = node.Trash()
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -909,7 +896,7 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
return hash.Set(hash.MD5)
|
return hash.Set(hash.MD5)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -937,8 +924,8 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
// Optional interface: Only implement this if you have a way of
|
// Optional interface: Only implement this if you have a way of
|
||||||
// deleting all the files quicker than just running Remove() on the
|
// deleting all the files quicker than just running Remove() on the
|
||||||
// result of List()
|
// result of List()
|
||||||
func (f *Fs) Purge(ctx context.Context) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
return f.purgeCheck(ctx, "", false)
|
return f.purgeCheck(ctx, dir, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
@@ -1001,7 +988,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
|||||||
var info *acd.File
|
var info *acd.File
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
|
info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
|
||||||
return o.fs.shouldRetry(resp, err)
|
return o.fs.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == acd.ErrorNodeNotFound {
|
if err == acd.ErrorNodeNotFound {
|
||||||
@@ -1058,7 +1045,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
} else {
|
} else {
|
||||||
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
|
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
|
||||||
}
|
}
|
||||||
return o.fs.shouldRetry(resp, err)
|
return o.fs.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
return in, err
|
return in, err
|
||||||
}
|
}
|
||||||
@@ -1081,7 +1068,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
if ok {
|
if ok {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
return o.fs.shouldRetry(resp, err)
|
return o.fs.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1091,70 +1078,70 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove a node
|
// Remove a node
|
||||||
func (f *Fs) removeNode(info *acd.Node) error {
|
func (f *Fs) removeNode(ctx context.Context, info *acd.Node) error {
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
var err error
|
var err error
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = info.Trash()
|
resp, err = info.Trash()
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
return o.fs.removeNode(o.info)
|
return o.fs.removeNode(ctx, o.info)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Restore a node
|
// Restore a node
|
||||||
func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
|
func (f *Fs) restoreNode(ctx context.Context, info *acd.Node) (newInfo *acd.Node, err error) {
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
newInfo, resp, err = info.Restore()
|
newInfo, resp, err = info.Restore()
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
return newInfo, err
|
return newInfo, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Changes name of given node
|
// Changes name of given node
|
||||||
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
|
func (f *Fs) renameNode(ctx context.Context, info *acd.Node, newName string) (newInfo *acd.Node, err error) {
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
|
newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
return newInfo, err
|
return newInfo, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Replaces one parent with another, effectively moving the file. Leaves other
|
// Replaces one parent with another, effectively moving the file. Leaves other
|
||||||
// parents untouched. ReplaceParent cannot be used when the file is trashed.
|
// parents untouched. ReplaceParent cannot be used when the file is trashed.
|
||||||
func (f *Fs) replaceParent(info *acd.Node, oldParentID string, newParentID string) error {
|
func (f *Fs) replaceParent(ctx context.Context, info *acd.Node, oldParentID string, newParentID string) error {
|
||||||
return f.pacer.Call(func() (bool, error) {
|
return f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := info.ReplaceParent(oldParentID, newParentID)
|
resp, err := info.ReplaceParent(oldParentID, newParentID)
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Adds one additional parent to object.
|
// Adds one additional parent to object.
|
||||||
func (f *Fs) addParent(info *acd.Node, newParentID string) error {
|
func (f *Fs) addParent(ctx context.Context, info *acd.Node, newParentID string) error {
|
||||||
return f.pacer.Call(func() (bool, error) {
|
return f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := info.AddParent(newParentID)
|
resp, err := info.AddParent(newParentID)
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove given parent from object, leaving the other possible
|
// Remove given parent from object, leaving the other possible
|
||||||
// parents untouched. Object can end up having no parents.
|
// parents untouched. Object can end up having no parents.
|
||||||
func (f *Fs) removeParent(info *acd.Node, parentID string) error {
|
func (f *Fs) removeParent(ctx context.Context, info *acd.Node, parentID string) error {
|
||||||
return f.pacer.Call(func() (bool, error) {
|
return f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := info.RemoveParent(parentID)
|
resp, err := info.RemoveParent(parentID)
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// moveNode moves the node given from the srcLeaf,srcDirectoryID to
|
// moveNode moves the node given from the srcLeaf,srcDirectoryID to
|
||||||
// the dstLeaf,dstDirectoryID
|
// the dstLeaf,dstDirectoryID
|
||||||
func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
|
func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
|
||||||
// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
|
// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
|
||||||
cantMove := fs.ErrorCantMove
|
cantMove := fs.ErrorCantMove
|
||||||
if useDirErrorMsgs {
|
if useDirErrorMsgs {
|
||||||
@@ -1168,7 +1155,7 @@ func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, s
|
|||||||
|
|
||||||
if srcLeaf != dstLeaf {
|
if srcLeaf != dstLeaf {
|
||||||
// fs.Debugf(name, "renaming")
|
// fs.Debugf(name, "renaming")
|
||||||
_, err = f.renameNode(srcInfo, dstLeaf)
|
_, err = f.renameNode(ctx, srcInfo, dstLeaf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(name, "Move: quick path rename failed: %v", err)
|
fs.Debugf(name, "Move: quick path rename failed: %v", err)
|
||||||
goto OnConflict
|
goto OnConflict
|
||||||
@@ -1176,7 +1163,7 @@ func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, s
|
|||||||
}
|
}
|
||||||
if srcDirectoryID != dstDirectoryID {
|
if srcDirectoryID != dstDirectoryID {
|
||||||
// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
|
// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
|
||||||
err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID)
|
err = f.replaceParent(ctx, srcInfo, srcDirectoryID, dstDirectoryID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
|
fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
|
||||||
return err
|
return err
|
||||||
@@ -1189,13 +1176,13 @@ OnConflict:
|
|||||||
fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")
|
fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")
|
||||||
|
|
||||||
// fs.Debugf(name, "Trashing file")
|
// fs.Debugf(name, "Trashing file")
|
||||||
err = f.removeNode(srcInfo)
|
err = f.removeNode(ctx, srcInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(name, "Move: remove node failed: %v", err)
|
fs.Debugf(name, "Move: remove node failed: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// fs.Debugf(name, "Renaming file")
|
// fs.Debugf(name, "Renaming file")
|
||||||
_, err = f.renameNode(srcInfo, dstLeaf)
|
_, err = f.renameNode(ctx, srcInfo, dstLeaf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(name, "Move: rename node failed: %v", err)
|
fs.Debugf(name, "Move: rename node failed: %v", err)
|
||||||
return err
|
return err
|
||||||
@@ -1203,19 +1190,19 @@ OnConflict:
|
|||||||
// note: replacing parent is forbidden by API, modifying them individually is
|
// note: replacing parent is forbidden by API, modifying them individually is
|
||||||
// okay though
|
// okay though
|
||||||
// fs.Debugf(name, "Adding target parent")
|
// fs.Debugf(name, "Adding target parent")
|
||||||
err = f.addParent(srcInfo, dstDirectoryID)
|
err = f.addParent(ctx, srcInfo, dstDirectoryID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(name, "Move: addParent failed: %v", err)
|
fs.Debugf(name, "Move: addParent failed: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// fs.Debugf(name, "removing original parent")
|
// fs.Debugf(name, "removing original parent")
|
||||||
err = f.removeParent(srcInfo, srcDirectoryID)
|
err = f.removeParent(ctx, srcInfo, srcDirectoryID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(name, "Move: removeParent failed: %v", err)
|
fs.Debugf(name, "Move: removeParent failed: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// fs.Debugf(name, "Restoring")
|
// fs.Debugf(name, "Restoring")
|
||||||
_, err = f.restoreNode(srcInfo)
|
_, err = f.restoreNode(ctx, srcInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(name, "Move: restoreNode node failed: %v", err)
|
fs.Debugf(name, "Move: restoreNode node failed: %v", err)
|
||||||
return err
|
return err
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,4 +1,4 @@
|
|||||||
// +build !plan9,!solaris,go1.13
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
|
|||||||
@@ -1,14 +1,16 @@
|
|||||||
// Test AzureBlob filesystem interface
|
// Test AzureBlob filesystem interface
|
||||||
|
|
||||||
// +build !plan9,!solaris,go1.13
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
@@ -27,11 +29,36 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
|||||||
return f.setUploadChunkSize(cs)
|
return f.setUploadChunkSize(cs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
|
||||||
return f.setUploadCutoff(cs)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
|
||||||
|
func TestServicePrincipalFileSuccess(t *testing.T) {
|
||||||
|
ctx := context.TODO()
|
||||||
|
credentials := `
|
||||||
|
{
|
||||||
|
"appId": "my application (client) ID",
|
||||||
|
"password": "my secret",
|
||||||
|
"tenant": "my active directory tenant ID"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
|
||||||
|
if assert.NoError(t, err) {
|
||||||
|
assert.NotNil(t, tokenRefresher)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
|
||||||
|
func TestServicePrincipalFileFailure(t *testing.T) {
|
||||||
|
ctx := context.TODO()
|
||||||
|
credentials := `
|
||||||
|
{
|
||||||
|
"appId": "my application (client) ID",
|
||||||
|
"tenant": "my active directory tenant ID"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Build for azureblob for unsupported platforms to stop go complaining
|
// Build for azureblob for unsupported platforms to stop go complaining
|
||||||
// about "no buildable Go source files "
|
// about "no buildable Go source files "
|
||||||
|
|
||||||
// +build plan9 solaris !go1.13
|
// +build plan9 solaris js !go1.14
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|||||||
137
backend/azureblob/imds.go
Normal file
137
backend/azureblob/imds.go
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
|
|
||||||
|
package azureblob
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/Azure/go-autorest/autorest/adal"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
azureResource = "https://storage.azure.com"
|
||||||
|
imdsAPIVersion = "2018-02-01"
|
||||||
|
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This custom type is used to add the port the test server has bound to
|
||||||
|
// to the request context.
|
||||||
|
type testPortKey string
|
||||||
|
|
||||||
|
type msiIdentifierType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
msiClientID msiIdentifierType = iota
|
||||||
|
msiObjectID
|
||||||
|
msiResourceID
|
||||||
|
)
|
||||||
|
|
||||||
|
type userMSI struct {
|
||||||
|
Type msiIdentifierType
|
||||||
|
Value string
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpError struct {
|
||||||
|
Response *http.Response
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e httpError) Error() string {
|
||||||
|
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
|
||||||
|
// Metadata Service.
|
||||||
|
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
|
||||||
|
// Attempt to get an MSI token; silently continue if unsuccessful.
|
||||||
|
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
|
||||||
|
result := adal.Token{}
|
||||||
|
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(nil, "Failed to create request: %v", err)
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
params := req.URL.Query()
|
||||||
|
params.Set("resource", azureResource)
|
||||||
|
params.Set("api-version", imdsAPIVersion)
|
||||||
|
|
||||||
|
// Specify user-assigned identity if requested.
|
||||||
|
if identity != nil {
|
||||||
|
switch identity.Type {
|
||||||
|
case msiClientID:
|
||||||
|
params.Set("client_id", identity.Value)
|
||||||
|
case msiObjectID:
|
||||||
|
params.Set("object_id", identity.Value)
|
||||||
|
case msiResourceID:
|
||||||
|
params.Set("mi_res_id", identity.Value)
|
||||||
|
default:
|
||||||
|
// If this happens, the calling function and this one don't agree on
|
||||||
|
// what valid ID types exist.
|
||||||
|
return result, fmt.Errorf("unknown MSI identity type specified")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
|
||||||
|
// The Metadata header is required by all calls to IMDS.
|
||||||
|
req.Header.Set("Metadata", "true")
|
||||||
|
|
||||||
|
// If this function is run in a test, query the test server instead of IMDS.
|
||||||
|
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
|
||||||
|
if isTest {
|
||||||
|
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
|
||||||
|
req.Host = req.URL.Host
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send request
|
||||||
|
httpClient := fshttp.NewClient(ctx)
|
||||||
|
resp, err := httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return result, errors.Wrap(err, "MSI is not enabled on this VM")
|
||||||
|
}
|
||||||
|
defer func() { // resp and Body should not be nil
|
||||||
|
_, err = io.Copy(ioutil.Discard, resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
|
||||||
|
}
|
||||||
|
err = resp.Body.Close()
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// Check if the status code indicates success
|
||||||
|
// The request returns 200 currently, add 201 and 202 as well for possible extension.
|
||||||
|
switch resp.StatusCode {
|
||||||
|
case 200, 201, 202:
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
body, _ := ioutil.ReadAll(resp.Body)
|
||||||
|
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
|
||||||
|
return result, httpError{Response: resp}
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return result, errors.Wrap(err, "Couldn't read IMDS response")
|
||||||
|
}
|
||||||
|
// Remove BOM, if any. azcopy does this so I'm following along.
|
||||||
|
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
|
||||||
|
|
||||||
|
// This would be a good place to persist the token if a large number of rclone
|
||||||
|
// invocations are being made in a short amount of time. If the token is
|
||||||
|
// persisted, the azureblob code will need to check for expiry before every
|
||||||
|
// storage API call.
|
||||||
|
err = json.Unmarshal(b, &result)
|
||||||
|
if err != nil {
|
||||||
|
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
117
backend/azureblob/imds_test.go
Normal file
117
backend/azureblob/imds_test.go
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
|
|
||||||
|
package azureblob
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/Azure/go-autorest/autorest/adal"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
err := r.ParseForm()
|
||||||
|
require.NoError(t, err)
|
||||||
|
parameters := r.URL.Query()
|
||||||
|
(*actual)["path"] = r.URL.Path
|
||||||
|
(*actual)["Metadata"] = r.Header.Get("Metadata")
|
||||||
|
(*actual)["method"] = r.Method
|
||||||
|
for paramName := range parameters {
|
||||||
|
(*actual)[paramName] = parameters.Get(paramName)
|
||||||
|
}
|
||||||
|
// Make response.
|
||||||
|
response := adal.Token{}
|
||||||
|
responseBytes, err := json.Marshal(response)
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = w.Write(responseBytes)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestManagedIdentity(t *testing.T) {
|
||||||
|
// test user-assigned identity specifiers to use
|
||||||
|
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
|
||||||
|
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
|
||||||
|
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
|
||||||
|
tests := []struct {
|
||||||
|
identity *userMSI
|
||||||
|
identityParameterName string
|
||||||
|
expectedAbsent []string
|
||||||
|
}{
|
||||||
|
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
|
||||||
|
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
|
||||||
|
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
|
||||||
|
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
|
||||||
|
}
|
||||||
|
alwaysExpected := map[string]string{
|
||||||
|
"path": "/metadata/identity/oauth2/token",
|
||||||
|
"resource": "https://storage.azure.com",
|
||||||
|
"Metadata": "true",
|
||||||
|
"api-version": "2018-02-01",
|
||||||
|
"method": "GET",
|
||||||
|
}
|
||||||
|
for _, test := range tests {
|
||||||
|
actual := make(map[string]string, 10)
|
||||||
|
testServer := httptest.NewServer(handler(t, &actual))
|
||||||
|
defer testServer.Close()
|
||||||
|
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
|
||||||
|
require.NoError(t, err)
|
||||||
|
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
|
||||||
|
_, err = GetMSIToken(ctx, test.identity)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Validate expected query parameters present
|
||||||
|
expected := make(map[string]string)
|
||||||
|
for k, v := range alwaysExpected {
|
||||||
|
expected[k] = v
|
||||||
|
}
|
||||||
|
if test.identity != nil {
|
||||||
|
expected[test.identityParameterName] = test.identity.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
for key := range expected {
|
||||||
|
value, exists := actual[key]
|
||||||
|
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
|
||||||
|
test.identityParameterName, key) {
|
||||||
|
assert.Equalf(t, expected[key], value,
|
||||||
|
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate unexpected query parameters absent
|
||||||
|
for _, key := range test.expectedAbsent {
|
||||||
|
_, exists := actual[key]
|
||||||
|
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func errorHandler(resultCode int) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
http.Error(w, "Test error generated", resultCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIMDSErrors(t *testing.T) {
|
||||||
|
errorCodes := []int{404, 429, 500}
|
||||||
|
for _, code := range errorCodes {
|
||||||
|
testServer := httptest.NewServer(errorHandler(code))
|
||||||
|
defer testServer.Close()
|
||||||
|
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
|
||||||
|
require.NoError(t, err)
|
||||||
|
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
|
||||||
|
_, err = GetMSIToken(ctx, nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
httpErr, ok := err.(httpError)
|
||||||
|
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
|
||||||
|
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -2,12 +2,11 @@ package api
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"path"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/lib/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Error describes a B2 error response
|
// Error describes a B2 error response
|
||||||
@@ -63,16 +62,17 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
const versionFormat = "-v2006-01-02-150405.000"
|
// HasVersion returns true if it looks like the passed filename has a timestamp on it.
|
||||||
|
//
|
||||||
|
// Note that the passed filename's timestamp may still be invalid even if this
|
||||||
|
// function returns true.
|
||||||
|
func HasVersion(remote string) bool {
|
||||||
|
return version.Match(remote)
|
||||||
|
}
|
||||||
|
|
||||||
// AddVersion adds the timestamp as a version string into the filename passed in.
|
// AddVersion adds the timestamp as a version string into the filename passed in.
|
||||||
func (t Timestamp) AddVersion(remote string) string {
|
func (t Timestamp) AddVersion(remote string) string {
|
||||||
ext := path.Ext(remote)
|
return version.Add(remote, time.Time(t))
|
||||||
base := remote[:len(remote)-len(ext)]
|
|
||||||
s := time.Time(t).Format(versionFormat)
|
|
||||||
// Replace the '.' with a '-'
|
|
||||||
s = strings.Replace(s, ".", "-", -1)
|
|
||||||
return base + s + ext
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveVersion removes the timestamp from a filename as a version string.
|
// RemoveVersion removes the timestamp from a filename as a version string.
|
||||||
@@ -80,24 +80,9 @@ func (t Timestamp) AddVersion(remote string) string {
|
|||||||
// It returns the new file name and a timestamp, or the old filename
|
// It returns the new file name and a timestamp, or the old filename
|
||||||
// and a zero timestamp.
|
// and a zero timestamp.
|
||||||
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||||
newRemote = remote
|
time, newRemote := version.Remove(remote)
|
||||||
ext := path.Ext(remote)
|
t = Timestamp(time)
|
||||||
base := remote[:len(remote)-len(ext)]
|
return
|
||||||
if len(base) < len(versionFormat) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
versionStart := len(base) - len(versionFormat)
|
|
||||||
// Check it ends in -xxx
|
|
||||||
if base[len(base)-4] != '-' {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Replace with .xxx for parsing
|
|
||||||
base = base[:len(base)-4] + "." + base[len(base)-3:]
|
|
||||||
newT, err := time.Parse(versionFormat, base[versionStart:])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return Timestamp(newT), base[:versionStart] + ext
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsZero returns true if the timestamp is uninitialized
|
// IsZero returns true if the timestamp is uninitialized
|
||||||
|
|||||||
@@ -13,7 +13,6 @@ import (
|
|||||||
var (
|
var (
|
||||||
emptyT api.Timestamp
|
emptyT api.Timestamp
|
||||||
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
|
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
|
||||||
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
|
|
||||||
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
|
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -36,40 +35,6 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
|
|||||||
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
|
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTimestampAddVersion(t *testing.T) {
|
|
||||||
for _, test := range []struct {
|
|
||||||
t api.Timestamp
|
|
||||||
in string
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
|
|
||||||
{t1, "potato", "potato-v2001-02-03-040506-123"},
|
|
||||||
{t1, "", "-v2001-02-03-040506-123"},
|
|
||||||
} {
|
|
||||||
actual := test.t.AddVersion(test.in)
|
|
||||||
assert.Equal(t, test.expected, actual, test.in)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTimestampRemoveVersion(t *testing.T) {
|
|
||||||
for _, test := range []struct {
|
|
||||||
in string
|
|
||||||
expectedT api.Timestamp
|
|
||||||
expectedRemote string
|
|
||||||
}{
|
|
||||||
{"potato.txt", emptyT, "potato.txt"},
|
|
||||||
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
|
|
||||||
{"potato-v2001-02-03-040506-123", t1, "potato"},
|
|
||||||
{"-v2001-02-03-040506-123", t1, ""},
|
|
||||||
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
|
|
||||||
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
|
|
||||||
} {
|
|
||||||
actualT, actualRemote := api.RemoveVersion(test.in)
|
|
||||||
assert.Equal(t, test.expectedT, actualT, test.in)
|
|
||||||
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTimestampIsZero(t *testing.T) {
|
func TestTimestampIsZero(t *testing.T) {
|
||||||
assert.True(t, emptyT.IsZero())
|
assert.True(t, emptyT.IsZero())
|
||||||
assert.False(t, t0.IsZero())
|
assert.False(t, t0.IsZero())
|
||||||
|
|||||||
203
backend/b2/b2.go
203
backend/b2/b2.go
@@ -44,18 +44,20 @@ const (
|
|||||||
timeHeader = headerPrefix + timeKey
|
timeHeader = headerPrefix + timeKey
|
||||||
sha1Key = "large_file_sha1"
|
sha1Key = "large_file_sha1"
|
||||||
sha1Header = "X-Bz-Content-Sha1"
|
sha1Header = "X-Bz-Content-Sha1"
|
||||||
sha1InfoHeader = headerPrefix + sha1Key
|
|
||||||
testModeHeader = "X-Bz-Test-Mode"
|
testModeHeader = "X-Bz-Test-Mode"
|
||||||
|
idHeader = "X-Bz-File-Id"
|
||||||
|
nameHeader = "X-Bz-File-Name"
|
||||||
|
timestampHeader = "X-Bz-Upload-Timestamp"
|
||||||
retryAfterHeader = "Retry-After"
|
retryAfterHeader = "Retry-After"
|
||||||
minSleep = 10 * time.Millisecond
|
minSleep = 10 * time.Millisecond
|
||||||
maxSleep = 5 * time.Minute
|
maxSleep = 5 * time.Minute
|
||||||
decayConstant = 1 // bigger for slower decay, exponential
|
decayConstant = 1 // bigger for slower decay, exponential
|
||||||
maxParts = 10000
|
maxParts = 10000
|
||||||
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
|
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
|
||||||
minChunkSize = 5 * fs.MebiByte
|
minChunkSize = 5 * fs.Mebi
|
||||||
defaultChunkSize = 96 * fs.MebiByte
|
defaultChunkSize = 96 * fs.Mebi
|
||||||
defaultUploadCutoff = 200 * fs.MebiByte
|
defaultUploadCutoff = 200 * fs.Mebi
|
||||||
largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
|
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
|
||||||
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
|
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
|
||||||
memoryPoolUseMmap = false
|
memoryPoolUseMmap = false
|
||||||
)
|
)
|
||||||
@@ -114,17 +116,17 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
|
|||||||
|
|
||||||
Files above this size will be uploaded in chunks of "--b2-chunk-size".
|
Files above this size will be uploaded in chunks of "--b2-chunk-size".
|
||||||
|
|
||||||
This value should be set no larger than 4.657GiB (== 5GB).`,
|
This value should be set no larger than 4.657 GiB (== 5 GB).`,
|
||||||
Default: defaultUploadCutoff,
|
Default: defaultUploadCutoff,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "copy_cutoff",
|
Name: "copy_cutoff",
|
||||||
Help: `Cutoff for switching to multipart copy
|
Help: `Cutoff for switching to multipart copy
|
||||||
|
|
||||||
Any files larger than this that need to be server side copied will be
|
Any files larger than this that need to be server-side copied will be
|
||||||
copied in chunks of this size.
|
copied in chunks of this size.
|
||||||
|
|
||||||
The minimum is 0 and the maximum is 4.6GB.`,
|
The minimum is 0 and the maximum is 4.6 GiB.`,
|
||||||
Default: largeFileCopyCutoff,
|
Default: largeFileCopyCutoff,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
@@ -153,7 +155,9 @@ to start uploading.`,
|
|||||||
|
|
||||||
This is usually set to a Cloudflare CDN URL as Backblaze offers
|
This is usually set to a Cloudflare CDN URL as Backblaze offers
|
||||||
free egress for data downloaded through the Cloudflare network.
|
free egress for data downloaded through the Cloudflare network.
|
||||||
This is probably only useful for a public bucket.
|
Rclone works with private buckets by sending an "Authorization" header.
|
||||||
|
If the custom endpoint rewrites the requests for authentication,
|
||||||
|
e.g., in Cloudflare Workers, this header needs to be handled properly.
|
||||||
Leave blank if you want to use the endpoint provided by Backblaze.`,
|
Leave blank if you want to use the endpoint provided by Backblaze.`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
@@ -214,6 +218,7 @@ type Fs struct {
|
|||||||
name string // name of this remote
|
name string // name of this remote
|
||||||
root string // the path we are working on if any
|
root string // the path we are working on if any
|
||||||
opt Options // parsed config options
|
opt Options // parsed config options
|
||||||
|
ci *fs.ConfigInfo // global config
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
srv *rest.Client // the connection to the b2 server
|
srv *rest.Client // the connection to the b2 server
|
||||||
rootBucket string // bucket part of root (if any)
|
rootBucket string // bucket part of root (if any)
|
||||||
@@ -290,7 +295,7 @@ func (o *Object) split() (bucket, bucketPath string) {
|
|||||||
|
|
||||||
// retryErrorCodes is a slice of error codes that we will retry
|
// retryErrorCodes is a slice of error codes that we will retry
|
||||||
var retryErrorCodes = []int{
|
var retryErrorCodes = []int{
|
||||||
401, // Unauthorized (eg "Token has expired")
|
401, // Unauthorized (e.g. "Token has expired")
|
||||||
408, // Request Timeout
|
408, // Request Timeout
|
||||||
429, // Rate exceeded.
|
429, // Rate exceeded.
|
||||||
500, // Get occasional 500 Internal Server Error
|
500, // Get occasional 500 Internal Server Error
|
||||||
@@ -300,7 +305,10 @@ var retryErrorCodes = []int{
|
|||||||
|
|
||||||
// shouldRetryNoAuth returns a boolean as to whether this resp and err
|
// shouldRetryNoAuth returns a boolean as to whether this resp and err
|
||||||
// deserve to be retried. It returns the err as a convenience
|
// deserve to be retried. It returns the err as a convenience
|
||||||
func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
|
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||||
|
if fserrors.ContextError(ctx, &err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
// For 429 or 503 errors look at the Retry-After: header and
|
// For 429 or 503 errors look at the Retry-After: header and
|
||||||
// set the retry appropriately, starting with a minimum of 1
|
// set the retry appropriately, starting with a minimum of 1
|
||||||
// second if it isn't set.
|
// second if it isn't set.
|
||||||
@@ -331,7 +339,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
|
|||||||
}
|
}
|
||||||
return true, err
|
return true, err
|
||||||
}
|
}
|
||||||
return f.shouldRetryNoReauth(resp, err)
|
return f.shouldRetryNoReauth(ctx, resp, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// errorHandler parses a non 2xx error response into an error
|
// errorHandler parses a non 2xx error response into an error
|
||||||
@@ -391,14 +399,17 @@ func (f *Fs) setRoot(root string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, bucket:path
|
// NewFs constructs an Fs from the path, bucket:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
ctx := context.Background()
|
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if opt.UploadCutoff < opt.ChunkSize {
|
||||||
|
opt.UploadCutoff = opt.ChunkSize
|
||||||
|
fs.Infof(nil, "b2: raising upload cutoff to chunk size: %v", opt.UploadCutoff)
|
||||||
|
}
|
||||||
err = checkUploadCutoff(opt, opt.UploadCutoff)
|
err = checkUploadCutoff(opt, opt.UploadCutoff)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "b2: upload cutoff")
|
return nil, errors.Wrap(err, "b2: upload cutoff")
|
||||||
@@ -416,20 +427,22 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
if opt.Endpoint == "" {
|
if opt.Endpoint == "" {
|
||||||
opt.Endpoint = defaultEndpoint
|
opt.Endpoint = defaultEndpoint
|
||||||
}
|
}
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
|
ci: ci,
|
||||||
|
srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
|
||||||
cache: bucket.NewCache(),
|
cache: bucket.NewCache(),
|
||||||
_bucketID: make(map[string]string, 1),
|
_bucketID: make(map[string]string, 1),
|
||||||
_bucketType: make(map[string]string, 1),
|
_bucketType: make(map[string]string, 1),
|
||||||
uploads: make(map[string][]*api.GetUploadURLResponse),
|
uploads: make(map[string][]*api.GetUploadURLResponse),
|
||||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
||||||
pool: pool.New(
|
pool: pool.New(
|
||||||
time.Duration(opt.MemoryPoolFlushTime),
|
time.Duration(opt.MemoryPoolFlushTime),
|
||||||
int(opt.ChunkSize),
|
int(opt.ChunkSize),
|
||||||
fs.Config.Transfers,
|
ci.Transfers,
|
||||||
opt.MemoryPoolUseMmap,
|
opt.MemoryPoolUseMmap,
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
@@ -439,7 +452,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
WriteMimeType: true,
|
WriteMimeType: true,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
BucketBasedRootOK: true,
|
BucketBasedRootOK: true,
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
// Set the test flag if required
|
// Set the test flag if required
|
||||||
if opt.TestMode != "" {
|
if opt.TestMode != "" {
|
||||||
testMode := strings.TrimSpace(opt.TestMode)
|
testMode := strings.TrimSpace(opt.TestMode)
|
||||||
@@ -469,12 +482,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
f.setRoot(newRoot)
|
f.setRoot(newRoot)
|
||||||
_, err := f.NewObject(ctx, leaf)
|
_, err := f.NewObject(ctx, leaf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorObjectNotFound {
|
// File doesn't exist so return old f
|
||||||
// File doesn't exist so return old f
|
f.setRoot(oldRoot)
|
||||||
f.setRoot(oldRoot)
|
return f, nil
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
// return an error with an fs which points to the parent
|
// return an error with an fs which points to the parent
|
||||||
return f, fs.ErrorIsFile
|
return f, fs.ErrorIsFile
|
||||||
@@ -497,7 +507,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
err := f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
|
resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
|
||||||
return f.shouldRetryNoReauth(resp, err)
|
return f.shouldRetryNoReauth(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "failed to authenticate")
|
return errors.Wrap(err, "failed to authenticate")
|
||||||
@@ -702,7 +712,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
|||||||
remote := file.Name[len(prefix):]
|
remote := file.Name[len(prefix):]
|
||||||
// Check for directory
|
// Check for directory
|
||||||
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
|
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
|
||||||
if isDirectory {
|
if isDirectory && len(remote) > 1 {
|
||||||
remote = remote[:len(remote)-1]
|
remote = remote[:len(remote)-1]
|
||||||
}
|
}
|
||||||
if addBucket {
|
if addBucket {
|
||||||
@@ -1143,7 +1153,8 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
|
|||||||
// if oldOnly is true then it deletes only non current files.
|
// if oldOnly is true then it deletes only non current files.
|
||||||
//
|
//
|
||||||
// Implemented here so we can make sure we delete old versions.
|
// Implemented here so we can make sure we delete old versions.
|
||||||
func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool) error {
|
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||||
|
bucket, directory := f.split(dir)
|
||||||
if bucket == "" {
|
if bucket == "" {
|
||||||
return errors.New("can't purge from root")
|
return errors.New("can't purge from root")
|
||||||
}
|
}
|
||||||
@@ -1167,10 +1178,10 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Delete Config.Transfers in parallel
|
// Delete Config.Transfers in parallel
|
||||||
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
|
toBeDeleted := make(chan *api.File, f.ci.Transfers)
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(fs.Config.Transfers)
|
wg.Add(f.ci.Transfers)
|
||||||
for i := 0; i < fs.Config.Transfers; i++ {
|
for i := 0; i < f.ci.Transfers; i++ {
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
for object := range toBeDeleted {
|
for object := range toBeDeleted {
|
||||||
@@ -1182,7 +1193,7 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
|
|||||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
|
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
|
||||||
err = f.deleteByID(ctx, object.ID, object.Name)
|
err = f.deleteByID(ctx, object.ID, object.Name)
|
||||||
checkErr(err)
|
checkErr(err)
|
||||||
tr.Done(err)
|
tr.Done(ctx, err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
@@ -1210,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
|
|||||||
toBeDeleted <- object
|
toBeDeleted <- object
|
||||||
}
|
}
|
||||||
last = remote
|
last = remote
|
||||||
tr.Done(nil)
|
tr.Done(ctx, nil)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}))
|
}))
|
||||||
@@ -1218,22 +1229,22 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
|
|||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
|
||||||
if !oldOnly {
|
if !oldOnly {
|
||||||
checkErr(f.Rmdir(ctx, ""))
|
checkErr(f.Rmdir(ctx, dir))
|
||||||
}
|
}
|
||||||
return errReturn
|
return errReturn
|
||||||
}
|
}
|
||||||
|
|
||||||
// Purge deletes all the files and directories including the old versions.
|
// Purge deletes all the files and directories including the old versions.
|
||||||
func (f *Fs) Purge(ctx context.Context) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
return f.purge(ctx, f.rootBucket, f.rootDirectory, false)
|
return f.purge(ctx, dir, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CleanUp deletes all the hidden files.
|
// CleanUp deletes all the hidden files.
|
||||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||||
return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
|
return f.purge(ctx, "", true)
|
||||||
}
|
}
|
||||||
|
|
||||||
// copy does a server side copy from dstObj <- srcObj
|
// copy does a server-side copy from dstObj <- srcObj
|
||||||
//
|
//
|
||||||
// If newInfo is nil then the metadata will be copied otherwise it
|
// If newInfo is nil then the metadata will be copied otherwise it
|
||||||
// will be replaced with newInfo
|
// will be replaced with newInfo
|
||||||
@@ -1290,7 +1301,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
|
|||||||
return dstObj.decodeMetaDataFileInfo(&response)
|
return dstObj.decodeMetaDataFileInfo(&response)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -1342,7 +1353,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
|
|||||||
}
|
}
|
||||||
var request = api.GetDownloadAuthorizationRequest{
|
var request = api.GetDownloadAuthorizationRequest{
|
||||||
BucketID: bucketID,
|
BucketID: bucketID,
|
||||||
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
|
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.rootDirectory, remote)),
|
||||||
ValidDurationInSeconds: validDurationInSeconds,
|
ValidDurationInSeconds: validDurationInSeconds,
|
||||||
}
|
}
|
||||||
var response api.GetDownloadAuthorizationResponse
|
var response api.GetDownloadAuthorizationResponse
|
||||||
@@ -1439,7 +1450,7 @@ func (o *Object) Size() int64 {
|
|||||||
// Make sure it is lower case
|
// Make sure it is lower case
|
||||||
//
|
//
|
||||||
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
|
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
|
||||||
// Some tools (eg Cyberduck) use this
|
// Some tools (e.g. Cyberduck) use this
|
||||||
func cleanSHA1(sha1 string) (out string) {
|
func cleanSHA1(sha1 string) (out string) {
|
||||||
out = strings.ToLower(sha1)
|
out = strings.ToLower(sha1)
|
||||||
const unverified = "unverified:"
|
const unverified = "unverified:"
|
||||||
@@ -1493,8 +1504,11 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
|
|||||||
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
|
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
|
||||||
}
|
}
|
||||||
|
|
||||||
// getMetaData gets the metadata from the object unconditionally
|
// getMetaDataListing gets the metadata from the object unconditionally from the listing
|
||||||
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
|
//
|
||||||
|
// Note that listing is a class C transaction which costs more than
|
||||||
|
// the B transaction used in getMetaData
|
||||||
|
func (o *Object) getMetaDataListing(ctx context.Context) (info *api.File, err error) {
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
maxSearched := 1
|
maxSearched := 1
|
||||||
var timestamp api.Timestamp
|
var timestamp api.Timestamp
|
||||||
@@ -1527,6 +1541,19 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
|
|||||||
return info, nil
|
return info, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getMetaData gets the metadata from the object unconditionally
|
||||||
|
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
|
||||||
|
// If using versions and have a version suffix, need to list the directory to find the correct versions
|
||||||
|
if o.fs.opt.Versions {
|
||||||
|
timestamp, _ := api.RemoveVersion(o.remote)
|
||||||
|
if !timestamp.IsZero() {
|
||||||
|
return o.getMetaDataListing(ctx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_, info, err = o.getOrHead(ctx, "HEAD", nil)
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
// readMetaData gets the metadata if it hasn't already been fetched
|
// readMetaData gets the metadata if it hasn't already been fetched
|
||||||
//
|
//
|
||||||
// Sets
|
// Sets
|
||||||
@@ -1656,12 +1683,11 @@ func (file *openFile) Close() (err error) {
|
|||||||
// Check it satisfies the interfaces
|
// Check it satisfies the interfaces
|
||||||
var _ io.ReadCloser = &openFile{}
|
var _ io.ReadCloser = &openFile{}
|
||||||
|
|
||||||
// Open an object for read
|
func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
|
||||||
fs.FixRangeOption(options, o.size)
|
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "GET",
|
Method: method,
|
||||||
Options: options,
|
Options: options,
|
||||||
|
NoResponse: method == "HEAD",
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use downloadUrl from backblaze if downloadUrl is not set
|
// Use downloadUrl from backblaze if downloadUrl is not set
|
||||||
@@ -1672,44 +1698,81 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
opts.RootURL = o.fs.opt.DownloadURL
|
opts.RootURL = o.fs.opt.DownloadURL
|
||||||
}
|
}
|
||||||
|
|
||||||
// Download by id if set otherwise by name
|
// Download by id if set and not using DownloadURL otherwise by name
|
||||||
if o.id != "" {
|
if o.id != "" && o.fs.opt.DownloadURL == "" {
|
||||||
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
|
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
|
||||||
} else {
|
} else {
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
|
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
|
||||||
}
|
}
|
||||||
var resp *http.Response
|
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||||
return o.fs.shouldRetry(ctx, resp, err)
|
return o.fs.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to open for download")
|
// 404 for files, 400 for directories
|
||||||
|
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
|
||||||
|
return nil, nil, fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse the time out of the headers if possible
|
// NB resp may be Open here - don't return err != nil without closing
|
||||||
err = o.parseTimeString(resp.Header.Get(timeHeader))
|
|
||||||
|
// Convert the Headers into an api.File
|
||||||
|
var uploadTimestamp api.Timestamp
|
||||||
|
err = uploadTimestamp.UnmarshalJSON([]byte(resp.Header.Get(timestampHeader)))
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(o, "Bad "+timestampHeader+" header: %v", err)
|
||||||
|
}
|
||||||
|
var Info = make(map[string]string)
|
||||||
|
for k, vs := range resp.Header {
|
||||||
|
k = strings.ToLower(k)
|
||||||
|
for _, v := range vs {
|
||||||
|
if strings.HasPrefix(k, headerPrefix) {
|
||||||
|
Info[k[len(headerPrefix):]] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
info = &api.File{
|
||||||
|
ID: resp.Header.Get(idHeader),
|
||||||
|
Name: resp.Header.Get(nameHeader),
|
||||||
|
Action: "upload",
|
||||||
|
Size: resp.ContentLength,
|
||||||
|
UploadTimestamp: uploadTimestamp,
|
||||||
|
SHA1: resp.Header.Get(sha1Header),
|
||||||
|
ContentType: resp.Header.Get("Content-Type"),
|
||||||
|
Info: Info,
|
||||||
|
}
|
||||||
|
// When reading files from B2 via cloudflare using
|
||||||
|
// --b2-download-url cloudflare strips the Content-Length
|
||||||
|
// headers (presumably so it can inject stuff) so use the old
|
||||||
|
// length read from the listing.
|
||||||
|
if info.Size < 0 {
|
||||||
|
info.Size = o.size
|
||||||
|
}
|
||||||
|
return resp, info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open an object for read
|
||||||
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
|
fs.FixRangeOption(options, o.size)
|
||||||
|
|
||||||
|
resp, info, err := o.getOrHead(ctx, "GET", options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't check length or hash or metadata on partial content
|
||||||
|
if resp.StatusCode == http.StatusPartialContent {
|
||||||
|
return resp.Body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err = o.decodeMetaData(info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = resp.Body.Close()
|
_ = resp.Body.Close()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Read sha1 from header if it isn't set
|
|
||||||
if o.sha1 == "" {
|
|
||||||
o.sha1 = resp.Header.Get(sha1Header)
|
|
||||||
fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
|
|
||||||
// if sha1 header is "none" (in big files), then need
|
|
||||||
// to read it from the metadata
|
|
||||||
if o.sha1 == "none" {
|
|
||||||
o.sha1 = resp.Header.Get(sha1InfoHeader)
|
|
||||||
fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
|
|
||||||
}
|
|
||||||
o.sha1 = cleanSHA1(o.sha1)
|
|
||||||
}
|
|
||||||
// Don't check length or hash on partial content
|
|
||||||
if resp.StatusCode == http.StatusPartialContent {
|
|
||||||
return resp.Body, nil
|
|
||||||
}
|
|
||||||
return newOpenFile(o, resp), nil
|
return newOpenFile(o, resp), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -230,14 +230,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
|
|||||||
//
|
//
|
||||||
// The number of bytes in the file being uploaded. Note that
|
// The number of bytes in the file being uploaded. Note that
|
||||||
// this header is required; you cannot leave it out and just
|
// this header is required; you cannot leave it out and just
|
||||||
// use chunked encoding. The minimum size of every part but
|
// use chunked encoding. The minimum size of every part but
|
||||||
// the last one is 100MB.
|
// the last one is 100 MB (100,000,000 bytes)
|
||||||
//
|
//
|
||||||
// X-Bz-Content-Sha1
|
// X-Bz-Content-Sha1
|
||||||
//
|
//
|
||||||
// The SHA1 checksum of the this part of the file. B2 will
|
// The SHA1 checksum of the this part of the file. B2 will
|
||||||
// check this when the part is uploaded, to make sure that the
|
// check this when the part is uploaded, to make sure that the
|
||||||
// data arrived correctly. The same SHA1 checksum must be
|
// data arrived correctly. The same SHA1 checksum must be
|
||||||
// passed to b2_finish_large_file.
|
// passed to b2_finish_large_file.
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
|
|||||||
@@ -36,13 +36,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {
|
|||||||
|
|
||||||
// Error is returned from box when things go wrong
|
// Error is returned from box when things go wrong
|
||||||
type Error struct {
|
type Error struct {
|
||||||
Type string `json:"type"`
|
Type string `json:"type"`
|
||||||
Status int `json:"status"`
|
Status int `json:"status"`
|
||||||
Code string `json:"code"`
|
Code string `json:"code"`
|
||||||
ContextInfo json.RawMessage
|
ContextInfo json.RawMessage `json:"context_info"`
|
||||||
HelpURL string `json:"help_url"`
|
HelpURL string `json:"help_url"`
|
||||||
Message string `json:"message"`
|
Message string `json:"message"`
|
||||||
RequestID string `json:"request_id"`
|
RequestID string `json:"request_id"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Error returns a string for the error and satisfies the error interface
|
// Error returns a string for the error and satisfies the error interface
|
||||||
@@ -132,6 +132,38 @@ type UploadFile struct {
|
|||||||
ContentModifiedAt Time `json:"content_modified_at"`
|
ContentModifiedAt Time `json:"content_modified_at"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PreUploadCheck is the request for upload preflight check
|
||||||
|
type PreUploadCheck struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Parent Parent `json:"parent"`
|
||||||
|
Size *int64 `json:"size,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PreUploadCheckResponse is the response from upload preflight check
|
||||||
|
// if successful
|
||||||
|
type PreUploadCheckResponse struct {
|
||||||
|
UploadToken string `json:"upload_token"`
|
||||||
|
UploadURL string `json:"upload_url"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PreUploadCheckConflict is returned in the ContextInfo error field
|
||||||
|
// from PreUploadCheck when the error code is "item_name_in_use"
|
||||||
|
type PreUploadCheckConflict struct {
|
||||||
|
Conflicts struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
ID string `json:"id"`
|
||||||
|
FileVersion struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
ID string `json:"id"`
|
||||||
|
Sha1 string `json:"sha1"`
|
||||||
|
} `json:"file_version"`
|
||||||
|
SequenceID string `json:"sequence_id"`
|
||||||
|
Etag string `json:"etag"`
|
||||||
|
Sha1 string `json:"sha1"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
} `json:"conflicts"`
|
||||||
|
}
|
||||||
|
|
||||||
// UpdateFileModTime is used in Update File Info
|
// UpdateFileModTime is used in Update File Info
|
||||||
type UpdateFileModTime struct {
|
type UpdateFileModTime struct {
|
||||||
ContentModifiedAt Time `json:"content_modified_at"`
|
ContentModifiedAt Time `json:"content_modified_at"`
|
||||||
|
|||||||
@@ -17,7 +17,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
@@ -84,29 +83,26 @@ func init() {
|
|||||||
Name: "box",
|
Name: "box",
|
||||||
Description: "Box",
|
Description: "Box",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(name string, m configmap.Mapper) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||||
jsonFile, ok := m.Get("box_config_file")
|
jsonFile, ok := m.Get("box_config_file")
|
||||||
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
||||||
|
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
|
||||||
var err error
|
var err error
|
||||||
|
// If using box config.json, use JWT auth
|
||||||
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
|
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
|
||||||
err = refreshJWTToken(jsonFile, boxSubType, name, m)
|
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
|
return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
|
||||||
}
|
|
||||||
} else {
|
|
||||||
err = oauthutil.Config("box", name, m, oauthConfig, nil)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
|
|
||||||
}
|
}
|
||||||
|
// Else, if not using an access token, use oauth2
|
||||||
|
} else if boxAccessToken == "" || !boxAccessTokenOk {
|
||||||
|
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||||
|
OAuth2Config: oauthConfig,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
return nil, nil
|
||||||
},
|
},
|
||||||
Options: []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: config.ConfigClientID,
|
|
||||||
Help: "Box App Client Id.\nLeave blank normally.",
|
|
||||||
}, {
|
|
||||||
Name: config.ConfigClientSecret,
|
|
||||||
Help: "Box App Client Secret\nLeave blank normally.",
|
|
||||||
}, {
|
|
||||||
Name: "root_folder_id",
|
Name: "root_folder_id",
|
||||||
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
||||||
Default: "0",
|
Default: "0",
|
||||||
@@ -114,6 +110,9 @@ func init() {
|
|||||||
}, {
|
}, {
|
||||||
Name: "box_config_file",
|
Name: "box_config_file",
|
||||||
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
|
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
|
||||||
|
}, {
|
||||||
|
Name: "access_token",
|
||||||
|
Help: "Box App Primary Access Token\nLeave blank normally.",
|
||||||
}, {
|
}, {
|
||||||
Name: "box_sub_type",
|
Name: "box_sub_type",
|
||||||
Default: "user",
|
Default: "user",
|
||||||
@@ -126,7 +125,7 @@ func init() {
|
|||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "upload_cutoff",
|
Name: "upload_cutoff",
|
||||||
Help: "Cutoff for switching to multipart upload (>= 50MB).",
|
Help: "Cutoff for switching to multipart upload (>= 50 MiB).",
|
||||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
@@ -149,27 +148,27 @@ func init() {
|
|||||||
encoder.EncodeBackSlash |
|
encoder.EncodeBackSlash |
|
||||||
encoder.EncodeRightSpace |
|
encoder.EncodeRightSpace |
|
||||||
encoder.EncodeInvalidUtf8),
|
encoder.EncodeInvalidUtf8),
|
||||||
}},
|
}}...),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
|
func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
|
||||||
jsonFile = env.ShellExpand(jsonFile)
|
jsonFile = env.ShellExpand(jsonFile)
|
||||||
boxConfig, err := getBoxConfig(jsonFile)
|
boxConfig, err := getBoxConfig(jsonFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to configure token: %v", err)
|
return errors.Wrap(err, "get box config")
|
||||||
}
|
}
|
||||||
privateKey, err := getDecryptedPrivateKey(boxConfig)
|
privateKey, err := getDecryptedPrivateKey(boxConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to configure token: %v", err)
|
return errors.Wrap(err, "get decrypted private key")
|
||||||
}
|
}
|
||||||
claims, err := getClaims(boxConfig, boxSubType)
|
claims, err := getClaims(boxConfig, boxSubType)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to configure token: %v", err)
|
return errors.Wrap(err, "get claims")
|
||||||
}
|
}
|
||||||
signingHeaders := getSigningHeaders(boxConfig)
|
signingHeaders := getSigningHeaders(boxConfig)
|
||||||
queryParams := getQueryParams(boxConfig)
|
queryParams := getQueryParams(boxConfig)
|
||||||
client := fshttp.NewClient(fs.Config)
|
client := fshttp.NewClient(ctx)
|
||||||
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
|
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -247,6 +246,7 @@ type Options struct {
|
|||||||
CommitRetries int `config:"commit_retries"`
|
CommitRetries int `config:"commit_retries"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
RootFolderID string `config:"root_folder_id"`
|
RootFolderID string `config:"root_folder_id"`
|
||||||
|
AccessToken string `config:"access_token"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote box
|
// Fs represents a remote box
|
||||||
@@ -316,10 +316,13 @@ var retryErrorCodes = []int{
|
|||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this resp and err
|
// shouldRetry returns a boolean as to whether this resp and err
|
||||||
// deserve to be retried. It returns the err as a convenience
|
// deserve to be retried. It returns the err as a convenience
|
||||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||||
|
if fserrors.ContextError(ctx, &err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
authRetry := false
|
authRetry := false
|
||||||
|
|
||||||
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
|
||||||
authRetry = true
|
authRetry = true
|
||||||
fs.Debugf(nil, "Should retry: %v", err)
|
fs.Debugf(nil, "Should retry: %v", err)
|
||||||
}
|
}
|
||||||
@@ -338,7 +341,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
|
|||||||
}
|
}
|
||||||
|
|
||||||
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
|
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
|
||||||
if item.Name == leaf {
|
if strings.EqualFold(item.Name, leaf) {
|
||||||
info = item
|
info = item
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -371,8 +374,7 @@ func errorHandler(resp *http.Response) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
ctx := context.Background()
|
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -385,42 +387,56 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
root = parsePath(root)
|
root = parsePath(root)
|
||||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
|
||||||
if err != nil {
|
client := fshttp.NewClient(ctx)
|
||||||
return nil, errors.Wrap(err, "failed to configure Box")
|
var ts *oauthutil.TokenSource
|
||||||
|
// If not using an accessToken, create an oauth client and tokensource
|
||||||
|
if opt.AccessToken == "" {
|
||||||
|
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to configure Box")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
root: root,
|
root: root,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
srv: rest.NewClient(client).SetRoot(rootURL),
|
||||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: true,
|
CaseInsensitive: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
f.srv.SetErrorHandler(errorHandler)
|
f.srv.SetErrorHandler(errorHandler)
|
||||||
|
|
||||||
|
// If using an accessToken, set the Authorization header
|
||||||
|
if f.opt.AccessToken != "" {
|
||||||
|
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
|
||||||
|
}
|
||||||
|
|
||||||
jsonFile, ok := m.Get("box_config_file")
|
jsonFile, ok := m.Get("box_config_file")
|
||||||
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
||||||
|
|
||||||
// If using box config.json and JWT, renewing should just refresh the token and
|
if ts != nil {
|
||||||
// should do so whether there are uploads pending or not.
|
// If using box config.json and JWT, renewing should just refresh the token and
|
||||||
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
|
// should do so whether there are uploads pending or not.
|
||||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
|
||||||
err := refreshJWTToken(jsonFile, boxSubType, name, m)
|
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||||
return err
|
err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
|
||||||
})
|
return err
|
||||||
f.tokenRenewer.Start()
|
})
|
||||||
} else {
|
f.tokenRenewer.Start()
|
||||||
// Renew the token in the background
|
} else {
|
||||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
// Renew the token in the background
|
||||||
_, err := f.readMetaDataForPath(ctx, "")
|
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||||
return err
|
_, err := f.readMetaDataForPath(ctx, "")
|
||||||
})
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get rootFolderID
|
// Get rootFolderID
|
||||||
@@ -449,7 +465,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
f.features.Fill(&tempF)
|
f.features.Fill(ctx, &tempF)
|
||||||
// XXX: update the old f here instead of returning tempF, since
|
// XXX: update the old f here instead of returning tempF, since
|
||||||
// `features` were already filled with functions having *f as a receiver.
|
// `features` were already filled with functions having *f as a receiver.
|
||||||
// See https://github.com/rclone/rclone/issues/2182
|
// See https://github.com/rclone/rclone/issues/2182
|
||||||
@@ -500,7 +516,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|||||||
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||||
// Find the leaf in pathID
|
// Find the leaf in pathID
|
||||||
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
|
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
|
||||||
if item.Name == leaf {
|
if strings.EqualFold(item.Name, leaf) {
|
||||||
pathIDOut = item.ID
|
pathIDOut = item.ID
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -534,7 +550,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
|||||||
}
|
}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
|
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
//fmt.Printf("...Error %v\n", err)
|
//fmt.Printf("...Error %v\n", err)
|
||||||
@@ -571,7 +587,7 @@ OUTER:
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return found, errors.Wrap(err, "couldn't list files")
|
return found, errors.Wrap(err, "couldn't list files")
|
||||||
@@ -669,22 +685,80 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
|
|||||||
return o, leaf, directoryID, nil
|
return o, leaf, directoryID, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// preUploadCheck checks to see if a file can be uploaded
|
||||||
|
//
|
||||||
|
// It returns "", nil if the file is good to go
|
||||||
|
// It returns "ID", nil if the file must be updated
|
||||||
|
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
|
||||||
|
check := api.PreUploadCheck{
|
||||||
|
Name: f.opt.Enc.FromStandardName(leaf),
|
||||||
|
Parent: api.Parent{
|
||||||
|
ID: directoryID,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if size >= 0 {
|
||||||
|
check.Size = &size
|
||||||
|
}
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "OPTIONS",
|
||||||
|
Path: "/files/content/",
|
||||||
|
}
|
||||||
|
var result api.PreUploadCheckResponse
|
||||||
|
var resp *http.Response
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err = f.srv.CallJSON(ctx, &opts, &check, &result)
|
||||||
|
return shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" {
|
||||||
|
var conflict api.PreUploadCheckConflict
|
||||||
|
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
|
||||||
|
}
|
||||||
|
if conflict.Conflicts.Type != api.ItemTypeFile {
|
||||||
|
return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
|
||||||
|
}
|
||||||
|
return conflict.Conflicts.ID, nil
|
||||||
|
}
|
||||||
|
return "", errors.Wrap(err, "pre-upload check")
|
||||||
|
}
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
// Put the object
|
// Put the object
|
||||||
//
|
//
|
||||||
// Copy the reader in to the new object which is returned
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
|
// If directory doesn't exist, file doesn't exist so can upload
|
||||||
switch err {
|
remote := src.Remote()
|
||||||
case nil:
|
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
||||||
return existingObj, existingObj.Update(ctx, in, src, options...)
|
if err != nil {
|
||||||
case fs.ErrorObjectNotFound:
|
if err == fs.ErrorDirNotFound {
|
||||||
// Not found so create it
|
return f.PutUnchecked(ctx, in, src, options...)
|
||||||
return f.PutUnchecked(ctx, in, src)
|
}
|
||||||
default:
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Preflight check the upload, which returns the ID if the
|
||||||
|
// object already exists
|
||||||
|
ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if ID == "" {
|
||||||
|
return f.PutUnchecked(ctx, in, src, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If object exists then create a skeleton one with just id
|
||||||
|
o := &Object{
|
||||||
|
fs: f,
|
||||||
|
remote: remote,
|
||||||
|
id: ID,
|
||||||
|
}
|
||||||
|
return o, o.Update(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
@@ -726,7 +800,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
|
|||||||
}
|
}
|
||||||
return f.pacer.Call(func() (bool, error) {
|
return f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.srv.Call(ctx, &opts)
|
resp, err := f.srv.Call(ctx, &opts)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -753,7 +827,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.Call(ctx, &opts)
|
resp, err = f.srv.Call(ctx, &opts)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "rmdir failed")
|
return errors.Wrap(err, "rmdir failed")
|
||||||
@@ -777,7 +851,7 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
return time.Second
|
return time.Second
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -825,7 +899,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
var info *api.Item
|
var info *api.Item
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, ©File, &info)
|
resp, err = f.srv.CallJSON(ctx, &opts, ©File, &info)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -842,8 +916,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
// Optional interface: Only implement this if you have a way of
|
// Optional interface: Only implement this if you have a way of
|
||||||
// deleting all the files quicker than just running Remove() on the
|
// deleting all the files quicker than just running Remove() on the
|
||||||
// result of List()
|
// result of List()
|
||||||
func (f *Fs) Purge(ctx context.Context) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
return f.purgeCheck(ctx, "", false)
|
return f.purgeCheck(ctx, dir, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// move a file or folder
|
// move a file or folder
|
||||||
@@ -863,7 +937,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
|
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -881,7 +955,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to read user info")
|
return nil, errors.Wrap(err, "failed to read user info")
|
||||||
@@ -895,7 +969,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
|||||||
return usage, nil
|
return usage, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move src to this remote using server side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -931,7 +1005,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
// using server side move operations.
|
// using server-side move operations.
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -994,12 +1068,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
|
resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
return info.SharedLink.URL, err
|
return info.SharedLink.URL, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// deletePermanently permenently deletes a trashed file
|
// deletePermanently permanently deletes a trashed file
|
||||||
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
|
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "DELETE",
|
Method: "DELETE",
|
||||||
@@ -1012,7 +1086,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
|
|||||||
}
|
}
|
||||||
return f.pacer.Call(func() (bool, error) {
|
return f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.srv.Call(ctx, &opts)
|
resp, err := f.srv.Call(ctx, &opts)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1034,7 +1108,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't list trash")
|
return errors.Wrap(err, "couldn't list trash")
|
||||||
@@ -1168,7 +1242,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
|
|||||||
var info *api.Item
|
var info *api.Item
|
||||||
err := o.fs.pacer.Call(func() (bool, error) {
|
err := o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
|
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
return info, err
|
return info, err
|
||||||
}
|
}
|
||||||
@@ -1201,7 +1275,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
}
|
}
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1211,7 +1285,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
|
|
||||||
// upload does a single non-multipart upload
|
// upload does a single non-multipart upload
|
||||||
//
|
//
|
||||||
// This is recommended for less than 50 MB of content
|
// This is recommended for less than 50 MiB of content
|
||||||
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
|
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
|
||||||
upload := api.UploadFile{
|
upload := api.UploadFile{
|
||||||
Name: o.fs.opt.Enc.FromStandardName(leaf),
|
Name: o.fs.opt.Enc.FromStandardName(leaf),
|
||||||
@@ -1241,7 +1315,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
|
|||||||
}
|
}
|
||||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1258,8 +1332,10 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
|
|||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||||
o.fs.tokenRenewer.Start()
|
if o.fs.tokenRenewer != nil {
|
||||||
defer o.fs.tokenRenewer.Stop()
|
o.fs.tokenRenewer.Start()
|
||||||
|
defer o.fs.tokenRenewer.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
modTime := src.ModTime(ctx)
|
modTime := src.ModTime(ctx)
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// multpart upload for box
|
// multipart upload for box
|
||||||
|
|
||||||
package box
|
package box
|
||||||
|
|
||||||
@@ -44,7 +44,7 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -74,7 +74,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
|
|||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
opts.Body = wrap(bytes.NewReader(chunk))
|
opts.Body = wrap(bytes.NewReader(chunk))
|
||||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -109,10 +109,10 @@ outer:
|
|||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
}
|
}
|
||||||
body, err = rest.ReadBody(resp)
|
body, err = rest.ReadBody(resp)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
delay := defaultDelay
|
delay := defaultDelay
|
||||||
var why string
|
var why string
|
||||||
@@ -167,7 +167,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
80
backend/cache/cache.go
vendored
80
backend/cache/cache.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
// +build !plan9
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
@@ -68,7 +68,7 @@ func init() {
|
|||||||
CommandHelp: commandHelp,
|
CommandHelp: commandHelp,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "remote",
|
Name: "remote",
|
||||||
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "plex_url",
|
Name: "plex_url",
|
||||||
@@ -98,18 +98,18 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
|
|||||||
will need to be cleared or unexpected EOF errors will occur.`,
|
will need to be cleared or unexpected EOF errors will occur.`,
|
||||||
Default: DefCacheChunkSize,
|
Default: DefCacheChunkSize,
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "1m",
|
Value: "1M",
|
||||||
Help: "1MB",
|
Help: "1 MiB",
|
||||||
}, {
|
}, {
|
||||||
Value: "5M",
|
Value: "5M",
|
||||||
Help: "5 MB",
|
Help: "5 MiB",
|
||||||
}, {
|
}, {
|
||||||
Value: "10M",
|
Value: "10M",
|
||||||
Help: "10 MB",
|
Help: "10 MiB",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "info_age",
|
Name: "info_age",
|
||||||
Help: `How long to cache file structure information (directory listings, file size, times etc).
|
Help: `How long to cache file structure information (directory listings, file size, times, etc.).
|
||||||
If all write operations are done through the cache then you can safely make
|
If all write operations are done through the cache then you can safely make
|
||||||
this value very large as the cache store will also be updated in real time.`,
|
this value very large as the cache store will also be updated in real time.`,
|
||||||
Default: DefCacheInfoAge,
|
Default: DefCacheInfoAge,
|
||||||
@@ -132,13 +132,13 @@ oldest chunks until it goes under this value.`,
|
|||||||
Default: DefCacheTotalChunkSize,
|
Default: DefCacheTotalChunkSize,
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "500M",
|
Value: "500M",
|
||||||
Help: "500 MB",
|
Help: "500 MiB",
|
||||||
}, {
|
}, {
|
||||||
Value: "1G",
|
Value: "1G",
|
||||||
Help: "1 GB",
|
Help: "1 GiB",
|
||||||
}, {
|
}, {
|
||||||
Value: "10G",
|
Value: "10G",
|
||||||
Help: "10 GB",
|
Help: "10 GiB",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "db_path",
|
Name: "db_path",
|
||||||
@@ -339,8 +339,14 @@ func parseRootPath(path string) (string, error) {
|
|||||||
return strings.Trim(path, "/"), nil
|
return strings.Trim(path, "/"), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var warnDeprecated sync.Once
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
|
warnDeprecated.Do(func() {
|
||||||
|
fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
|
||||||
|
})
|
||||||
|
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -361,15 +367,10 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
|
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
|
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
|
||||||
if err != nil {
|
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
|
||||||
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
|
|
||||||
}
|
|
||||||
|
|
||||||
remotePath := fspath.JoinRootPath(wPath, rootPath)
|
|
||||||
wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
|
|
||||||
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
|
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
|
||||||
return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
|
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
|
||||||
}
|
}
|
||||||
var fsErr error
|
var fsErr error
|
||||||
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
|
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
|
||||||
@@ -390,6 +391,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
cleanupChan: make(chan bool, 1),
|
cleanupChan: make(chan bool, 1),
|
||||||
notifiedRemotes: make(map[string]bool),
|
notifiedRemotes: make(map[string]bool),
|
||||||
}
|
}
|
||||||
|
cache.PinUntilFinalized(f.Fs, f)
|
||||||
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
|
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
|
||||||
|
|
||||||
f.plexConnector = &plexConnector{}
|
f.plexConnector = &plexConnector{}
|
||||||
@@ -483,7 +485,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
|
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
|
||||||
}
|
}
|
||||||
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
|
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
|
||||||
f.tempFs, err = cache.Get(f.opt.TempWritePath)
|
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
|
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
|
||||||
}
|
}
|
||||||
@@ -510,13 +512,13 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
|
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
|
||||||
pollInterval := make(chan time.Duration, 1)
|
pollInterval := make(chan time.Duration, 1)
|
||||||
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
|
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
|
||||||
doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
|
doChangeNotify(ctx, f.receiveChangeNotify, pollInterval)
|
||||||
}
|
}
|
||||||
|
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
DuplicateFiles: false, // storage doesn't permit this
|
DuplicateFiles: false, // storage doesn't permit this
|
||||||
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
|
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
// override only those features that use a temp fs and it doesn't support them
|
// override only those features that use a temp fs and it doesn't support them
|
||||||
//f.features.ChangeNotify = f.ChangeNotify
|
//f.features.ChangeNotify = f.ChangeNotify
|
||||||
if f.opt.TempWritePath != "" {
|
if f.opt.TempWritePath != "" {
|
||||||
@@ -585,7 +587,7 @@ Some valid examples are:
|
|||||||
"0:10" -> the first ten chunks
|
"0:10" -> the first ten chunks
|
||||||
|
|
||||||
Any parameter with a key that starts with "file" can be used to
|
Any parameter with a key that starts with "file" can be used to
|
||||||
specify files to fetch, eg
|
specify files to fetch, e.g.
|
||||||
|
|
||||||
rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
|
rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
|
||||||
|
|
||||||
@@ -1240,7 +1242,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
// using server side move operations.
|
// using server-side move operations.
|
||||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||||
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
|
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
|
||||||
|
|
||||||
@@ -1521,7 +1523,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
|||||||
return f.put(ctx, in, src, options, do)
|
return f.put(ctx, in, src, options, do)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
|
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
|
||||||
|
|
||||||
@@ -1598,7 +1600,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
return co, nil
|
return co, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move src to this remote using server side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
|
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
|
||||||
|
|
||||||
@@ -1702,17 +1704,20 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
return f.Fs.Hashes()
|
return f.Fs.Hashes()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Purge all files in the root and the root directory
|
// Purge all files in the directory
|
||||||
func (f *Fs) Purge(ctx context.Context) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
fs.Infof(f, "purging cache")
|
if dir == "" {
|
||||||
f.cache.Purge()
|
// FIXME this isn't quite right as it should purge the dir prefix
|
||||||
|
fs.Infof(f, "purging cache")
|
||||||
|
f.cache.Purge()
|
||||||
|
}
|
||||||
|
|
||||||
do := f.Fs.Features().Purge
|
do := f.Fs.Features().Purge
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil
|
return fs.ErrorCantPurge
|
||||||
}
|
}
|
||||||
|
|
||||||
err := do(ctx)
|
err := do(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1896,6 +1901,16 @@ func (f *Fs) Disconnect(ctx context.Context) error {
|
|||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Shutdown the backend, closing any background tasks and any
|
||||||
|
// cached connections.
|
||||||
|
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||||
|
do := f.Fs.Features().Shutdown
|
||||||
|
if do == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return do(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
var commandHelp = []fs.CommandHelp{
|
var commandHelp = []fs.CommandHelp{
|
||||||
{
|
{
|
||||||
Name: "stats",
|
Name: "stats",
|
||||||
@@ -1940,4 +1955,5 @@ var (
|
|||||||
_ fs.Disconnecter = (*Fs)(nil)
|
_ fs.Disconnecter = (*Fs)(nil)
|
||||||
_ fs.Commander = (*Fs)(nil)
|
_ fs.Commander = (*Fs)(nil)
|
||||||
_ fs.MergeDirser = (*Fs)(nil)
|
_ fs.MergeDirser = (*Fs)(nil)
|
||||||
|
_ fs.Shutdowner = (*Fs)(nil)
|
||||||
)
|
)
|
||||||
|
|||||||
369
backend/cache/cache_internal_test.go
vendored
369
backend/cache/cache_internal_test.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
// +build !plan9
|
// +build !plan9,!js
|
||||||
// +build !race
|
// +build !race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
@@ -16,7 +16,6 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -31,13 +30,10 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/object"
|
"github.com/rclone/rclone/fs/object"
|
||||||
"github.com/rclone/rclone/fs/rc"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/rclone/rclone/fstest/testy"
|
"github.com/rclone/rclone/fstest/testy"
|
||||||
"github.com/rclone/rclone/lib/random"
|
"github.com/rclone/rclone/lib/random"
|
||||||
"github.com/rclone/rclone/vfs"
|
|
||||||
"github.com/rclone/rclone/vfs/vfsflags"
|
"github.com/rclone/rclone/vfs/vfsflags"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -53,9 +49,7 @@ const (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
remoteName string
|
remoteName string
|
||||||
mountDir string
|
|
||||||
uploadDir string
|
uploadDir string
|
||||||
useMount bool
|
|
||||||
runInstance *run
|
runInstance *run
|
||||||
errNotSupported = errors.New("not supported")
|
errNotSupported = errors.New("not supported")
|
||||||
decryptedToEncryptedRemotes = map[string]string{
|
decryptedToEncryptedRemotes = map[string]string{
|
||||||
@@ -91,9 +85,7 @@ var (
|
|||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem")
|
goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem")
|
||||||
goflag.StringVar(&mountDir, "mount-dir-internal", "", "")
|
|
||||||
goflag.StringVar(&uploadDir, "upload-dir-internal", "", "")
|
goflag.StringVar(&uploadDir, "upload-dir-internal", "", "")
|
||||||
goflag.BoolVar(&useMount, "cache-use-mount", false, "Test only with mount")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestMain drives the tests
|
// TestMain drives the tests
|
||||||
@@ -101,7 +93,7 @@ func TestMain(m *testing.M) {
|
|||||||
goflag.Parse()
|
goflag.Parse()
|
||||||
var rc int
|
var rc int
|
||||||
|
|
||||||
log.Printf("Running with the following params: \n remote: %v, \n mount: %v", remoteName, useMount)
|
log.Printf("Running with the following params: \n remote: %v", remoteName)
|
||||||
runInstance = newRun()
|
runInstance = newRun()
|
||||||
rc = m.Run()
|
rc = m.Run()
|
||||||
os.Exit(rc)
|
os.Exit(rc)
|
||||||
@@ -274,31 +266,6 @@ func TestInternalObjNotFound(t *testing.T) {
|
|||||||
require.Nil(t, obj)
|
require.Nil(t, obj)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalRemoteWrittenFileFoundInMount(t *testing.T) {
|
|
||||||
if !runInstance.useMount {
|
|
||||||
t.Skip("test needs mount mode")
|
|
||||||
}
|
|
||||||
id := fmt.Sprintf("tirwffim%v", time.Now().Unix())
|
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
|
|
||||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
|
||||||
|
|
||||||
cfs, err := runInstance.getCacheFs(rootFs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var testData []byte
|
|
||||||
if runInstance.rootIsCrypt {
|
|
||||||
testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
|
|
||||||
require.NoError(t, err)
|
|
||||||
} else {
|
|
||||||
testData = []byte("test content")
|
|
||||||
}
|
|
||||||
|
|
||||||
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test"), testData)
|
|
||||||
data, err := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, "test content", string(data))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestInternalCachedWrittenContentMatches(t *testing.T) {
|
func TestInternalCachedWrittenContentMatches(t *testing.T) {
|
||||||
testy.SkipUnreliable(t)
|
testy.SkipUnreliable(t)
|
||||||
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
|
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
|
||||||
@@ -694,79 +661,6 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
|||||||
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
|
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalChangeSeenAfterRc(t *testing.T) {
|
|
||||||
cacheExpire := rc.Calls.Get("cache/expire")
|
|
||||||
assert.NotNil(t, cacheExpire)
|
|
||||||
|
|
||||||
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
|
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
|
||||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
|
||||||
|
|
||||||
if !runInstance.useMount {
|
|
||||||
t.Skipf("needs mount")
|
|
||||||
}
|
|
||||||
if !runInstance.wrappedIsExternal {
|
|
||||||
t.Skipf("needs drive")
|
|
||||||
}
|
|
||||||
|
|
||||||
cfs, err := runInstance.getCacheFs(rootFs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
chunkSize := cfs.ChunkSize()
|
|
||||||
|
|
||||||
// create some rand test data
|
|
||||||
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
|
|
||||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
|
||||||
|
|
||||||
// update in the wrapped fs
|
|
||||||
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
wrappedTime := time.Now().Add(-1 * time.Hour)
|
|
||||||
err = o.SetModTime(context.Background(), wrappedTime)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// get a new instance from the cache
|
|
||||||
co, err := rootFs.NewObject(context.Background(), "data.bin")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
|
|
||||||
|
|
||||||
// Call the rc function
|
|
||||||
m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Contains(t, m, "status")
|
|
||||||
require.Contains(t, m, "message")
|
|
||||||
require.Equal(t, "ok", m["status"])
|
|
||||||
require.Contains(t, m["message"], "cached file cleared")
|
|
||||||
|
|
||||||
// get a new instance from the cache
|
|
||||||
co, err = rootFs.NewObject(context.Background(), "data.bin")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
|
|
||||||
_, err = runInstance.list(t, rootFs, "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// create some rand test data
|
|
||||||
testData2 := randStringBytes(int(chunkSize))
|
|
||||||
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
|
|
||||||
|
|
||||||
// list should have 1 item only
|
|
||||||
li1, err := runInstance.list(t, rootFs, "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Len(t, li1, 1)
|
|
||||||
|
|
||||||
// Call the rc function
|
|
||||||
m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Contains(t, m, "status")
|
|
||||||
require.Contains(t, m, "message")
|
|
||||||
require.Equal(t, "ok", m["status"])
|
|
||||||
require.Contains(t, m["message"], "cached directory cleared")
|
|
||||||
|
|
||||||
// list should have 2 items now
|
|
||||||
li2, err := runInstance.list(t, rootFs, "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Len(t, li2, 2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestInternalCacheWrites(t *testing.T) {
|
func TestInternalCacheWrites(t *testing.T) {
|
||||||
id := "ticw"
|
id := "ticw"
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
|
||||||
@@ -914,15 +808,9 @@ func TestInternalBug2117(t *testing.T) {
|
|||||||
type run struct {
|
type run struct {
|
||||||
okDiff time.Duration
|
okDiff time.Duration
|
||||||
runDefaultCfgMap configmap.Simple
|
runDefaultCfgMap configmap.Simple
|
||||||
mntDir string
|
|
||||||
tmpUploadDir string
|
tmpUploadDir string
|
||||||
useMount bool
|
|
||||||
isMounted bool
|
|
||||||
rootIsCrypt bool
|
rootIsCrypt bool
|
||||||
wrappedIsExternal bool
|
wrappedIsExternal bool
|
||||||
unmountFn func() error
|
|
||||||
unmountRes chan error
|
|
||||||
vfs *vfs.VFS
|
|
||||||
tempFiles []*os.File
|
tempFiles []*os.File
|
||||||
dbPath string
|
dbPath string
|
||||||
chunkPath string
|
chunkPath string
|
||||||
@@ -932,9 +820,7 @@ type run struct {
|
|||||||
func newRun() *run {
|
func newRun() *run {
|
||||||
var err error
|
var err error
|
||||||
r := &run{
|
r := &run{
|
||||||
okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
|
okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
|
||||||
useMount: useMount,
|
|
||||||
isMounted: false,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read in all the defaults for all the options
|
// Read in all the defaults for all the options
|
||||||
@@ -947,36 +833,10 @@ func newRun() *run {
|
|||||||
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
|
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
|
||||||
}
|
}
|
||||||
|
|
||||||
if mountDir == "" {
|
|
||||||
if runtime.GOOS != "windows" {
|
|
||||||
r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to create mount dir: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Find a free drive letter
|
|
||||||
drive := ""
|
|
||||||
for letter := 'E'; letter <= 'Z'; letter++ {
|
|
||||||
drive = string(letter) + ":"
|
|
||||||
_, err := os.Stat(drive + "\\")
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
goto found
|
|
||||||
}
|
|
||||||
}
|
|
||||||
log.Print("Couldn't find free drive letter for test")
|
|
||||||
found:
|
|
||||||
r.mntDir = drive
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
r.mntDir = mountDir
|
|
||||||
}
|
|
||||||
log.Printf("Mount Dir: %v", r.mntDir)
|
|
||||||
|
|
||||||
if uploadDir == "" {
|
if uploadDir == "" {
|
||||||
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
|
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to create temp dir: %v", err)
|
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
r.tmpUploadDir = uploadDir
|
r.tmpUploadDir = uploadDir
|
||||||
@@ -1032,7 +892,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
m.Set("type", "cache")
|
m.Set("type", "cache")
|
||||||
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
|
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
|
||||||
} else {
|
} else {
|
||||||
remoteType := config.FileGet(remote, "type", "")
|
remoteType := config.FileGet(remote, "type")
|
||||||
if remoteType == "" {
|
if remoteType == "" {
|
||||||
t.Skipf("skipped due to invalid remote type for %v", remote)
|
t.Skipf("skipped due to invalid remote type for %v", remote)
|
||||||
return nil, nil
|
return nil, nil
|
||||||
@@ -1043,14 +903,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
m.Set("password", cryptPassword1)
|
m.Set("password", cryptPassword1)
|
||||||
m.Set("password2", cryptPassword2)
|
m.Set("password2", cryptPassword2)
|
||||||
}
|
}
|
||||||
remoteRemote := config.FileGet(remote, "remote", "")
|
remoteRemote := config.FileGet(remote, "remote")
|
||||||
if remoteRemote == "" {
|
if remoteRemote == "" {
|
||||||
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
|
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
remoteRemoteParts := strings.Split(remoteRemote, ":")
|
remoteRemoteParts := strings.Split(remoteRemote, ":")
|
||||||
remoteWrapping := remoteRemoteParts[0]
|
remoteWrapping := remoteRemoteParts[0]
|
||||||
remoteType := config.FileGet(remoteWrapping, "type", "")
|
remoteType := config.FileGet(remoteWrapping, "type")
|
||||||
if remoteType != "cache" {
|
if remoteType != "cache" {
|
||||||
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
|
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
|
||||||
return nil, nil
|
return nil, nil
|
||||||
@@ -1065,14 +925,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
|
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
fs.Config.LowLevelRetries = 1
|
ci := fs.GetConfig(context.Background())
|
||||||
|
ci.LowLevelRetries = 1
|
||||||
|
|
||||||
// Instantiate root
|
// Instantiate root
|
||||||
if purge {
|
if purge {
|
||||||
boltDb.PurgeTempUploads()
|
boltDb.PurgeTempUploads()
|
||||||
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
|
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
|
||||||
}
|
}
|
||||||
f, err := cache.NewFs(remote, id, m)
|
f, err := cache.NewFs(context.Background(), remote, id, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
cfs, err := r.getCacheFs(f)
|
cfs, err := r.getCacheFs(f)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -1086,33 +947,21 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
}
|
}
|
||||||
|
|
||||||
if purge {
|
if purge {
|
||||||
_ = f.Features().Purge(context.Background())
|
_ = f.Features().Purge(context.Background(), "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
err = f.Mkdir(context.Background(), "")
|
err = f.Mkdir(context.Background(), "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
if r.useMount && !r.isMounted {
|
|
||||||
r.mountFs(t, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
return f, boltDb
|
return f, boltDb
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
||||||
if r.useMount && r.isMounted {
|
err := f.Features().Purge(context.Background(), "")
|
||||||
r.unmountFs(t, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
err := f.Features().Purge(context.Background())
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
cfs, err := r.getCacheFs(f)
|
cfs, err := r.getCacheFs(f)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
cfs.StopBackgroundRunners()
|
cfs.StopBackgroundRunners()
|
||||||
|
|
||||||
if r.useMount && runtime.GOOS != "windows" {
|
|
||||||
err = os.RemoveAll(r.mntDir)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
err = os.RemoveAll(r.tmpUploadDir)
|
err = os.RemoveAll(r.tmpUploadDir)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
@@ -1152,37 +1001,11 @@ func (r *run) writeObjectString(t *testing.T, f fs.Fs, remote, content string) f
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
|
func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
|
||||||
var err error
|
r.writeObjectBytes(t, f, remote, data)
|
||||||
|
|
||||||
if r.useMount {
|
|
||||||
err = r.retryBlock(func() error {
|
|
||||||
return ioutil.WriteFile(path.Join(r.mntDir, remote), data, 0600)
|
|
||||||
}, 3, time.Second*3)
|
|
||||||
require.NoError(t, err)
|
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
|
||||||
} else {
|
|
||||||
r.writeObjectBytes(t, f, remote, data)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
|
func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
|
||||||
defer func() {
|
r.writeObjectReader(t, f, remote, in)
|
||||||
_ = in.Close()
|
|
||||||
}()
|
|
||||||
|
|
||||||
if r.useMount {
|
|
||||||
out, err := os.Create(path.Join(r.mntDir, remote))
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() {
|
|
||||||
_ = out.Close()
|
|
||||||
}()
|
|
||||||
|
|
||||||
_, err = io.Copy(out, in)
|
|
||||||
require.NoError(t, err)
|
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
|
||||||
} else {
|
|
||||||
r.writeObjectReader(t, f, remote, in)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
|
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
|
||||||
@@ -1199,10 +1022,6 @@ func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Read
|
|||||||
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
|
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
|
||||||
obj, err := f.Put(context.Background(), in, objInfo)
|
obj, err := f.Put(context.Background(), in, objInfo)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
if r.useMount {
|
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
return obj
|
return obj
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1210,26 +1029,16 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
|
|||||||
var err error
|
var err error
|
||||||
var obj fs.Object
|
var obj fs.Object
|
||||||
|
|
||||||
if r.useMount {
|
in1 := bytes.NewReader(data1)
|
||||||
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data1, 0600)
|
in2 := bytes.NewReader(data2)
|
||||||
require.NoError(t, err)
|
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
|
||||||
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
|
|
||||||
require.NoError(t, err)
|
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
|
||||||
obj, err = f.NewObject(context.Background(), remote)
|
|
||||||
} else {
|
|
||||||
in1 := bytes.NewReader(data1)
|
|
||||||
in2 := bytes.NewReader(data2)
|
|
||||||
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
|
|
||||||
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
|
|
||||||
|
|
||||||
obj, err = f.Put(context.Background(), in1, objInfo1)
|
_, err = f.Put(context.Background(), in1, objInfo1)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
obj, err = f.NewObject(context.Background(), remote)
|
obj, err = f.NewObject(context.Background(), remote)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = obj.Update(context.Background(), in2, objInfo2)
|
err = obj.Update(context.Background(), in2, objInfo2)
|
||||||
}
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
return obj
|
return obj
|
||||||
@@ -1239,30 +1048,12 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
|
|||||||
size := end - offset
|
size := end - offset
|
||||||
checkSample := make([]byte, size)
|
checkSample := make([]byte, size)
|
||||||
|
|
||||||
if r.useMount {
|
co, err := f.NewObject(context.Background(), remote)
|
||||||
f, err := os.Open(path.Join(r.mntDir, remote))
|
if err != nil {
|
||||||
defer func() {
|
return checkSample, err
|
||||||
_ = f.Close()
|
|
||||||
}()
|
|
||||||
if err != nil {
|
|
||||||
return checkSample, err
|
|
||||||
}
|
|
||||||
_, _ = f.Seek(offset, io.SeekStart)
|
|
||||||
totalRead, err := io.ReadFull(f, checkSample)
|
|
||||||
checkSample = checkSample[:totalRead]
|
|
||||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return checkSample, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
co, err := f.NewObject(context.Background(), remote)
|
|
||||||
if err != nil {
|
|
||||||
return checkSample, err
|
|
||||||
}
|
|
||||||
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
|
|
||||||
}
|
}
|
||||||
|
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
|
||||||
|
|
||||||
if !noLengthCheck && size != int64(len(checkSample)) {
|
if !noLengthCheck && size != int64(len(checkSample)) {
|
||||||
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
|
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
|
||||||
}
|
}
|
||||||
@@ -1285,28 +1076,19 @@ func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLe
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
|
func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
|
||||||
var err error
|
err := f.Mkdir(context.Background(), remote)
|
||||||
if r.useMount {
|
|
||||||
err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
|
|
||||||
} else {
|
|
||||||
err = f.Mkdir(context.Background(), remote)
|
|
||||||
}
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
|
func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if r.useMount {
|
var obj fs.Object
|
||||||
err = os.Remove(path.Join(r.mntDir, remote))
|
obj, err = f.NewObject(context.Background(), remote)
|
||||||
|
if err != nil {
|
||||||
|
err = f.Rmdir(context.Background(), remote)
|
||||||
} else {
|
} else {
|
||||||
var obj fs.Object
|
err = obj.Remove(context.Background())
|
||||||
obj, err = f.NewObject(context.Background(), remote)
|
|
||||||
if err != nil {
|
|
||||||
err = f.Rmdir(context.Background(), remote)
|
|
||||||
} else {
|
|
||||||
err = obj.Remove(context.Background())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
@@ -1315,18 +1097,10 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
|
|||||||
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
|
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
|
||||||
var err error
|
var err error
|
||||||
var l []interface{}
|
var l []interface{}
|
||||||
if r.useMount {
|
var list fs.DirEntries
|
||||||
var list []os.FileInfo
|
list, err = f.List(context.Background(), remote)
|
||||||
list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
|
for _, ll := range list {
|
||||||
for _, ll := range list {
|
l = append(l, ll)
|
||||||
l = append(l, ll)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
var list fs.DirEntries
|
|
||||||
list, err = f.List(context.Background(), remote)
|
|
||||||
for _, ll := range list {
|
|
||||||
l = append(l, ll)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return l, err
|
return l, err
|
||||||
}
|
}
|
||||||
@@ -1355,13 +1129,7 @@ func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
|
|||||||
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if runInstance.useMount {
|
if rootFs.Features().DirMove != nil {
|
||||||
err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
|
||||||
} else if rootFs.Features().DirMove != nil {
|
|
||||||
err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
|
err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1377,13 +1145,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
|||||||
func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if runInstance.useMount {
|
if rootFs.Features().Move != nil {
|
||||||
err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
|
||||||
} else if rootFs.Features().Move != nil {
|
|
||||||
obj1, err := rootFs.NewObject(context.Background(), src)
|
obj1, err := rootFs.NewObject(context.Background(), src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1403,13 +1165,7 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
|||||||
func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if r.useMount {
|
if rootFs.Features().Copy != nil {
|
||||||
err = r.copyFile(t, rootFs, path.Join(r.mntDir, src), path.Join(r.mntDir, dst))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
|
||||||
} else if rootFs.Features().Copy != nil {
|
|
||||||
obj, err := rootFs.NewObject(context.Background(), src)
|
obj, err := rootFs.NewObject(context.Background(), src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1429,13 +1185,6 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
|||||||
func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
|
func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if r.useMount {
|
|
||||||
fi, err := os.Stat(path.Join(runInstance.mntDir, src))
|
|
||||||
if err != nil {
|
|
||||||
return time.Time{}, err
|
|
||||||
}
|
|
||||||
return fi.ModTime(), nil
|
|
||||||
}
|
|
||||||
obj1, err := rootFs.NewObject(context.Background(), src)
|
obj1, err := rootFs.NewObject(context.Background(), src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return time.Time{}, err
|
return time.Time{}, err
|
||||||
@@ -1446,13 +1195,6 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
|
|||||||
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
|
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if r.useMount {
|
|
||||||
fi, err := os.Stat(path.Join(runInstance.mntDir, src))
|
|
||||||
if err != nil {
|
|
||||||
return int64(0), err
|
|
||||||
}
|
|
||||||
return fi.Size(), nil
|
|
||||||
}
|
|
||||||
obj1, err := rootFs.NewObject(context.Background(), src)
|
obj1, err := rootFs.NewObject(context.Background(), src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return int64(0), err
|
return int64(0), err
|
||||||
@@ -1463,28 +1205,15 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
|
|||||||
func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) error {
|
func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) error {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if r.useMount {
|
var obj1 fs.Object
|
||||||
var f *os.File
|
obj1, err = rootFs.NewObject(context.Background(), src)
|
||||||
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
|
if err != nil {
|
||||||
if err != nil {
|
return err
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
_ = f.Close()
|
|
||||||
r.vfs.WaitForWriters(10 * time.Second)
|
|
||||||
}()
|
|
||||||
_, err = f.WriteString(data + append)
|
|
||||||
} else {
|
|
||||||
var obj1 fs.Object
|
|
||||||
obj1, err = rootFs.NewObject(context.Background(), src)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
data1 := []byte(data + append)
|
|
||||||
r := bytes.NewReader(data1)
|
|
||||||
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
|
|
||||||
err = obj1.Update(context.Background(), r, objInfo1)
|
|
||||||
}
|
}
|
||||||
|
data1 := []byte(data + append)
|
||||||
|
reader := bytes.NewReader(data1)
|
||||||
|
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
|
||||||
|
err = obj1.Update(context.Background(), reader, objInfo1)
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
21
backend/cache/cache_mount_other_test.go
vendored
21
backend/cache/cache_mount_other_test.go
vendored
@@ -1,21 +0,0 @@
|
|||||||
// +build !linux !go1.13
|
|
||||||
// +build !darwin !go1.13
|
|
||||||
// +build !freebsd !go1.13
|
|
||||||
// +build !windows
|
|
||||||
// +build !race
|
|
||||||
|
|
||||||
package cache_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (r *run) mountFs(t *testing.T, f fs.Fs) {
|
|
||||||
panic("mountFs not defined for this platform")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
|
|
||||||
panic("unmountFs not defined for this platform")
|
|
||||||
}
|
|
||||||
79
backend/cache/cache_mount_unix_test.go
vendored
79
backend/cache/cache_mount_unix_test.go
vendored
@@ -1,79 +0,0 @@
|
|||||||
// +build linux,go1.13 darwin,go1.13 freebsd,go1.13
|
|
||||||
// +build !race
|
|
||||||
|
|
||||||
package cache_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"bazil.org/fuse"
|
|
||||||
fusefs "bazil.org/fuse/fs"
|
|
||||||
"github.com/rclone/rclone/cmd/mount"
|
|
||||||
"github.com/rclone/rclone/cmd/mountlib"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (r *run) mountFs(t *testing.T, f fs.Fs) {
|
|
||||||
device := f.Name() + ":" + f.Root()
|
|
||||||
var options = []fuse.MountOption{
|
|
||||||
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
|
|
||||||
fuse.Subtype("rclone"),
|
|
||||||
fuse.FSName(device), fuse.VolumeName(device),
|
|
||||||
fuse.NoAppleDouble(),
|
|
||||||
fuse.NoAppleXattr(),
|
|
||||||
//fuse.AllowOther(),
|
|
||||||
}
|
|
||||||
err := os.MkdirAll(r.mntDir, os.ModePerm)
|
|
||||||
require.NoError(t, err)
|
|
||||||
c, err := fuse.Mount(r.mntDir, options...)
|
|
||||||
require.NoError(t, err)
|
|
||||||
filesys := mount.NewFS(f)
|
|
||||||
server := fusefs.New(c, nil)
|
|
||||||
|
|
||||||
// Serve the mount point in the background returning error to errChan
|
|
||||||
r.unmountRes = make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
err := server.Serve(filesys)
|
|
||||||
closeErr := c.Close()
|
|
||||||
if err == nil {
|
|
||||||
err = closeErr
|
|
||||||
}
|
|
||||||
r.unmountRes <- err
|
|
||||||
}()
|
|
||||||
|
|
||||||
// check if the mount process has an error to report
|
|
||||||
<-c.Ready
|
|
||||||
require.NoError(t, c.MountError)
|
|
||||||
|
|
||||||
r.unmountFn = func() error {
|
|
||||||
// Shutdown the VFS
|
|
||||||
filesys.VFS.Shutdown()
|
|
||||||
return fuse.Unmount(r.mntDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
r.vfs = filesys.VFS
|
|
||||||
r.isMounted = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
|
|
||||||
var err error
|
|
||||||
|
|
||||||
for i := 0; i < 4; i++ {
|
|
||||||
err = r.unmountFn()
|
|
||||||
if err != nil {
|
|
||||||
//log.Printf("signal to umount failed - retrying: %v", err)
|
|
||||||
time.Sleep(3 * time.Second)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = <-r.unmountRes
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = r.vfs.CleanUp()
|
|
||||||
require.NoError(t, err)
|
|
||||||
r.isMounted = false
|
|
||||||
}
|
|
||||||
125
backend/cache/cache_mount_windows_test.go
vendored
125
backend/cache/cache_mount_windows_test.go
vendored
@@ -1,125 +0,0 @@
|
|||||||
// +build windows
|
|
||||||
// +build !race
|
|
||||||
|
|
||||||
package cache_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/billziss-gh/cgofuse/fuse"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/rclone/rclone/cmd/cmount"
|
|
||||||
"github.com/rclone/rclone/cmd/mountlib"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// waitFor runs fn() until it returns true or the timeout expires
|
|
||||||
func waitFor(fn func() bool) (ok bool) {
|
|
||||||
const totalWait = 10 * time.Second
|
|
||||||
const individualWait = 10 * time.Millisecond
|
|
||||||
for i := 0; i < int(totalWait/individualWait); i++ {
|
|
||||||
ok = fn()
|
|
||||||
if ok {
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
time.Sleep(individualWait)
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *run) mountFs(t *testing.T, f fs.Fs) {
|
|
||||||
// FIXME implement cmount
|
|
||||||
t.Skip("windows not supported yet")
|
|
||||||
|
|
||||||
device := f.Name() + ":" + f.Root()
|
|
||||||
options := []string{
|
|
||||||
"-o", "fsname=" + device,
|
|
||||||
"-o", "subtype=rclone",
|
|
||||||
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
|
|
||||||
"-o", "uid=-1",
|
|
||||||
"-o", "gid=-1",
|
|
||||||
"-o", "allow_other",
|
|
||||||
// This causes FUSE to supply O_TRUNC with the Open
|
|
||||||
// call which is more efficient for cmount. However
|
|
||||||
// it does not work with cgofuse on Windows with
|
|
||||||
// WinFSP so cmount must work with or without it.
|
|
||||||
"-o", "atomic_o_trunc",
|
|
||||||
"--FileSystemName=rclone",
|
|
||||||
}
|
|
||||||
|
|
||||||
fsys := cmount.NewFS(f)
|
|
||||||
host := fuse.NewFileSystemHost(fsys)
|
|
||||||
|
|
||||||
// Serve the mount point in the background returning error to errChan
|
|
||||||
r.unmountRes = make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
var err error
|
|
||||||
ok := host.Mount(r.mntDir, options)
|
|
||||||
if !ok {
|
|
||||||
err = errors.New("mount failed")
|
|
||||||
}
|
|
||||||
r.unmountRes <- err
|
|
||||||
}()
|
|
||||||
|
|
||||||
// unmount
|
|
||||||
r.unmountFn = func() error {
|
|
||||||
// Shutdown the VFS
|
|
||||||
fsys.VFS.Shutdown()
|
|
||||||
if host.Unmount() {
|
|
||||||
if !waitFor(func() bool {
|
|
||||||
_, err := os.Stat(r.mntDir)
|
|
||||||
return err != nil
|
|
||||||
}) {
|
|
||||||
t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return errors.New("host unmount failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for the filesystem to become ready, checking the file
|
|
||||||
// system didn't blow up before starting
|
|
||||||
select {
|
|
||||||
case err := <-r.unmountRes:
|
|
||||||
require.NoError(t, err)
|
|
||||||
case <-time.After(time.Second * 3):
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for the mount point to be available on Windows
|
|
||||||
// On Windows the Init signal comes slightly before the mount is ready
|
|
||||||
if !waitFor(func() bool {
|
|
||||||
_, err := os.Stat(r.mntDir)
|
|
||||||
return err == nil
|
|
||||||
}) {
|
|
||||||
t.Errorf("mountpoint %q didn't became available on mount", r.mntDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
r.vfs = fsys.VFS
|
|
||||||
r.isMounted = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
|
|
||||||
// FIXME implement cmount
|
|
||||||
t.Skip("windows not supported yet")
|
|
||||||
var err error
|
|
||||||
|
|
||||||
for i := 0; i < 4; i++ {
|
|
||||||
err = r.unmountFn()
|
|
||||||
if err != nil {
|
|
||||||
//log.Printf("signal to umount failed - retrying: %v", err)
|
|
||||||
time.Sleep(3 * time.Second)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = <-r.unmountRes
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = r.vfs.CleanUp()
|
|
||||||
require.NoError(t, err)
|
|
||||||
r.isMounted = false
|
|
||||||
}
|
|
||||||
2
backend/cache/cache_test.go
vendored
2
backend/cache/cache_test.go
vendored
@@ -1,6 +1,6 @@
|
|||||||
// Test Cache filesystem interface
|
// Test Cache filesystem interface
|
||||||
|
|
||||||
// +build !plan9
|
// +build !plan9,!js
|
||||||
// +build !race
|
// +build !race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|||||||
2
backend/cache/cache_unsupported.go
vendored
2
backend/cache/cache_unsupported.go
vendored
@@ -1,6 +1,6 @@
|
|||||||
// Build for cache for unsupported platforms to stop go complaining
|
// Build for cache for unsupported platforms to stop go complaining
|
||||||
// about "no buildable Go source files "
|
// about "no buildable Go source files "
|
||||||
|
|
||||||
// +build plan9
|
// +build plan9 js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|||||||
2
backend/cache/cache_upload_test.go
vendored
2
backend/cache/cache_upload_test.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
// +build !plan9
|
// +build !plan9,!js
|
||||||
// +build !race
|
// +build !race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|||||||
2
backend/cache/directory.go
vendored
2
backend/cache/directory.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
// +build !plan9
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/handle.go
vendored
2
backend/cache/handle.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
// +build !plan9
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/object.go
vendored
2
backend/cache/object.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
// +build !plan9
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/plex.go
vendored
2
backend/cache/plex.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
// +build !plan9
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/storage_memory.go
vendored
2
backend/cache/storage_memory.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
// +build !plan9
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/storage_persistent.go
vendored
2
backend/cache/storage_persistent.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
// +build !plan9
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -13,6 +13,7 @@ import (
|
|||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/fs/object"
|
||||||
"github.com/rclone/rclone/fs/operations"
|
"github.com/rclone/rclone/fs/operations"
|
||||||
"github.com/rclone/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
@@ -32,7 +33,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
|
|||||||
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
|
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
|
||||||
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
|
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
|
||||||
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
|
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
|
||||||
Size: int64(kilobytes) * int64(fs.KibiByte),
|
Size: int64(kilobytes) * int64(fs.Kibi),
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -467,9 +468,15 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
|||||||
return obj
|
return obj
|
||||||
}
|
}
|
||||||
billyObj := newFile("billy")
|
billyObj := newFile("billy")
|
||||||
|
billyTxn := billyObj.(*Object).xactID
|
||||||
|
if f.useNoRename {
|
||||||
|
require.True(t, billyTxn != "")
|
||||||
|
} else {
|
||||||
|
require.True(t, billyTxn == "")
|
||||||
|
}
|
||||||
|
|
||||||
billyChunkName := func(chunkNo int) string {
|
billyChunkName := func(chunkNo int) string {
|
||||||
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
|
return f.makeChunkName(billyObj.Remote(), chunkNo, "", billyTxn)
|
||||||
}
|
}
|
||||||
|
|
||||||
err := f.Mkdir(ctx, billyChunkName(1))
|
err := f.Mkdir(ctx, billyChunkName(1))
|
||||||
@@ -486,11 +493,13 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
|||||||
// accessing chunks in strict mode is prohibited
|
// accessing chunks in strict mode is prohibited
|
||||||
f.opt.FailHard = true
|
f.opt.FailHard = true
|
||||||
billyChunk4Name := billyChunkName(4)
|
billyChunk4Name := billyChunkName(4)
|
||||||
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
|
_, err = f.base.NewObject(ctx, billyChunk4Name)
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = f.NewObject(ctx, billyChunk4Name)
|
||||||
assertOverlapError(err)
|
assertOverlapError(err)
|
||||||
|
|
||||||
f.opt.FailHard = false
|
f.opt.FailHard = false
|
||||||
billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
|
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
require.NotNil(t, billyChunk4)
|
require.NotNil(t, billyChunk4)
|
||||||
|
|
||||||
@@ -519,7 +528,8 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
|||||||
|
|
||||||
// recreate billy in case it was anyhow corrupted
|
// recreate billy in case it was anyhow corrupted
|
||||||
willyObj := newFile("willy")
|
willyObj := newFile("willy")
|
||||||
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
|
willyTxn := willyObj.(*Object).xactID
|
||||||
|
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", willyTxn)
|
||||||
f.opt.FailHard = false
|
f.opt.FailHard = false
|
||||||
willyChunk, err := f.NewObject(ctx, willyChunkName)
|
willyChunk, err := f.NewObject(ctx, willyChunkName)
|
||||||
f.opt.FailHard = true
|
f.opt.FailHard = true
|
||||||
@@ -560,17 +570,20 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
|
|||||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||||
contents := random.String(100)
|
contents := random.String(100)
|
||||||
|
|
||||||
newFile := func(f fs.Fs, name string) (fs.Object, string) {
|
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
|
||||||
filename := path.Join(dir, name)
|
filename = path.Join(dir, name)
|
||||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||||
require.NotNil(t, obj)
|
require.NotNil(t, obj)
|
||||||
return obj, filename
|
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
|
||||||
|
txnID = chunkObj.xactID
|
||||||
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f.opt.FailHard = false
|
f.opt.FailHard = false
|
||||||
file, fileName := newFile(f, "wreaker")
|
file, fileName, fileTxn := newFile(f, "wreaker")
|
||||||
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
|
wreak, _, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", fileTxn))
|
||||||
|
|
||||||
f.opt.FailHard = false
|
f.opt.FailHard = false
|
||||||
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
|
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
|
||||||
@@ -649,7 +662,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
|
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "", "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
todaysMeta := string(metaData)
|
todaysMeta := string(metaData)
|
||||||
runSubtest(todaysMeta, "today")
|
runSubtest(todaysMeta, "today")
|
||||||
@@ -663,6 +676,174 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
|||||||
runSubtest(futureMeta, "future")
|
runSubtest(futureMeta, "future")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Test that chunker refuses to change on objects with future/unknown metadata
|
||||||
|
func testFutureProof(t *testing.T, f *Fs) {
|
||||||
|
if f.opt.MetaFormat == "none" {
|
||||||
|
t.Skip("this test requires metadata support")
|
||||||
|
}
|
||||||
|
|
||||||
|
saveOpt := f.opt
|
||||||
|
ctx := context.Background()
|
||||||
|
f.opt.FailHard = true
|
||||||
|
const dir = "future"
|
||||||
|
const file = dir + "/test"
|
||||||
|
defer func() {
|
||||||
|
f.opt.FailHard = false
|
||||||
|
_ = operations.Purge(ctx, f.base, dir)
|
||||||
|
f.opt = saveOpt
|
||||||
|
}()
|
||||||
|
|
||||||
|
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||||
|
putPart := func(name string, part int, data, msg string) {
|
||||||
|
if part > 0 {
|
||||||
|
name = f.makeChunkName(name, part-1, "", "")
|
||||||
|
}
|
||||||
|
item := fstest.Item{Path: name, ModTime: modTime}
|
||||||
|
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
|
||||||
|
assert.NotNil(t, obj, msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// simulate chunked object from future
|
||||||
|
meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}`
|
||||||
|
putPart(file, 0, meta, "metaobject")
|
||||||
|
putPart(file, 1, "abc", "chunk1")
|
||||||
|
putPart(file, 2, "def", "chunk2")
|
||||||
|
putPart(file, 3, "ghi", "chunk3")
|
||||||
|
|
||||||
|
// List should succeed
|
||||||
|
ls, err := f.List(ctx, dir)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, 1, len(ls))
|
||||||
|
assert.Equal(t, int64(9), ls[0].Size())
|
||||||
|
|
||||||
|
// NewObject should succeed
|
||||||
|
obj, err := f.NewObject(ctx, file)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, file, obj.Remote())
|
||||||
|
assert.Equal(t, int64(9), obj.Size())
|
||||||
|
|
||||||
|
// Hash must fail
|
||||||
|
_, err = obj.Hash(ctx, hash.SHA1)
|
||||||
|
assert.Equal(t, ErrMetaUnknown, err)
|
||||||
|
|
||||||
|
// Move must fail
|
||||||
|
mobj, err := operations.Move(ctx, f, nil, file+"2", obj)
|
||||||
|
assert.Nil(t, mobj)
|
||||||
|
assert.Error(t, err)
|
||||||
|
if err != nil {
|
||||||
|
assert.Contains(t, err.Error(), "please upgrade rclone")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put must fail
|
||||||
|
oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil)
|
||||||
|
buf := bytes.NewBufferString("abc")
|
||||||
|
_, err = f.Put(ctx, buf, oi)
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
// Rcat must fail
|
||||||
|
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
|
||||||
|
robj, err := operations.Rcat(ctx, f, file, in, modTime)
|
||||||
|
assert.Nil(t, robj)
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
if err != nil {
|
||||||
|
assert.Contains(t, err.Error(), "please upgrade rclone")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The newer method of doing transactions without renaming should still be able to correctly process chunks that were created with renaming
|
||||||
|
// If you attempt to do the inverse, however, the data chunks will be ignored causing commands to perform incorrectly
|
||||||
|
func testBackwardsCompatibility(t *testing.T, f *Fs) {
|
||||||
|
if !f.useMeta {
|
||||||
|
t.Skip("Can't do norename transactions without metadata")
|
||||||
|
}
|
||||||
|
const dir = "backcomp"
|
||||||
|
ctx := context.Background()
|
||||||
|
saveOpt := f.opt
|
||||||
|
saveUseNoRename := f.useNoRename
|
||||||
|
defer func() {
|
||||||
|
f.opt.FailHard = false
|
||||||
|
_ = operations.Purge(ctx, f.base, dir)
|
||||||
|
f.opt = saveOpt
|
||||||
|
f.useNoRename = saveUseNoRename
|
||||||
|
}()
|
||||||
|
f.opt.ChunkSize = fs.SizeSuffix(10)
|
||||||
|
|
||||||
|
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||||
|
contents := random.String(250)
|
||||||
|
newFile := func(f fs.Fs, name string) (fs.Object, string) {
|
||||||
|
filename := path.Join(dir, name)
|
||||||
|
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||||
|
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||||
|
require.NotNil(t, obj)
|
||||||
|
return obj, filename
|
||||||
|
}
|
||||||
|
|
||||||
|
f.opt.FailHard = false
|
||||||
|
f.useNoRename = false
|
||||||
|
file, fileName := newFile(f, "renamefile")
|
||||||
|
|
||||||
|
f.opt.FailHard = false
|
||||||
|
item := fstest.NewItem(fileName, contents, modTime)
|
||||||
|
|
||||||
|
var items []fstest.Item
|
||||||
|
items = append(items, item)
|
||||||
|
|
||||||
|
f.useNoRename = true
|
||||||
|
fstest.CheckListingWithRoot(t, f, dir, items, nil, f.Precision())
|
||||||
|
_, err := f.NewObject(ctx, fileName)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
f.opt.FailHard = true
|
||||||
|
_, err = f.List(ctx, dir)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
f.opt.FailHard = false
|
||||||
|
_ = file.Remove(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testChunkerServerSideMove(t *testing.T, f *Fs) {
|
||||||
|
if !f.useMeta {
|
||||||
|
t.Skip("Can't test norename transactions without metadata")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
const dir = "servermovetest"
|
||||||
|
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), dir)
|
||||||
|
|
||||||
|
subFs1, err := fs.NewFs(ctx, subRemote+"/subdir1")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
fs1, isChunkerFs := subFs1.(*Fs)
|
||||||
|
assert.True(t, isChunkerFs)
|
||||||
|
fs1.useNoRename = false
|
||||||
|
fs1.opt.ChunkSize = fs.SizeSuffix(3)
|
||||||
|
|
||||||
|
subFs2, err := fs.NewFs(ctx, subRemote+"/subdir2")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
fs2, isChunkerFs := subFs2.(*Fs)
|
||||||
|
assert.True(t, isChunkerFs)
|
||||||
|
fs2.useNoRename = true
|
||||||
|
fs2.opt.ChunkSize = fs.SizeSuffix(3)
|
||||||
|
|
||||||
|
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||||
|
item := fstest.Item{Path: "movefile", ModTime: modTime}
|
||||||
|
contents := "abcdef"
|
||||||
|
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
|
||||||
|
|
||||||
|
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
|
||||||
|
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, int64(len(contents)), dstFile.Size())
|
||||||
|
|
||||||
|
r, err := dstFile.Open(ctx)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.NotNil(t, r)
|
||||||
|
data, err := ioutil.ReadAll(r)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, contents, string(data))
|
||||||
|
_ = r.Close()
|
||||||
|
_ = operations.Purge(ctx, f.base, dir)
|
||||||
|
}
|
||||||
|
|
||||||
// InternalTest dispatches all internal tests
|
// InternalTest dispatches all internal tests
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
func (f *Fs) InternalTest(t *testing.T) {
|
||||||
t.Run("PutLarge", func(t *testing.T) {
|
t.Run("PutLarge", func(t *testing.T) {
|
||||||
@@ -686,6 +867,15 @@ func (f *Fs) InternalTest(t *testing.T) {
|
|||||||
t.Run("MetadataInput", func(t *testing.T) {
|
t.Run("MetadataInput", func(t *testing.T) {
|
||||||
testMetadataInput(t, f)
|
testMetadataInput(t, f)
|
||||||
})
|
})
|
||||||
|
t.Run("FutureProof", func(t *testing.T) {
|
||||||
|
testFutureProof(t, f)
|
||||||
|
})
|
||||||
|
t.Run("BackwardsCompatibility", func(t *testing.T) {
|
||||||
|
testBackwardsCompatibility(t, f)
|
||||||
|
})
|
||||||
|
t.Run("ChunkerServerSideMove", func(t *testing.T) {
|
||||||
|
testChunkerServerSideMove(t, f)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
var _ fstests.InternalTester = (*Fs)(nil)
|
||||||
|
|||||||
@@ -15,10 +15,10 @@ import (
|
|||||||
|
|
||||||
// Command line flags
|
// Command line flags
|
||||||
var (
|
var (
|
||||||
// Invalid characters are not supported by some remotes, eg. Mailru.
|
// Invalid characters are not supported by some remotes, e.g. Mailru.
|
||||||
// We enable testing with invalid characters when -remote is not set, so
|
// We enable testing with invalid characters when -remote is not set, so
|
||||||
// chunker overlays a local directory, but invalid characters are disabled
|
// chunker overlays a local directory, but invalid characters are disabled
|
||||||
// by default when -remote is set, eg. when test_all runs backend tests.
|
// by default when -remote is set, e.g. when test_all runs backend tests.
|
||||||
// You can still test with invalid characters using the below flag.
|
// You can still test with invalid characters using the below flag.
|
||||||
UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
|
UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
|
||||||
)
|
)
|
||||||
|
|||||||
1
backend/compress/.gitignore
vendored
Normal file
1
backend/compress/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
test
|
||||||
1416
backend/compress/compress.go
Normal file
1416
backend/compress/compress.go
Normal file
File diff suppressed because it is too large
Load Diff
65
backend/compress/compress_test.go
Normal file
65
backend/compress/compress_test.go
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
// Test Crypt filesystem interface
|
||||||
|
package compress
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
_ "github.com/rclone/rclone/backend/drive"
|
||||||
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
|
_ "github.com/rclone/rclone/backend/s3"
|
||||||
|
_ "github.com/rclone/rclone/backend/swift"
|
||||||
|
"github.com/rclone/rclone/fstest"
|
||||||
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestIntegration runs integration tests against the remote
|
||||||
|
func TestIntegration(t *testing.T) {
|
||||||
|
opt := fstests.Opt{
|
||||||
|
RemoteName: *fstest.RemoteName,
|
||||||
|
NilObject: (*Object)(nil),
|
||||||
|
UnimplementableFsMethods: []string{
|
||||||
|
"OpenWriterAt",
|
||||||
|
"MergeDirs",
|
||||||
|
"DirCacheFlush",
|
||||||
|
"PutUnchecked",
|
||||||
|
"PutStream",
|
||||||
|
"UserInfo",
|
||||||
|
"Disconnect",
|
||||||
|
},
|
||||||
|
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
|
||||||
|
UnimplementableObjectMethods: []string{}}
|
||||||
|
fstests.Run(t, &opt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRemoteGzip tests GZIP compression
|
||||||
|
func TestRemoteGzip(t *testing.T) {
|
||||||
|
if *fstest.RemoteName != "" {
|
||||||
|
t.Skip("Skipping as -remote set")
|
||||||
|
}
|
||||||
|
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
|
||||||
|
name := "TestCompressGzip"
|
||||||
|
fstests.Run(t, &fstests.Opt{
|
||||||
|
RemoteName: name + ":",
|
||||||
|
NilObject: (*Object)(nil),
|
||||||
|
UnimplementableFsMethods: []string{
|
||||||
|
"OpenWriterAt",
|
||||||
|
"MergeDirs",
|
||||||
|
"DirCacheFlush",
|
||||||
|
"PutUnchecked",
|
||||||
|
"PutStream",
|
||||||
|
"UserInfo",
|
||||||
|
"Disconnect",
|
||||||
|
},
|
||||||
|
UnimplementableObjectMethods: []string{
|
||||||
|
"GetTier",
|
||||||
|
"SetTier",
|
||||||
|
},
|
||||||
|
ExtraConfig: []fstests.ExtraConfigItem{
|
||||||
|
{Name: name, Key: "type", Value: "compress"},
|
||||||
|
{Name: name, Key: "remote", Value: tempdir},
|
||||||
|
{Name: name, Key: "compression_mode", Value: "gzip"},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -12,12 +12,14 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
|
"github.com/rclone/rclone/lib/version"
|
||||||
"github.com/rfjakob/eme"
|
"github.com/rfjakob/eme"
|
||||||
"golang.org/x/crypto/nacl/secretbox"
|
"golang.org/x/crypto/nacl/secretbox"
|
||||||
"golang.org/x/crypto/scrypt"
|
"golang.org/x/crypto/scrypt"
|
||||||
@@ -147,7 +149,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
|
|||||||
// If salt is "" we use a fixed salt just to make attackers lives
|
// If salt is "" we use a fixed salt just to make attackers lives
|
||||||
// slighty harder than using no salt.
|
// slighty harder than using no salt.
|
||||||
//
|
//
|
||||||
// Note that empty passsword makes all 0x00 keys which is used in the
|
// Note that empty password makes all 0x00 keys which is used in the
|
||||||
// tests.
|
// tests.
|
||||||
func (c *Cipher) Key(password, salt string) (err error) {
|
func (c *Cipher) Key(password, salt string) (err error) {
|
||||||
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
|
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
|
||||||
@@ -442,11 +444,32 @@ func (c *Cipher) encryptFileName(in string) string {
|
|||||||
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Strip version string so that only the non-versioned part
|
||||||
|
// of the file name gets encrypted/obfuscated
|
||||||
|
hasVersion := false
|
||||||
|
var t time.Time
|
||||||
|
if i == (len(segments)-1) && version.Match(segments[i]) {
|
||||||
|
var s string
|
||||||
|
t, s = version.Remove(segments[i])
|
||||||
|
// version.Remove can fail, in which case it returns segments[i]
|
||||||
|
if s != segments[i] {
|
||||||
|
segments[i] = s
|
||||||
|
hasVersion = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if c.mode == NameEncryptionStandard {
|
if c.mode == NameEncryptionStandard {
|
||||||
segments[i] = c.encryptSegment(segments[i])
|
segments[i] = c.encryptSegment(segments[i])
|
||||||
} else {
|
} else {
|
||||||
segments[i] = c.obfuscateSegment(segments[i])
|
segments[i] = c.obfuscateSegment(segments[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add back a version to the encrypted/obfuscated
|
||||||
|
// file name, if we stripped it off earlier
|
||||||
|
if hasVersion {
|
||||||
|
segments[i] = version.Add(segments[i], t)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return strings.Join(segments, "/")
|
return strings.Join(segments, "/")
|
||||||
}
|
}
|
||||||
@@ -477,6 +500,21 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Strip version string so that only the non-versioned part
|
||||||
|
// of the file name gets decrypted/deobfuscated
|
||||||
|
hasVersion := false
|
||||||
|
var t time.Time
|
||||||
|
if i == (len(segments)-1) && version.Match(segments[i]) {
|
||||||
|
var s string
|
||||||
|
t, s = version.Remove(segments[i])
|
||||||
|
// version.Remove can fail, in which case it returns segments[i]
|
||||||
|
if s != segments[i] {
|
||||||
|
segments[i] = s
|
||||||
|
hasVersion = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if c.mode == NameEncryptionStandard {
|
if c.mode == NameEncryptionStandard {
|
||||||
segments[i], err = c.decryptSegment(segments[i])
|
segments[i], err = c.decryptSegment(segments[i])
|
||||||
} else {
|
} else {
|
||||||
@@ -486,6 +524,12 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add back a version to the decrypted/deobfuscated
|
||||||
|
// file name, if we stripped it off earlier
|
||||||
|
if hasVersion {
|
||||||
|
segments[i] = version.Add(segments[i], t)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return strings.Join(segments, "/"), nil
|
return strings.Join(segments, "/"), nil
|
||||||
}
|
}
|
||||||
@@ -494,10 +538,18 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
||||||
if c.mode == NameEncryptionOff {
|
if c.mode == NameEncryptionOff {
|
||||||
remainingLength := len(in) - len(encryptedSuffix)
|
remainingLength := len(in) - len(encryptedSuffix)
|
||||||
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
|
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
|
||||||
return in[:remainingLength], nil
|
return "", ErrorNotAnEncryptedFile
|
||||||
}
|
}
|
||||||
return "", ErrorNotAnEncryptedFile
|
decrypted := in[:remainingLength]
|
||||||
|
if version.Match(decrypted) {
|
||||||
|
_, unversioned := version.Remove(decrypted)
|
||||||
|
if unversioned == "" {
|
||||||
|
return "", ErrorNotAnEncryptedFile
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Leave the version string on, if it was there
|
||||||
|
return decrypted, nil
|
||||||
}
|
}
|
||||||
return c.decryptFileName(in)
|
return c.decryptFileName(in)
|
||||||
}
|
}
|
||||||
@@ -633,11 +685,8 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
|
|||||||
}
|
}
|
||||||
// possibly err != nil here, but we will process the
|
// possibly err != nil here, but we will process the
|
||||||
// data and the next call to ReadFull will return 0, err
|
// data and the next call to ReadFull will return 0, err
|
||||||
// Write nonce to start of block
|
|
||||||
copy(fh.buf, fh.nonce[:])
|
|
||||||
// Encrypt the block using the nonce
|
// Encrypt the block using the nonce
|
||||||
block := fh.buf
|
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||||
secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
|
||||||
fh.bufIndex = 0
|
fh.bufIndex = 0
|
||||||
fh.bufSize = blockHeaderSize + n
|
fh.bufSize = blockHeaderSize + n
|
||||||
fh.nonce.increment()
|
fh.nonce.increment()
|
||||||
@@ -782,8 +831,7 @@ func (fh *decrypter) fillBuffer() (err error) {
|
|||||||
return ErrorEncryptedFileBadHeader
|
return ErrorEncryptedFileBadHeader
|
||||||
}
|
}
|
||||||
// Decrypt the block using the nonce
|
// Decrypt the block using the nonce
|
||||||
block := fh.buf
|
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||||
_, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
|
||||||
if !ok {
|
if !ok {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err // return pending error as it is likely more accurate
|
return err // return pending error as it is likely more accurate
|
||||||
|
|||||||
@@ -160,22 +160,29 @@ func TestEncryptFileName(t *testing.T) {
|
|||||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
||||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
||||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
||||||
|
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
|
||||||
|
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
|
||||||
// Standard mode with directory name encryption off
|
// Standard mode with directory name encryption off
|
||||||
c, _ = newCipher(NameEncryptionStandard, "", "", false)
|
c, _ = newCipher(NameEncryptionStandard, "", "", false)
|
||||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
||||||
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
||||||
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
||||||
|
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
|
||||||
|
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
|
||||||
// Now off mode
|
// Now off mode
|
||||||
c, _ = newCipher(NameEncryptionOff, "", "", true)
|
c, _ = newCipher(NameEncryptionOff, "", "", true)
|
||||||
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
|
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
|
||||||
// Obfuscation mode
|
// Obfuscation mode
|
||||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
|
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
|
||||||
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||||
|
assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
|
||||||
|
assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
|
||||||
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
||||||
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
||||||
// Obfuscation mode with directory name encryption off
|
// Obfuscation mode with directory name encryption off
|
||||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
|
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
|
||||||
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||||
|
assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
|
||||||
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
||||||
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
||||||
}
|
}
|
||||||
@@ -194,14 +201,19 @@ func TestDecryptFileName(t *testing.T) {
|
|||||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
|
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
|
||||||
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||||
|
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
|
||||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
|
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
|
||||||
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
|
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
|
||||||
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
|
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
|
||||||
|
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
|
||||||
|
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
|
||||||
|
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
|
||||||
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
|
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
|
||||||
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
|
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
|
||||||
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
|
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
|
||||||
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
|
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
|
||||||
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
|
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
|
||||||
|
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
|
||||||
} {
|
} {
|
||||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
|
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
|
||||||
actual, actualErr := c.DecryptFileName(test.in)
|
actual, actualErr := c.DecryptFileName(test.in)
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
|
"github.com/rclone/rclone/fs/cache"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
@@ -29,7 +30,7 @@ func init() {
|
|||||||
CommandHelp: commandHelp,
|
CommandHelp: commandHelp,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "remote",
|
Name: "remote",
|
||||||
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "filename_encryption",
|
Name: "filename_encryption",
|
||||||
@@ -75,7 +76,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "server_side_across_configs",
|
Name: "server_side_across_configs",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: `Allow server side operations (eg copy) to work across different crypt configs.
|
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
|
||||||
|
|
||||||
Normally this option is not what you want, but if you have two crypts
|
Normally this option is not what you want, but if you have two crypts
|
||||||
pointing to the same backend you can use it.
|
pointing to the same backend you can use it.
|
||||||
@@ -100,6 +101,21 @@ names, or for debugging purposes.`,
|
|||||||
Default: false,
|
Default: false,
|
||||||
Hide: fs.OptionHideConfigurator,
|
Hide: fs.OptionHideConfigurator,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "no_data_encryption",
|
||||||
|
Help: "Option to either encrypt file data or leave it unencrypted.",
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
Examples: []fs.OptionExample{
|
||||||
|
{
|
||||||
|
Value: "true",
|
||||||
|
Help: "Don't encrypt file data, leave it unencrypted.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "false",
|
||||||
|
Help: "Encrypt file data.",
|
||||||
|
},
|
||||||
|
},
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -143,7 +159,7 @@ func NewCipher(m configmap.Mapper) (*Cipher, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -158,24 +174,25 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
if strings.HasPrefix(remote, name+":") {
|
if strings.HasPrefix(remote, name+":") {
|
||||||
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
|
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
|
||||||
}
|
}
|
||||||
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
|
// Make sure to remove trailing . referring to the current dir
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
|
|
||||||
}
|
|
||||||
// Make sure to remove trailing . reffering to the current dir
|
|
||||||
if path.Base(rpath) == "." {
|
if path.Base(rpath) == "." {
|
||||||
rpath = strings.TrimSuffix(rpath, ".")
|
rpath = strings.TrimSuffix(rpath, ".")
|
||||||
}
|
}
|
||||||
// Look for a file first
|
// Look for a file first
|
||||||
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
|
var wrappedFs fs.Fs
|
||||||
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
|
if rpath == "" {
|
||||||
// if that didn't produce a file, look for a directory
|
wrappedFs, err = cache.Get(ctx, remote)
|
||||||
if err != fs.ErrorIsFile {
|
} else {
|
||||||
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
|
remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
|
||||||
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
|
wrappedFs, err = cache.Get(ctx, remotePath)
|
||||||
|
// if that didn't produce a file, look for a directory
|
||||||
|
if err != fs.ErrorIsFile {
|
||||||
|
remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
|
||||||
|
wrappedFs, err = cache.Get(ctx, remotePath)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if err != fs.ErrorIsFile && err != nil {
|
if err != fs.ErrorIsFile && err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
|
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
|
||||||
}
|
}
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
Fs: wrappedFs,
|
Fs: wrappedFs,
|
||||||
@@ -184,6 +201,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
opt: *opt,
|
opt: *opt,
|
||||||
cipher: cipher,
|
cipher: cipher,
|
||||||
}
|
}
|
||||||
|
cache.PinUntilFinalized(f.Fs, f)
|
||||||
// the features here are ones we could support, and they are
|
// the features here are ones we could support, and they are
|
||||||
// ANDed with the ones from wrappedFs
|
// ANDed with the ones from wrappedFs
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
@@ -196,7 +214,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
SetTier: true,
|
SetTier: true,
|
||||||
GetTier: true,
|
GetTier: true,
|
||||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||||
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
|
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
|
|
||||||
return f, err
|
return f, err
|
||||||
}
|
}
|
||||||
@@ -206,6 +224,7 @@ type Options struct {
|
|||||||
Remote string `config:"remote"`
|
Remote string `config:"remote"`
|
||||||
FilenameEncryption string `config:"filename_encryption"`
|
FilenameEncryption string `config:"filename_encryption"`
|
||||||
DirectoryNameEncryption bool `config:"directory_name_encryption"`
|
DirectoryNameEncryption bool `config:"directory_name_encryption"`
|
||||||
|
NoDataEncryption bool `config:"no_data_encryption"`
|
||||||
Password string `config:"password"`
|
Password string `config:"password"`
|
||||||
Password2 string `config:"password2"`
|
Password2 string `config:"password2"`
|
||||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||||
@@ -343,6 +362,10 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
|
|||||||
|
|
||||||
// put implements Put or PutStream
|
// put implements Put or PutStream
|
||||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||||
|
if f.opt.NoDataEncryption {
|
||||||
|
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
|
||||||
|
}
|
||||||
|
|
||||||
// Encrypt the data into wrappedIn
|
// Encrypt the data into wrappedIn
|
||||||
wrappedIn, encrypter, err := f.cipher.encryptData(in)
|
wrappedIn, encrypter, err := f.cipher.encryptData(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -381,13 +404,16 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to read destination hash")
|
return nil, errors.Wrap(err, "failed to read destination hash")
|
||||||
}
|
}
|
||||||
if srcHash != "" && dstHash != "" && srcHash != dstHash {
|
if srcHash != "" && dstHash != "" {
|
||||||
// remove object
|
if srcHash != dstHash {
|
||||||
err = o.Remove(ctx)
|
// remove object
|
||||||
if err != nil {
|
err = o.Remove(ctx)
|
||||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
if err != nil {
|
||||||
|
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||||
|
}
|
||||||
|
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
|
||||||
}
|
}
|
||||||
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
|
fs.Debugf(src, "%v = %s OK", ht, srcHash)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -427,21 +453,21 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|||||||
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
|
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Purge all files in the root and the root directory
|
// Purge all files in the directory specified
|
||||||
//
|
//
|
||||||
// Implement this if you have a way of deleting all the files
|
// Implement this if you have a way of deleting all the files
|
||||||
// quicker than just running Remove() on the result of List()
|
// quicker than just running Remove() on the result of List()
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist
|
// Return an error if it doesn't exist
|
||||||
func (f *Fs) Purge(ctx context.Context) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
do := f.Fs.Features().Purge
|
do := f.Fs.Features().Purge
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return fs.ErrorCantPurge
|
return fs.ErrorCantPurge
|
||||||
}
|
}
|
||||||
return do(ctx)
|
return do(ctx, f.cipher.EncryptDirName(dir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -466,7 +492,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
return f.newObject(oResult), nil
|
return f.newObject(oResult), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move src to this remote using server side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -492,7 +518,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
// using server side move operations.
|
// using server-side move operations.
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -614,6 +640,10 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
|
|||||||
//
|
//
|
||||||
// Note that we break lots of encapsulation in this function.
|
// Note that we break lots of encapsulation in this function.
|
||||||
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
||||||
|
if f.opt.NoDataEncryption {
|
||||||
|
return src.Hash(ctx, hashType)
|
||||||
|
}
|
||||||
|
|
||||||
// Read the nonce - opening the file is sufficient to read the nonce in
|
// Read the nonce - opening the file is sufficient to read the nonce in
|
||||||
// use a limited read so we only read the header
|
// use a limited read so we only read the header
|
||||||
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
||||||
@@ -819,9 +849,13 @@ func (o *Object) Remote() string {
|
|||||||
|
|
||||||
// Size returns the size of the file
|
// Size returns the size of the file
|
||||||
func (o *Object) Size() int64 {
|
func (o *Object) Size() int64 {
|
||||||
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
|
size := o.Object.Size()
|
||||||
if err != nil {
|
if !o.f.opt.NoDataEncryption {
|
||||||
fs.Debugf(o, "Bad size for decrypt: %v", err)
|
var err error
|
||||||
|
size, err = o.f.cipher.DecryptedSize(size)
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(o, "Bad size for decrypt: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return size
|
return size
|
||||||
}
|
}
|
||||||
@@ -839,6 +873,10 @@ func (o *Object) UnWrap() fs.Object {
|
|||||||
|
|
||||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||||
|
if o.f.opt.NoDataEncryption {
|
||||||
|
return o.Object.Open(ctx, options...)
|
||||||
|
}
|
||||||
|
|
||||||
var openOptions []fs.OpenOption
|
var openOptions []fs.OpenOption
|
||||||
var offset, limit int64 = 0, -1
|
var offset, limit int64 = 0, -1
|
||||||
for _, option := range options {
|
for _, option := range options {
|
||||||
@@ -914,6 +952,16 @@ func (f *Fs) Disconnect(ctx context.Context) error {
|
|||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Shutdown the backend, closing any background tasks and any
|
||||||
|
// cached connections.
|
||||||
|
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||||
|
do := f.Fs.Features().Shutdown
|
||||||
|
if do == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return do(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
||||||
//
|
//
|
||||||
// This encrypts the remote name and adjusts the size
|
// This encrypts the remote name and adjusts the size
|
||||||
@@ -1022,6 +1070,7 @@ var (
|
|||||||
_ fs.PublicLinker = (*Fs)(nil)
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
_ fs.UserInfoer = (*Fs)(nil)
|
_ fs.UserInfoer = (*Fs)(nil)
|
||||||
_ fs.Disconnecter = (*Fs)(nil)
|
_ fs.Disconnecter = (*Fs)(nil)
|
||||||
|
_ fs.Shutdowner = (*Fs)(nil)
|
||||||
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
||||||
_ fs.Object = (*Object)(nil)
|
_ fs.Object = (*Object)(nil)
|
||||||
_ fs.ObjectUnWrapper = (*Object)(nil)
|
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ func (o testWrapper) UnWrap() fs.Object {
|
|||||||
// Create a temporary local fs to upload things from
|
// Create a temporary local fs to upload things from
|
||||||
|
|
||||||
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
|
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
|
||||||
localFs, err := fs.TemporaryLocalFs()
|
localFs, err := fs.TemporaryLocalFs(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
cleanup = func() {
|
cleanup = func() {
|
||||||
require.NoError(t, localFs.Rmdir(context.Background(), ""))
|
require.NoError(t, localFs.Rmdir(context.Background(), ""))
|
||||||
@@ -87,7 +87,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// wrap the object in a crypt for upload using the nonce we
|
// wrap the object in a crypt for upload using the nonce we
|
||||||
// saved from the encryptor
|
// saved from the encrypter
|
||||||
src := f.newObjectInfo(oi, nonce)
|
src := f.newObjectInfo(oi, nonce)
|
||||||
|
|
||||||
// Test ObjectInfo methods
|
// Test ObjectInfo methods
|
||||||
|
|||||||
@@ -91,3 +91,26 @@ func TestObfuscate(t *testing.T) {
|
|||||||
UnimplementableObjectMethods: []string{"MimeType"},
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestNoDataObfuscate runs integration tests against the remote
|
||||||
|
func TestNoDataObfuscate(t *testing.T) {
|
||||||
|
if *fstest.RemoteName != "" {
|
||||||
|
t.Skip("Skipping as -remote set")
|
||||||
|
}
|
||||||
|
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||||
|
name := "TestCrypt4"
|
||||||
|
fstests.Run(t, &fstests.Opt{
|
||||||
|
RemoteName: name + ":",
|
||||||
|
NilObject: (*crypt.Object)(nil),
|
||||||
|
ExtraConfig: []fstests.ExtraConfigItem{
|
||||||
|
{Name: name, Key: "type", Value: "crypt"},
|
||||||
|
{Name: name, Key: "remote", Value: tempdir},
|
||||||
|
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||||
|
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
|
||||||
|
{Name: name, Key: "no_data_encryption", Value: "true"},
|
||||||
|
},
|
||||||
|
SkipBadWindowsCharacters: true,
|
||||||
|
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||||
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -7,16 +7,21 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"mime"
|
"mime"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
_ "github.com/rclone/rclone/backend/local"
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/operations"
|
"github.com/rclone/rclone/fs/operations"
|
||||||
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
|
"github.com/rclone/rclone/lib/random"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"google.golang.org/api/drive/v3"
|
"google.golang.org/api/drive/v3"
|
||||||
@@ -106,6 +111,7 @@ func TestInternalParseExtensions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalFindExportFormat(t *testing.T) {
|
func TestInternalFindExportFormat(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
item := &drive.File{
|
item := &drive.File{
|
||||||
Name: "file",
|
Name: "file",
|
||||||
MimeType: "application/vnd.google-apps.document",
|
MimeType: "application/vnd.google-apps.document",
|
||||||
@@ -123,7 +129,7 @@ func TestInternalFindExportFormat(t *testing.T) {
|
|||||||
} {
|
} {
|
||||||
f := new(Fs)
|
f := new(Fs)
|
||||||
f.exportExtensions = test.extensions
|
f.exportExtensions = test.extensions
|
||||||
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
|
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item)
|
||||||
assert.Equal(t, test.wantExtension, gotExtension)
|
assert.Equal(t, test.wantExtension, gotExtension)
|
||||||
if test.wantExtension != "" {
|
if test.wantExtension != "" {
|
||||||
assert.Equal(t, item.Name+gotExtension, gotFilename)
|
assert.Equal(t, item.Name+gotExtension, gotFilename)
|
||||||
@@ -191,7 +197,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
|
|||||||
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
|
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
testFilesFs, err := fs.NewFs(testFilesPath)
|
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
||||||
@@ -205,7 +211,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
|
|||||||
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
|
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
testFilesFs, err := fs.NewFs(testFilesPath)
|
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
||||||
@@ -269,14 +275,15 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// from fstest/fstests/fstests.go
|
||||||
|
existingDir = "hello? sausage"
|
||||||
|
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
|
||||||
|
existingSubDir = "êé"
|
||||||
|
)
|
||||||
|
|
||||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
|
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
|
||||||
func (f *Fs) InternalTestShortcuts(t *testing.T) {
|
func (f *Fs) InternalTestShortcuts(t *testing.T) {
|
||||||
const (
|
|
||||||
// from fstest/fstests/fstests.go
|
|
||||||
existingDir = "hello? sausage"
|
|
||||||
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
|
|
||||||
existingSubDir = "êé"
|
|
||||||
)
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
srcObj, err := f.NewObject(ctx, existingFile)
|
srcObj, err := f.NewObject(ctx, existingFile)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -361,6 +368,99 @@ func (f *Fs) InternalTestShortcuts(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestIntegration/FsMkdir/FsPutFiles/Internal/UnTrash
|
||||||
|
func (f *Fs) InternalTestUnTrash(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// Make some objects, one in a subdir
|
||||||
|
contents := random.String(100)
|
||||||
|
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
|
||||||
|
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
|
||||||
|
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
|
||||||
|
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
|
||||||
|
|
||||||
|
// Check objects
|
||||||
|
checkObjects := func() {
|
||||||
|
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{
|
||||||
|
file1,
|
||||||
|
file2,
|
||||||
|
}, []string{
|
||||||
|
"trashDir/subdir",
|
||||||
|
}, f.Precision())
|
||||||
|
}
|
||||||
|
checkObjects()
|
||||||
|
|
||||||
|
// Make sure we are using the trash
|
||||||
|
require.Equal(t, true, f.opt.UseTrash)
|
||||||
|
|
||||||
|
// Remove the object and the dir
|
||||||
|
require.NoError(t, obj1.Remove(ctx))
|
||||||
|
require.NoError(t, f.Purge(ctx, "trashDir/subdir"))
|
||||||
|
|
||||||
|
// Check objects gone
|
||||||
|
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{}, []string{}, f.Precision())
|
||||||
|
|
||||||
|
// Restore the object and directory
|
||||||
|
r, err := f.unTrashDir(ctx, "trashDir", true)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, unTrashResult{Errors: 0, Untrashed: 2}, r)
|
||||||
|
|
||||||
|
// Check objects restored
|
||||||
|
checkObjects()
|
||||||
|
|
||||||
|
// Remove the test dir
|
||||||
|
require.NoError(t, f.Purge(ctx, "trashDir"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
|
||||||
|
func (f *Fs) InternalTestCopyID(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
obj, err := f.NewObject(ctx, existingFile)
|
||||||
|
require.NoError(t, err)
|
||||||
|
o := obj.(*Object)
|
||||||
|
|
||||||
|
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer func() {
|
||||||
|
_ = os.RemoveAll(dir)
|
||||||
|
}()
|
||||||
|
|
||||||
|
checkFile := func(name string) {
|
||||||
|
filePath := filepath.Join(dir, name)
|
||||||
|
fi, err := os.Stat(filePath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, int64(100), fi.Size())
|
||||||
|
err = os.Remove(filePath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("BadID", func(t *testing.T) {
|
||||||
|
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "couldn't find id")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Directory", func(t *testing.T) {
|
||||||
|
rootID, err := f.dirCache.RootID(ctx, false)
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = f.copyID(ctx, rootID, dir+"/")
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "can't copy directory")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("WithoutDestName", func(t *testing.T) {
|
||||||
|
err = f.copyID(ctx, o.id, dir+"/")
|
||||||
|
require.NoError(t, err)
|
||||||
|
checkFile(path.Base(existingFile))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("WithDestName", func(t *testing.T) {
|
||||||
|
err = f.copyID(ctx, o.id, dir+"/potato.txt")
|
||||||
|
require.NoError(t, err)
|
||||||
|
checkFile("potato.txt")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
func (f *Fs) InternalTest(t *testing.T) {
|
||||||
// These tests all depend on each other so run them as nested tests
|
// These tests all depend on each other so run them as nested tests
|
||||||
t.Run("DocumentImport", func(t *testing.T) {
|
t.Run("DocumentImport", func(t *testing.T) {
|
||||||
@@ -376,6 +476,8 @@ func (f *Fs) InternalTest(t *testing.T) {
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
t.Run("Shortcuts", f.InternalTestShortcuts)
|
t.Run("Shortcuts", f.InternalTestShortcuts)
|
||||||
|
t.Run("UnTrash", f.InternalTestUnTrash)
|
||||||
|
t.Run("CopyID", f.InternalTestCopyID)
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
var _ fstests.InternalTester = (*Fs)(nil)
|
||||||
|
|||||||
@@ -77,11 +77,10 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
|
|||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
var req *http.Request
|
var req *http.Request
|
||||||
req, err = http.NewRequest(method, urls, body)
|
req, err = http.NewRequestWithContext(ctx, method, urls, body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
|
||||||
googleapi.Expand(req.URL, map[string]string{
|
googleapi.Expand(req.URL, map[string]string{
|
||||||
"fileId": fileID,
|
"fileId": fileID,
|
||||||
})
|
})
|
||||||
@@ -95,7 +94,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
|
|||||||
defer googleapi.CloseBody(res)
|
defer googleapi.CloseBody(res)
|
||||||
err = googleapi.CheckResponse(res)
|
err = googleapi.CheckResponse(res)
|
||||||
}
|
}
|
||||||
return f.shouldRetry(err)
|
return f.shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -114,8 +113,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
|
|||||||
|
|
||||||
// Make an http.Request for the range passed in
|
// Make an http.Request for the range passed in
|
||||||
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
|
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
|
||||||
req, _ := http.NewRequest("POST", rx.URI, body)
|
req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body)
|
||||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
|
||||||
req.ContentLength = reqSize
|
req.ContentLength = reqSize
|
||||||
totalSize := "*"
|
totalSize := "*"
|
||||||
if rx.ContentLength >= 0 {
|
if rx.ContentLength >= 0 {
|
||||||
@@ -204,7 +202,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
|
|||||||
err = rx.f.pacer.Call(func() (bool, error) {
|
err = rx.f.pacer.Call(func() (bool, error) {
|
||||||
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
|
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
|
||||||
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
|
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
|
||||||
again, err := rx.f.shouldRetry(err)
|
again, err := rx.f.shouldRetry(ctx, err)
|
||||||
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
|
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
|
||||||
again = false
|
again = false
|
||||||
err = nil
|
err = nil
|
||||||
|
|||||||
350
backend/dropbox/batcher.go
Normal file
350
backend/dropbox/batcher.go
Normal file
@@ -0,0 +1,350 @@
|
|||||||
|
// This file contains the implementation of the sync batcher for uploads
|
||||||
|
//
|
||||||
|
// Dropbox rules say you can start as many batches as you want, but
|
||||||
|
// you may only have one batch being committed and must wait for the
|
||||||
|
// batch to be finished before committing another.
|
||||||
|
|
||||||
|
package dropbox
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async"
|
||||||
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/lib/atexit"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
maxBatchSize = 1000 // max size the batch can be
|
||||||
|
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
|
||||||
|
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (ssync)
|
||||||
|
defaultBatchSizeAsync = 100 // default batch size if async
|
||||||
|
)
|
||||||
|
|
||||||
|
// batcher holds info about the current items waiting for upload
|
||||||
|
type batcher struct {
|
||||||
|
f *Fs // Fs this batch is part of
|
||||||
|
mode string // configured batch mode
|
||||||
|
size int // maximum size for batch
|
||||||
|
timeout time.Duration // idle timeout for batch
|
||||||
|
async bool // whether we are using async batching
|
||||||
|
in chan batcherRequest // incoming items to batch
|
||||||
|
closed chan struct{} // close to indicate batcher shut down
|
||||||
|
atexit atexit.FnHandle // atexit handle
|
||||||
|
shutOnce sync.Once // make sure we shutdown once only
|
||||||
|
wg sync.WaitGroup // wait for shutdown
|
||||||
|
}
|
||||||
|
|
||||||
|
// batcherRequest holds an incoming request with a place for a reply
|
||||||
|
type batcherRequest struct {
|
||||||
|
commitInfo *files.UploadSessionFinishArg
|
||||||
|
result chan<- batcherResponse
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return true if batcherRequest is the quit request
|
||||||
|
func (br *batcherRequest) isQuit() bool {
|
||||||
|
return br.commitInfo == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send this to get the engine to quit
|
||||||
|
var quitRequest = batcherRequest{}
|
||||||
|
|
||||||
|
// batcherResponse holds a response to be delivered to clients waiting
|
||||||
|
// for a batch to complete.
|
||||||
|
type batcherResponse struct {
|
||||||
|
err error
|
||||||
|
entry *files.FileMetadata
|
||||||
|
}
|
||||||
|
|
||||||
|
// newBatcher creates a new batcher structure
|
||||||
|
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
|
||||||
|
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
|
||||||
|
if size > maxBatchSize || size < 0 {
|
||||||
|
return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
async := false
|
||||||
|
|
||||||
|
switch mode {
|
||||||
|
case "sync":
|
||||||
|
if size <= 0 {
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
|
size = ci.Transfers
|
||||||
|
}
|
||||||
|
if timeout <= 0 {
|
||||||
|
timeout = defaultTimeoutSync
|
||||||
|
}
|
||||||
|
case "async":
|
||||||
|
if size <= 0 {
|
||||||
|
size = defaultBatchSizeAsync
|
||||||
|
}
|
||||||
|
if timeout <= 0 {
|
||||||
|
timeout = defaultTimeoutAsync
|
||||||
|
}
|
||||||
|
async = true
|
||||||
|
case "off":
|
||||||
|
size = 0
|
||||||
|
default:
|
||||||
|
return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
b := &batcher{
|
||||||
|
f: f,
|
||||||
|
mode: mode,
|
||||||
|
size: size,
|
||||||
|
timeout: timeout,
|
||||||
|
async: async,
|
||||||
|
in: make(chan batcherRequest, size),
|
||||||
|
closed: make(chan struct{}),
|
||||||
|
}
|
||||||
|
if b.Batching() {
|
||||||
|
b.atexit = atexit.Register(b.Shutdown)
|
||||||
|
b.wg.Add(1)
|
||||||
|
go b.commitLoop(context.Background())
|
||||||
|
}
|
||||||
|
return b, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Batching returns true if batching is active
|
||||||
|
func (b *batcher) Batching() bool {
|
||||||
|
return b.size > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// finishBatch commits the batch, returning a batch status to poll or maybe complete
|
||||||
|
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
|
||||||
|
var arg = &files.UploadSessionFinishBatchArg{
|
||||||
|
Entries: items,
|
||||||
|
}
|
||||||
|
err = b.f.pacer.Call(func() (bool, error) {
|
||||||
|
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
|
||||||
|
// If error is insufficient space then don't retry
|
||||||
|
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
||||||
|
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
||||||
|
err = fserrors.NoRetryError(err)
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// after the first chunk is uploaded, we retry everything
|
||||||
|
return err != nil, err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "batch commit failed")
|
||||||
|
}
|
||||||
|
return batchStatus, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// finishBatchJobStatus waits for the batch to complete returning completed entries
|
||||||
|
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
|
||||||
|
if launchBatchStatus.AsyncJobId == "" {
|
||||||
|
return nil, errors.New("wait for batch completion: empty job ID")
|
||||||
|
}
|
||||||
|
var batchStatus *files.UploadSessionFinishBatchJobStatus
|
||||||
|
sleepTime := 100 * time.Millisecond
|
||||||
|
const maxTries = 120
|
||||||
|
for try := 1; try <= maxTries; try++ {
|
||||||
|
err = b.f.pacer.Call(func() (bool, error) {
|
||||||
|
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
|
||||||
|
AsyncJobId: launchBatchStatus.AsyncJobId,
|
||||||
|
})
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d/%d", sleepTime, err, try, maxTries)
|
||||||
|
} else {
|
||||||
|
if batchStatus.Tag == "complete" {
|
||||||
|
return batchStatus.Complete, nil
|
||||||
|
}
|
||||||
|
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d/%d", sleepTime, batchStatus.Tag, try, maxTries)
|
||||||
|
}
|
||||||
|
time.Sleep(sleepTime)
|
||||||
|
sleepTime *= 2
|
||||||
|
if sleepTime > time.Second {
|
||||||
|
sleepTime = time.Second
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
err = errors.New("batch didn't complete")
|
||||||
|
}
|
||||||
|
return nil, errors.Wrapf(err, "wait for batch failed after %d tries", maxTries)
|
||||||
|
}
|
||||||
|
|
||||||
|
// commit a batch
|
||||||
|
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
|
||||||
|
// If commit fails then signal clients if sync
|
||||||
|
var signalled = b.async
|
||||||
|
defer func() {
|
||||||
|
if err != nil && signalled {
|
||||||
|
// Signal to clients that there was an error
|
||||||
|
for _, result := range results {
|
||||||
|
result <- batcherResponse{err: err}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
|
||||||
|
fs.Debugf(b.f, "Committing %s", desc)
|
||||||
|
|
||||||
|
// finalise the batch getting either a result or a job id to poll
|
||||||
|
batchStatus, err := b.finishBatch(ctx, items)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// check whether batch is complete
|
||||||
|
var complete *files.UploadSessionFinishBatchResult
|
||||||
|
switch batchStatus.Tag {
|
||||||
|
case "async_job_id":
|
||||||
|
// wait for batch to complete
|
||||||
|
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case "complete":
|
||||||
|
complete = batchStatus.Complete
|
||||||
|
default:
|
||||||
|
return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check we got the right number of entries
|
||||||
|
entries := complete.Entries
|
||||||
|
if len(entries) != len(results) {
|
||||||
|
return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Report results to clients
|
||||||
|
var (
|
||||||
|
errorTag = ""
|
||||||
|
errorCount = 0
|
||||||
|
)
|
||||||
|
for i := range results {
|
||||||
|
item := entries[i]
|
||||||
|
resp := batcherResponse{}
|
||||||
|
if item.Tag == "success" {
|
||||||
|
resp.entry = item.Success
|
||||||
|
} else {
|
||||||
|
errorCount++
|
||||||
|
errorTag = item.Tag
|
||||||
|
if item.Failure != nil {
|
||||||
|
errorTag = item.Failure.Tag
|
||||||
|
if item.Failure.LookupFailed != nil {
|
||||||
|
errorTag += "/" + item.Failure.LookupFailed.Tag
|
||||||
|
}
|
||||||
|
if item.Failure.Path != nil {
|
||||||
|
errorTag += "/" + item.Failure.Path.Tag
|
||||||
|
}
|
||||||
|
if item.Failure.PropertiesError != nil {
|
||||||
|
errorTag += "/" + item.Failure.PropertiesError.Tag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
resp.err = errors.Errorf("batch upload failed: %s", errorTag)
|
||||||
|
}
|
||||||
|
if !b.async {
|
||||||
|
results[i] <- resp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Show signalled so no need to report error to clients from now on
|
||||||
|
signalled = true
|
||||||
|
|
||||||
|
// Report an error if any failed in the batch
|
||||||
|
if errorTag != "" {
|
||||||
|
return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.Debugf(b.f, "Committed %s", desc)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// commitLoop runs the commit engine in the background
|
||||||
|
func (b *batcher) commitLoop(ctx context.Context) {
|
||||||
|
var (
|
||||||
|
items []*files.UploadSessionFinishArg // current batch of uncommitted files
|
||||||
|
results []chan<- batcherResponse // current batch of clients awaiting results
|
||||||
|
idleTimer = time.NewTimer(b.timeout)
|
||||||
|
commit = func() {
|
||||||
|
err := b.commitBatch(ctx, items, results)
|
||||||
|
if err != nil {
|
||||||
|
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
|
||||||
|
}
|
||||||
|
items, results = nil, nil
|
||||||
|
}
|
||||||
|
)
|
||||||
|
defer b.wg.Done()
|
||||||
|
defer idleTimer.Stop()
|
||||||
|
idleTimer.Stop()
|
||||||
|
|
||||||
|
outer:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case req := <-b.in:
|
||||||
|
if req.isQuit() {
|
||||||
|
break outer
|
||||||
|
}
|
||||||
|
items = append(items, req.commitInfo)
|
||||||
|
results = append(results, req.result)
|
||||||
|
idleTimer.Stop()
|
||||||
|
if len(items) >= b.size {
|
||||||
|
commit()
|
||||||
|
} else {
|
||||||
|
idleTimer.Reset(b.timeout)
|
||||||
|
}
|
||||||
|
case <-idleTimer.C:
|
||||||
|
if len(items) > 0 {
|
||||||
|
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
|
||||||
|
commit()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
// commit any remaining items
|
||||||
|
if len(items) > 0 {
|
||||||
|
commit()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown finishes any pending batches then shuts everything down
|
||||||
|
//
|
||||||
|
// Can be called from atexit handler
|
||||||
|
func (b *batcher) Shutdown() {
|
||||||
|
b.shutOnce.Do(func() {
|
||||||
|
atexit.Unregister(b.atexit)
|
||||||
|
fs.Infof(b.f, "Commiting uploads - please wait...")
|
||||||
|
// show that batcher is shutting down
|
||||||
|
close(b.closed)
|
||||||
|
// quit the commitLoop by sending a quitRequest message
|
||||||
|
//
|
||||||
|
// Note that we don't close b.in because that will
|
||||||
|
// cause write to closed channel in Commit when we are
|
||||||
|
// exiting due to a signal.
|
||||||
|
b.in <- quitRequest
|
||||||
|
b.wg.Wait()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commit commits the file using a batch call, first adding it to the
|
||||||
|
// batch and then waiting for the batch to complete in a synchronous
|
||||||
|
// way if async is not set.
|
||||||
|
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
|
||||||
|
select {
|
||||||
|
case <-b.closed:
|
||||||
|
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
|
||||||
|
resp := make(chan batcherResponse, 1)
|
||||||
|
b.in <- batcherRequest{
|
||||||
|
commitInfo: commitInfo,
|
||||||
|
result: resp,
|
||||||
|
}
|
||||||
|
// If running async then don't wait for the result
|
||||||
|
if b.async {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
result := <-resp
|
||||||
|
return result.entry, result.err
|
||||||
|
}
|
||||||
File diff suppressed because it is too large
Load Diff
44
backend/dropbox/dropbox_internal_test.go
Normal file
44
backend/dropbox/dropbox_internal_test.go
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
package dropbox
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestInternalCheckPathLength(t *testing.T) {
|
||||||
|
rep := func(n int, r rune) (out string) {
|
||||||
|
rs := make([]rune, n)
|
||||||
|
for i := range rs {
|
||||||
|
rs[i] = r
|
||||||
|
}
|
||||||
|
return string(rs)
|
||||||
|
}
|
||||||
|
for _, test := range []struct {
|
||||||
|
in string
|
||||||
|
ok bool
|
||||||
|
}{
|
||||||
|
{in: "", ok: true},
|
||||||
|
{in: rep(maxFileNameLength, 'a'), ok: true},
|
||||||
|
{in: rep(maxFileNameLength+1, 'a'), ok: false},
|
||||||
|
{in: rep(maxFileNameLength, '£'), ok: true},
|
||||||
|
{in: rep(maxFileNameLength+1, '£'), ok: false},
|
||||||
|
{in: rep(maxFileNameLength, '☺'), ok: true},
|
||||||
|
{in: rep(maxFileNameLength+1, '☺'), ok: false},
|
||||||
|
{in: rep(maxFileNameLength, '你'), ok: true},
|
||||||
|
{in: rep(maxFileNameLength+1, '你'), ok: false},
|
||||||
|
{in: "/ok/ok", ok: true},
|
||||||
|
{in: "/ok/" + rep(maxFileNameLength, 'a') + "/ok", ok: true},
|
||||||
|
{in: "/ok/" + rep(maxFileNameLength+1, 'a') + "/ok", ok: false},
|
||||||
|
{in: "/ok/" + rep(maxFileNameLength, '£') + "/ok", ok: true},
|
||||||
|
{in: "/ok/" + rep(maxFileNameLength+1, '£') + "/ok", ok: false},
|
||||||
|
{in: "/ok/" + rep(maxFileNameLength, '☺') + "/ok", ok: true},
|
||||||
|
{in: "/ok/" + rep(maxFileNameLength+1, '☺') + "/ok", ok: false},
|
||||||
|
{in: "/ok/" + rep(maxFileNameLength, '你') + "/ok", ok: true},
|
||||||
|
{in: "/ok/" + rep(maxFileNameLength+1, '你') + "/ok", ok: false},
|
||||||
|
} {
|
||||||
|
|
||||||
|
err := checkPathLength(test.in)
|
||||||
|
assert.Equal(t, test.ok, err == nil, test.in)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -4,8 +4,10 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -27,16 +29,74 @@ var retryErrorCodes = []int{
|
|||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this resp and err
|
// shouldRetry returns a boolean as to whether this resp and err
|
||||||
// deserve to be retried. It returns the err as a convenience
|
// deserve to be retried. It returns the err as a convenience
|
||||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||||
|
if fserrors.ContextError(ctx, &err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
// Detect this error which the integration tests provoke
|
||||||
|
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
|
||||||
|
//
|
||||||
|
// https://1fichier.com/api.html
|
||||||
|
//
|
||||||
|
// file/ls.cgi is limited :
|
||||||
|
//
|
||||||
|
// Warning (can be changed in case of abuses) :
|
||||||
|
// List all files of the account is limited to 1 request per hour.
|
||||||
|
// List folders is limited to 5 000 results and 1 request per folder per 30s.
|
||||||
|
if err != nil && strings.Contains(err.Error(), "Flood detected") {
|
||||||
|
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
}
|
||||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||||
}
|
}
|
||||||
|
|
||||||
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
|
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
|
||||||
|
|
||||||
|
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, leaf string, directoryID string, err error) {
|
||||||
|
// Create the directory for the object if it doesn't exist
|
||||||
|
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Temporary Object under construction
|
||||||
|
o = &Object{
|
||||||
|
fs: f,
|
||||||
|
remote: remote,
|
||||||
|
}
|
||||||
|
return o, leaf, directoryID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
|
||||||
|
request := FileInfoRequest{
|
||||||
|
URL: url,
|
||||||
|
}
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/file/info.cgi",
|
||||||
|
}
|
||||||
|
|
||||||
|
var file File
|
||||||
|
err := f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(ctx, &opts, &request, &file)
|
||||||
|
return shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't read file info")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &file, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// maybe do some actual validation later if necessary
|
||||||
|
func validToken(token *GetTokenResponse) bool {
|
||||||
|
return token.Status == "OK"
|
||||||
|
}
|
||||||
|
|
||||||
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
|
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
|
||||||
request := DownloadRequest{
|
request := DownloadRequest{
|
||||||
URL: url,
|
URL: url,
|
||||||
Single: 1,
|
Single: 1,
|
||||||
|
Pass: f.opt.FilePassword,
|
||||||
}
|
}
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
@@ -46,7 +106,8 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
|
|||||||
var token GetTokenResponse
|
var token GetTokenResponse
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
err := f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
|
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
|
||||||
return shouldRetry(resp, err)
|
doretry, err := shouldRetry(ctx, resp, err)
|
||||||
|
return doretry || !validToken(&token), err
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't list files")
|
return nil, errors.Wrap(err, "couldn't list files")
|
||||||
@@ -65,16 +126,22 @@ func fileFromSharedFile(file *SharedFile) File {
|
|||||||
|
|
||||||
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
|
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
RootURL: "https://1fichier.com/dir/",
|
RootURL: "https://1fichier.com/dir/",
|
||||||
Path: id,
|
Path: id,
|
||||||
Parameters: map[string][]string{"json": {"1"}},
|
Parameters: map[string][]string{"json": {"1"}},
|
||||||
|
ContentType: "application/x-www-form-urlencoded",
|
||||||
|
}
|
||||||
|
if f.opt.FolderPassword != "" {
|
||||||
|
opts.Method = "POST"
|
||||||
|
opts.Parameters = nil
|
||||||
|
opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
|
||||||
}
|
}
|
||||||
|
|
||||||
var sharedFiles SharedFolderResponse
|
var sharedFiles SharedFolderResponse
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
|
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't list files")
|
return nil, errors.Wrap(err, "couldn't list files")
|
||||||
@@ -103,7 +170,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
|
|||||||
filesList = &FilesList{}
|
filesList = &FilesList{}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
|
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't list files")
|
return nil, errors.Wrap(err, "couldn't list files")
|
||||||
@@ -131,7 +198,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
|
|||||||
foldersList = &FoldersList{}
|
foldersList = &FoldersList{}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
|
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't list folders")
|
return nil, errors.Wrap(err, "couldn't list folders")
|
||||||
@@ -225,7 +292,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
|
|||||||
response = &MakeFolderResponse{}
|
response = &MakeFolderResponse{}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
|
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't create folder")
|
return nil, errors.Wrap(err, "couldn't create folder")
|
||||||
@@ -252,13 +319,13 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
|
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't remove folder")
|
return nil, errors.Wrap(err, "couldn't remove folder")
|
||||||
}
|
}
|
||||||
if response.Status != "OK" {
|
if response.Status != "OK" {
|
||||||
return nil, errors.New("Can't remove non-empty dir")
|
return nil, errors.Errorf("can't remove folder: %s", response.Message)
|
||||||
}
|
}
|
||||||
|
|
||||||
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
|
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
|
||||||
@@ -281,7 +348,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
|
|||||||
response = &GenericOKResponse{}
|
response = &GenericOKResponse{}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
|
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -293,6 +360,84 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
|
|||||||
return response, nil
|
return response, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename string) (response *MoveFileResponse, err error) {
|
||||||
|
request := &MoveFileRequest{
|
||||||
|
URLs: []string{url},
|
||||||
|
FolderID: folderID,
|
||||||
|
Rename: rename,
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/file/mv.cgi",
|
||||||
|
}
|
||||||
|
|
||||||
|
response = &MoveFileResponse{}
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
|
||||||
|
return shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't copy file")
|
||||||
|
}
|
||||||
|
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
|
||||||
|
request := &CopyFileRequest{
|
||||||
|
URLs: []string{url},
|
||||||
|
FolderID: folderID,
|
||||||
|
Rename: rename,
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/file/cp.cgi",
|
||||||
|
}
|
||||||
|
|
||||||
|
response = &CopyFileResponse{}
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
|
||||||
|
return shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't copy file")
|
||||||
|
}
|
||||||
|
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
|
||||||
|
request := &RenameFileRequest{
|
||||||
|
URLs: []RenameFileURL{
|
||||||
|
{
|
||||||
|
URL: url,
|
||||||
|
Filename: newName,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/file/rename.cgi",
|
||||||
|
}
|
||||||
|
|
||||||
|
response = &RenameFileResponse{}
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
|
||||||
|
return shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't rename file")
|
||||||
|
}
|
||||||
|
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
|
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
|
||||||
// fs.Debugf(f, "Requesting Upload node")
|
// fs.Debugf(f, "Requesting Upload node")
|
||||||
|
|
||||||
@@ -305,7 +450,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
|
|||||||
response = &GetUploadNodeResponse{}
|
response = &GetUploadNodeResponse{}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
|
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "didnt got an upload node")
|
return nil, errors.Wrap(err, "didnt got an upload node")
|
||||||
@@ -348,7 +493,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
|
|||||||
|
|
||||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||||
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
|
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -382,7 +527,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
|
|||||||
response = &EndFileUploadResponse{}
|
response = &EndFileUploadResponse{}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
|
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -35,9 +35,7 @@ func init() {
|
|||||||
fs.Register(&fs.RegInfo{
|
fs.Register(&fs.RegInfo{
|
||||||
Name: "fichier",
|
Name: "fichier",
|
||||||
Description: "1Fichier",
|
Description: "1Fichier",
|
||||||
Config: func(name string, config configmap.Mapper) {
|
NewFs: NewFs,
|
||||||
},
|
|
||||||
NewFs: NewFs,
|
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
|
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
|
||||||
Name: "api_key",
|
Name: "api_key",
|
||||||
@@ -46,6 +44,18 @@ func init() {
|
|||||||
Name: "shared_folder",
|
Name: "shared_folder",
|
||||||
Required: false,
|
Required: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Help: "If you want to download a shared file that is password protected, add this parameter",
|
||||||
|
Name: "file_password",
|
||||||
|
Required: false,
|
||||||
|
Advanced: true,
|
||||||
|
IsPassword: true,
|
||||||
|
}, {
|
||||||
|
Help: "If you want to list the files in a shared folder that is password protected, add this parameter",
|
||||||
|
Name: "folder_password",
|
||||||
|
Required: false,
|
||||||
|
Advanced: true,
|
||||||
|
IsPassword: true,
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
@@ -77,9 +87,11 @@ func init() {
|
|||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
APIKey string `config:"api_key"`
|
APIKey string `config:"api_key"`
|
||||||
SharedFolder string `config:"shared_folder"`
|
SharedFolder string `config:"shared_folder"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
FilePassword string `config:"file_password"`
|
||||||
|
FolderPassword string `config:"folder_password"`
|
||||||
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs is the interface a cloud storage system must provide
|
// Fs is the interface a cloud storage system must provide
|
||||||
@@ -167,7 +179,7 @@ func (f *Fs) Features() *fs.Features {
|
|||||||
//
|
//
|
||||||
// On Windows avoid single character remote names as they can be mixed
|
// On Windows avoid single character remote names as they can be mixed
|
||||||
// up with drive letters.
|
// up with drive letters.
|
||||||
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(config, opt)
|
err := configstruct.Set(config, opt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -186,16 +198,17 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
|||||||
name: name,
|
name: name,
|
||||||
root: root,
|
root: root,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
|
||||||
baseClient: &http.Client{},
|
baseClient: &http.Client{},
|
||||||
}
|
}
|
||||||
|
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
DuplicateFiles: true,
|
DuplicateFiles: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
}).Fill(f)
|
ReadMimeType: true,
|
||||||
|
}).Fill(ctx, f)
|
||||||
|
|
||||||
client := fshttp.NewClient(fs.Config)
|
client := fshttp.NewClient(ctx)
|
||||||
|
|
||||||
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
|
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
|
||||||
|
|
||||||
@@ -203,8 +216,6 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
|||||||
|
|
||||||
f.dirCache = dircache.New(root, rootID, f)
|
f.dirCache = dircache.New(root, rootID, f)
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
// Find the current root
|
// Find the current root
|
||||||
err = f.dirCache.FindRoot(ctx, false)
|
err = f.dirCache.FindRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -227,7 +238,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
f.features.Fill(&tempF)
|
f.features.Fill(ctx, &tempF)
|
||||||
// XXX: update the old f here instead of returning tempF, since
|
// XXX: update the old f here instead of returning tempF, since
|
||||||
// `features` were already filled with functions having *f as a receiver.
|
// `features` were already filled with functions having *f as a receiver.
|
||||||
// See https://github.com/rclone/rclone/issues/2182
|
// See https://github.com/rclone/rclone/issues/2182
|
||||||
@@ -306,10 +317,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|||||||
// will return the object and the error, otherwise will return
|
// will return the object and the error, otherwise will return
|
||||||
// nil and the error
|
// nil and the error
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
exisitingObj, err := f.NewObject(ctx, src.Remote())
|
existingObj, err := f.NewObject(ctx, src.Remote())
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
|
return existingObj, existingObj.Update(ctx, in, src, options...)
|
||||||
case fs.ErrorObjectNotFound:
|
case fs.ErrorObjectNotFound:
|
||||||
// Not found so create it
|
// Not found so create it
|
||||||
return f.PutUnchecked(ctx, in, src, options...)
|
return f.PutUnchecked(ctx, in, src, options...)
|
||||||
@@ -323,7 +334,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
|||||||
// This will create a duplicate if we upload a new file without
|
// This will create a duplicate if we upload a new file without
|
||||||
// checking to see if there is one already - use Put() for that.
|
// checking to see if there is one already - use Put() for that.
|
||||||
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
if size > int64(100e9) {
|
if size > int64(300e9) {
|
||||||
return nil, errors.New("File too big, cant upload")
|
return nil, errors.New("File too big, cant upload")
|
||||||
} else if size == 0 {
|
} else if size == 0 {
|
||||||
return nil, fs.ErrorCantUploadEmptyFiles
|
return nil, fs.ErrorCantUploadEmptyFiles
|
||||||
@@ -349,8 +360,10 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(fileUploadResponse.Links) != 1 {
|
if len(fileUploadResponse.Links) == 0 {
|
||||||
return nil, errors.New("unexpected amount of files")
|
return nil, errors.New("upload response not found")
|
||||||
|
} else if len(fileUploadResponse.Links) > 1 {
|
||||||
|
fs.Debugf(remote, "Multiple upload responses found, using the first")
|
||||||
}
|
}
|
||||||
|
|
||||||
link := fileUploadResponse.Links[0]
|
link := fileUploadResponse.Links[0]
|
||||||
@@ -364,7 +377,6 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
|
|||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
file: File{
|
file: File{
|
||||||
ACL: 0,
|
|
||||||
CDN: 0,
|
CDN: 0,
|
||||||
Checksum: link.Whirlpool,
|
Checksum: link.Whirlpool,
|
||||||
ContentType: "",
|
ContentType: "",
|
||||||
@@ -417,9 +429,109 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Move src to this remote using server side move operations.
|
||||||
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
|
srcObj, ok := src.(*Object)
|
||||||
|
if !ok {
|
||||||
|
fs.Debugf(src, "Can't move - not same remote type")
|
||||||
|
return nil, fs.ErrorCantMove
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find current directory ID
|
||||||
|
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create temporary object
|
||||||
|
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If it is in the correct directory, just rename it
|
||||||
|
var url string
|
||||||
|
if currentDirectoryID == directoryID {
|
||||||
|
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't rename file")
|
||||||
|
}
|
||||||
|
if resp.Status != "OK" {
|
||||||
|
return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
|
||||||
|
}
|
||||||
|
url = resp.URLs[0].URL
|
||||||
|
} else {
|
||||||
|
folderID, err := strconv.Atoi(directoryID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't move file")
|
||||||
|
}
|
||||||
|
if resp.Status != "OK" {
|
||||||
|
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
|
||||||
|
}
|
||||||
|
url = resp.URLs[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
file, err := f.readFileInfo(ctx, url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.New("couldn't read file data")
|
||||||
|
}
|
||||||
|
dstObj.setMetaData(*file)
|
||||||
|
return dstObj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy src to this remote using server side move operations.
|
||||||
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
|
srcObj, ok := src.(*Object)
|
||||||
|
if !ok {
|
||||||
|
fs.Debugf(src, "Can't move - not same remote type")
|
||||||
|
return nil, fs.ErrorCantMove
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create temporary object
|
||||||
|
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
folderID, err := strconv.Atoi(directoryID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "couldn't move file")
|
||||||
|
}
|
||||||
|
if resp.Status != "OK" {
|
||||||
|
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.New("couldn't read file data")
|
||||||
|
}
|
||||||
|
dstObj.setMetaData(*file)
|
||||||
|
return dstObj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||||
|
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
||||||
|
o, err := f.NewObject(ctx, remote)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return o.(*Object).file.URL, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
// Check the interfaces are satisfied
|
||||||
var (
|
var (
|
||||||
_ fs.Fs = (*Fs)(nil)
|
_ fs.Fs = (*Fs)(nil)
|
||||||
|
_ fs.Mover = (*Fs)(nil)
|
||||||
|
_ fs.Copier = (*Fs)(nil)
|
||||||
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||||
_ dircache.DirCacher = (*Fs)(nil)
|
_ dircache.DirCacher = (*Fs)(nil)
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -4,13 +4,11 @@ package fichier
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
func TestIntegration(t *testing.T) {
|
func TestIntegration(t *testing.T) {
|
||||||
fs.Config.LogLevel = fs.LogLevelDebug
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: "TestFichier:",
|
RemoteName: "TestFichier:",
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -72,6 +72,10 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
|
|||||||
//return errors.New("setting modtime is not supported for 1fichier remotes")
|
//return errors.New("setting modtime is not supported for 1fichier remotes")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (o *Object) setMetaData(file File) {
|
||||||
|
o.file = file
|
||||||
|
}
|
||||||
|
|
||||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||||
fs.FixRangeOption(options, o.file.Size)
|
fs.FixRangeOption(options, o.file.Size)
|
||||||
@@ -90,7 +94,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
|
|||||||
|
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.rest.Call(ctx, &opts)
|
resp, err = o.fs.rest.Call(ctx, &opts)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -1,5 +1,10 @@
|
|||||||
package fichier
|
package fichier
|
||||||
|
|
||||||
|
// FileInfoRequest is the request structure of the corresponding request
|
||||||
|
type FileInfoRequest struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
}
|
||||||
|
|
||||||
// ListFolderRequest is the request structure of the corresponding request
|
// ListFolderRequest is the request structure of the corresponding request
|
||||||
type ListFolderRequest struct {
|
type ListFolderRequest struct {
|
||||||
FolderID int `json:"folder_id"`
|
FolderID int `json:"folder_id"`
|
||||||
@@ -14,6 +19,7 @@ type ListFilesRequest struct {
|
|||||||
type DownloadRequest struct {
|
type DownloadRequest struct {
|
||||||
URL string `json:"url"`
|
URL string `json:"url"`
|
||||||
Single int `json:"single"`
|
Single int `json:"single"`
|
||||||
|
Pass string `json:"pass,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveFolderRequest is the request structure of the corresponding request
|
// RemoveFolderRequest is the request structure of the corresponding request
|
||||||
@@ -49,6 +55,65 @@ type MakeFolderResponse struct {
|
|||||||
FolderID int `json:"folder_id"`
|
FolderID int `json:"folder_id"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MoveFileRequest is the request structure of the corresponding request
|
||||||
|
type MoveFileRequest struct {
|
||||||
|
URLs []string `json:"urls"`
|
||||||
|
FolderID int `json:"destination_folder_id"`
|
||||||
|
Rename string `json:"rename,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFileResponse is the response structure of the corresponding request
|
||||||
|
type MoveFileResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
URLs []string `json:"urls"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyFileRequest is the request structure of the corresponding request
|
||||||
|
type CopyFileRequest struct {
|
||||||
|
URLs []string `json:"urls"`
|
||||||
|
FolderID int `json:"folder_id"`
|
||||||
|
Rename string `json:"rename,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyFileResponse is the response structure of the corresponding request
|
||||||
|
type CopyFileResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
Copied int `json:"copied"`
|
||||||
|
URLs []FileCopy `json:"urls"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileCopy is used in the the CopyFileResponse
|
||||||
|
type FileCopy struct {
|
||||||
|
FromURL string `json:"from_url"`
|
||||||
|
ToURL string `json:"to_url"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenameFileURL is the data structure to rename a single file
|
||||||
|
type RenameFileURL struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
Filename string `json:"filename"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenameFileRequest is the request structure of the corresponding request
|
||||||
|
type RenameFileRequest struct {
|
||||||
|
URLs []RenameFileURL `json:"urls"`
|
||||||
|
Pretty int `json:"pretty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenameFileResponse is the response structure of the corresponding request
|
||||||
|
type RenameFileResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
Renamed int `json:"renamed"`
|
||||||
|
URLs []struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
OldFilename string `json:"old_filename"`
|
||||||
|
NewFilename string `json:"new_filename"`
|
||||||
|
} `json:"urls"`
|
||||||
|
}
|
||||||
|
|
||||||
// GetUploadNodeResponse is the response structure of the corresponding request
|
// GetUploadNodeResponse is the response structure of the corresponding request
|
||||||
type GetUploadNodeResponse struct {
|
type GetUploadNodeResponse struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
@@ -86,7 +151,6 @@ type EndFileUploadResponse struct {
|
|||||||
|
|
||||||
// File is the structure how 1Fichier returns a File
|
// File is the structure how 1Fichier returns a File
|
||||||
type File struct {
|
type File struct {
|
||||||
ACL int `json:"acl"`
|
|
||||||
CDN int `json:"cdn"`
|
CDN int `json:"cdn"`
|
||||||
Checksum string `json:"checksum"`
|
Checksum string `json:"checksum"`
|
||||||
ContentType string `json:"content-type"`
|
ContentType string `json:"content-type"`
|
||||||
|
|||||||
409
backend/filefabric/api/types.go
Normal file
409
backend/filefabric/api/types.go
Normal file
@@ -0,0 +1,409 @@
|
|||||||
|
// Package api has type definitions for filefabric
|
||||||
|
//
|
||||||
|
// Converted from the API responses with help from https://mholt.github.io/json-to-go/
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// TimeFormat for parameters (UTC)
|
||||||
|
timeFormatParameters = `2006-01-02 15:04:05`
|
||||||
|
// "2020-08-11 10:10:04" for JSON parsing
|
||||||
|
timeFormatJSON = `"` + timeFormatParameters + `"`
|
||||||
|
)
|
||||||
|
|
||||||
|
// Time represents represents date and time information for the
|
||||||
|
// filefabric API
|
||||||
|
type Time time.Time
|
||||||
|
|
||||||
|
// MarshalJSON turns a Time into JSON (in UTC)
|
||||||
|
func (t *Time) MarshalJSON() (out []byte, err error) {
|
||||||
|
timeString := (*time.Time)(t).UTC().Format(timeFormatJSON)
|
||||||
|
return []byte(timeString), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var zeroTime = []byte(`"0000-00-00 00:00:00"`)
|
||||||
|
|
||||||
|
// UnmarshalJSON turns JSON into a Time (in UTC)
|
||||||
|
func (t *Time) UnmarshalJSON(data []byte) error {
|
||||||
|
// Set a Zero time.Time if we receive a zero time input
|
||||||
|
if bytes.Equal(data, zeroTime) {
|
||||||
|
*t = Time(time.Time{})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
newT, err := time.Parse(timeFormatJSON, string(data))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*t = Time(newT)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// String turns a Time into a string in UTC suitable for the API
|
||||||
|
// parameters
|
||||||
|
func (t Time) String() string {
|
||||||
|
return time.Time(t).UTC().Format(timeFormatParameters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int represents an integer which can be represented in JSON as a
|
||||||
|
// quoted integer or an integer.
|
||||||
|
type Int int
|
||||||
|
|
||||||
|
// MarshalJSON turns a Int into JSON
|
||||||
|
func (i *Int) MarshalJSON() (out []byte, err error) {
|
||||||
|
return json.Marshal((*int)(i))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON turns JSON into a Int
|
||||||
|
func (i *Int) UnmarshalJSON(data []byte) error {
|
||||||
|
if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
|
||||||
|
data = data[1 : len(data)-1]
|
||||||
|
}
|
||||||
|
return json.Unmarshal(data, (*int)(i))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status return returned in all status responses
|
||||||
|
type Status struct {
|
||||||
|
Code string `json:"status"`
|
||||||
|
Message string `json:"statusmessage"`
|
||||||
|
TaskID string `json:"taskid"`
|
||||||
|
// Warning string `json:"warning"` // obsolete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status statisfies the error interface
|
||||||
|
func (e *Status) Error() string {
|
||||||
|
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OK returns true if the status is all good
|
||||||
|
func (e *Status) OK() bool {
|
||||||
|
return e.Code == "ok"
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCode returns the status code if any
|
||||||
|
func (e *Status) GetCode() string {
|
||||||
|
return e.Code
|
||||||
|
}
|
||||||
|
|
||||||
|
// OKError defines an interface for items which can be OK or be an error
|
||||||
|
type OKError interface {
|
||||||
|
error
|
||||||
|
OK() bool
|
||||||
|
GetCode() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check Status satisfies the OKError interface
|
||||||
|
var _ OKError = (*Status)(nil)
|
||||||
|
|
||||||
|
// EmptyResponse is response which just returns the error condition
|
||||||
|
type EmptyResponse struct {
|
||||||
|
Status
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTokenByAuthTokenResponse is the response to getTokenByAuthToken
|
||||||
|
type GetTokenByAuthTokenResponse struct {
|
||||||
|
Status
|
||||||
|
Token string `json:"token"`
|
||||||
|
UserID string `json:"userid"`
|
||||||
|
AllowLoginRemember string `json:"allowloginremember"`
|
||||||
|
LastLogin Time `json:"lastlogin"`
|
||||||
|
AutoLoginCode string `json:"autologincode"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplianceInfo is the response to getApplianceInfo
|
||||||
|
type ApplianceInfo struct {
|
||||||
|
Status
|
||||||
|
Sitetitle string `json:"sitetitle"`
|
||||||
|
OauthLoginSupport string `json:"oauthloginsupport"`
|
||||||
|
IsAppliance string `json:"isappliance"`
|
||||||
|
SoftwareVersion string `json:"softwareversion"`
|
||||||
|
SoftwareVersionLabel string `json:"softwareversionlabel"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFolderContentsResponse is returned from getFolderContents
|
||||||
|
type GetFolderContentsResponse struct {
|
||||||
|
Status
|
||||||
|
Total int `json:"total,string"`
|
||||||
|
Items []Item `json:"filelist"`
|
||||||
|
Folder Item `json:"folder"`
|
||||||
|
From Int `json:"from"`
|
||||||
|
//Count int `json:"count"`
|
||||||
|
Pid string `json:"pid"`
|
||||||
|
RefreshResult Status `json:"refreshresult"`
|
||||||
|
// Curfolder Item `json:"curfolder"` - sometimes returned as "ROOT"?
|
||||||
|
Parents []Item `json:"parents"`
|
||||||
|
CustomPermissions CustomPermissions `json:"custompermissions"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ItemType determine whether it is a file or a folder
|
||||||
|
type ItemType uint8
|
||||||
|
|
||||||
|
// Types of things in Item
|
||||||
|
const (
|
||||||
|
ItemTypeFile ItemType = 0
|
||||||
|
ItemTypeFolder ItemType = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
// Item ia a File or a Folder
|
||||||
|
type Item struct {
|
||||||
|
ID string `json:"fi_id"`
|
||||||
|
PID string `json:"fi_pid"`
|
||||||
|
// UID string `json:"fi_uid"`
|
||||||
|
Name string `json:"fi_name"`
|
||||||
|
// S3Name string `json:"fi_s3name"`
|
||||||
|
// Extension string `json:"fi_extension"`
|
||||||
|
// Description string `json:"fi_description"`
|
||||||
|
Type ItemType `json:"fi_type,string"`
|
||||||
|
// Created Time `json:"fi_created"`
|
||||||
|
Size int64 `json:"fi_size,string"`
|
||||||
|
ContentType string `json:"fi_contenttype"`
|
||||||
|
// Tags string `json:"fi_tags"`
|
||||||
|
// MainCode string `json:"fi_maincode"`
|
||||||
|
// Public int `json:"fi_public,string"`
|
||||||
|
// Provider string `json:"fi_provider"`
|
||||||
|
// ProviderFolder string `json:"fi_providerfolder"` // folder
|
||||||
|
// Encrypted int `json:"fi_encrypted,string"`
|
||||||
|
// StructType string `json:"fi_structtype"`
|
||||||
|
// Bname string `json:"fi_bname"` // folder
|
||||||
|
// OrgID string `json:"fi_orgid"`
|
||||||
|
// Favorite int `json:"fi_favorite,string"`
|
||||||
|
// IspartOf string `json:"fi_ispartof"` // folder
|
||||||
|
Modified Time `json:"fi_modified"`
|
||||||
|
// LastAccessed Time `json:"fi_lastaccessed"`
|
||||||
|
// Hits int64 `json:"fi_hits,string"`
|
||||||
|
// IP string `json:"fi_ip"` // folder
|
||||||
|
// BigDescription string `json:"fi_bigdescription"`
|
||||||
|
LocalTime Time `json:"fi_localtime"`
|
||||||
|
// OrgfolderID string `json:"fi_orgfolderid"`
|
||||||
|
// StorageIP string `json:"fi_storageip"` // folder
|
||||||
|
// RemoteTime Time `json:"fi_remotetime"`
|
||||||
|
// ProviderOptions string `json:"fi_provideroptions"`
|
||||||
|
// Access string `json:"fi_access"`
|
||||||
|
// Hidden string `json:"fi_hidden"` // folder
|
||||||
|
// VersionOf string `json:"fi_versionof"`
|
||||||
|
Trash bool `json:"trash"`
|
||||||
|
// Isbucket string `json:"isbucket"` // filelist
|
||||||
|
SubFolders int64 `json:"subfolders"` // folder
|
||||||
|
}
|
||||||
|
|
||||||
|
// ItemFields is a | separated list of fields in Item
|
||||||
|
var ItemFields = mustFields(Item{})
|
||||||
|
|
||||||
|
// fields returns the JSON fields in use by opt as a | separated
|
||||||
|
// string.
|
||||||
|
func fields(opt interface{}) (pipeTags string, err error) {
|
||||||
|
var tags []string
|
||||||
|
def := reflect.ValueOf(opt)
|
||||||
|
defType := def.Type()
|
||||||
|
for i := 0; i < def.NumField(); i++ {
|
||||||
|
field := defType.Field(i)
|
||||||
|
tag, ok := field.Tag.Lookup("json")
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if comma := strings.IndexRune(tag, ','); comma >= 0 {
|
||||||
|
tag = tag[:comma]
|
||||||
|
}
|
||||||
|
if tag == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
tags = append(tags, tag)
|
||||||
|
}
|
||||||
|
return strings.Join(tags, "|"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustFields returns the JSON fields in use by opt as a | separated
|
||||||
|
// string. It panics on failure.
|
||||||
|
func mustFields(opt interface{}) string {
|
||||||
|
tags, err := fields(opt)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return tags
|
||||||
|
}
|
||||||
|
|
||||||
|
// CustomPermissions is returned as part of GetFolderContentsResponse
|
||||||
|
type CustomPermissions struct {
|
||||||
|
Upload string `json:"upload"`
|
||||||
|
CreateSubFolder string `json:"createsubfolder"`
|
||||||
|
Rename string `json:"rename"`
|
||||||
|
Delete string `json:"delete"`
|
||||||
|
Move string `json:"move"`
|
||||||
|
ManagePermissions string `json:"managepermissions"`
|
||||||
|
ListOnly string `json:"listonly"`
|
||||||
|
VisibleInTrash string `json:"visibleintrash"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoCreateNewFolderResponse is response from foCreateNewFolder
|
||||||
|
type DoCreateNewFolderResponse struct {
|
||||||
|
Status
|
||||||
|
Item Item `json:"file"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoInitUploadResponse is response from doInitUpload
|
||||||
|
type DoInitUploadResponse struct {
|
||||||
|
Status
|
||||||
|
ProviderID string `json:"providerid"`
|
||||||
|
UploadCode string `json:"uploadcode"`
|
||||||
|
FileType string `json:"filetype"`
|
||||||
|
DirectUploadSupport string `json:"directuploadsupport"`
|
||||||
|
ResumeAllowed string `json:"resumeallowed"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploaderResponse is returned from /cgi-bin/uploader/uploader1.cgi
|
||||||
|
//
|
||||||
|
// Sometimes the response is returned as XML and sometimes as JSON
|
||||||
|
type UploaderResponse struct {
|
||||||
|
FileSize int64 `xml:"filesize" json:"filesize,string"`
|
||||||
|
MD5 string `xml:"md5" json:"md5"`
|
||||||
|
Success string `xml:"success" json:"success"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadStatus is returned from getUploadStatus
|
||||||
|
type UploadStatus struct {
|
||||||
|
Status
|
||||||
|
UploadCode string `json:"uploadcode"`
|
||||||
|
Metafile string `json:"metafile"`
|
||||||
|
Percent int `json:"percent,string"`
|
||||||
|
Uploaded int64 `json:"uploaded,string"`
|
||||||
|
Size int64 `json:"size,string"`
|
||||||
|
Filename string `json:"filename"`
|
||||||
|
Nofile string `json:"nofile"`
|
||||||
|
Completed string `json:"completed"`
|
||||||
|
Completsuccess string `json:"completsuccess"`
|
||||||
|
Completerror string `json:"completerror"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoCompleteUploadResponse is the response to doCompleteUpload
|
||||||
|
type DoCompleteUploadResponse struct {
|
||||||
|
Status
|
||||||
|
UploadedSize int64 `json:"uploadedsize,string"`
|
||||||
|
StorageIP string `json:"storageip"`
|
||||||
|
UploadedName string `json:"uploadedname"`
|
||||||
|
// Versioned []interface{} `json:"versioned"`
|
||||||
|
// VersionedID int `json:"versionedid"`
|
||||||
|
// Comment interface{} `json:"comment"`
|
||||||
|
File Item `json:"file"`
|
||||||
|
// UsSize string `json:"us_size"`
|
||||||
|
// PaSize string `json:"pa_size"`
|
||||||
|
// SpaceInfo SpaceInfo `json:"spaceinfo"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Providers is returned as part of UploadResponse
|
||||||
|
type Providers struct {
|
||||||
|
Max string `json:"max"`
|
||||||
|
Used string `json:"used"`
|
||||||
|
ID string `json:"id"`
|
||||||
|
Private string `json:"private"`
|
||||||
|
Limit string `json:"limit"`
|
||||||
|
Percent int `json:"percent"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total is returned as part of UploadResponse
|
||||||
|
type Total struct {
|
||||||
|
Max string `json:"max"`
|
||||||
|
Used string `json:"used"`
|
||||||
|
ID string `json:"id"`
|
||||||
|
Priused string `json:"priused"`
|
||||||
|
Primax string `json:"primax"`
|
||||||
|
Limit string `json:"limit"`
|
||||||
|
Percent int `json:"percent"`
|
||||||
|
Pripercent int `json:"pripercent"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadResponse is returned as part of SpaceInfo
|
||||||
|
type UploadResponse struct {
|
||||||
|
Providers []Providers `json:"providers"`
|
||||||
|
Total Total `json:"total"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpaceInfo is returned as part of DoCompleteUploadResponse
|
||||||
|
type SpaceInfo struct {
|
||||||
|
Response UploadResponse `json:"response"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteResponse is returned from doDeleteFile
|
||||||
|
type DeleteResponse struct {
|
||||||
|
Status
|
||||||
|
Deleted []string `json:"deleted"`
|
||||||
|
Errors []interface{} `json:"errors"`
|
||||||
|
ID string `json:"fi_id"`
|
||||||
|
BackgroundTask int `json:"backgroundtask"`
|
||||||
|
UsSize string `json:"us_size"`
|
||||||
|
PaSize string `json:"pa_size"`
|
||||||
|
//SpaceInfo SpaceInfo `json:"spaceinfo"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileResponse is returned from doRenameFile
|
||||||
|
type FileResponse struct {
|
||||||
|
Status
|
||||||
|
Item Item `json:"file"`
|
||||||
|
Exists string `json:"exists"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFilesResponse is returned from doMoveFiles
|
||||||
|
type MoveFilesResponse struct {
|
||||||
|
Status
|
||||||
|
Filesleft string `json:"filesleft"`
|
||||||
|
Addedtobackground string `json:"addedtobackground"`
|
||||||
|
Moved string `json:"moved"`
|
||||||
|
Item Item `json:"file"`
|
||||||
|
IDs []string `json:"fi_ids"`
|
||||||
|
Length int `json:"length"`
|
||||||
|
DirID string `json:"dir_id"`
|
||||||
|
MovedObjects []Item `json:"movedobjects"`
|
||||||
|
// FolderTasks []interface{} `json:"foldertasks"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// TasksResponse is the response to getUserBackgroundTasks
|
||||||
|
type TasksResponse struct {
|
||||||
|
Status
|
||||||
|
Tasks []Task `json:"tasks"`
|
||||||
|
Total string `json:"total"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// BtData is part of TasksResponse
|
||||||
|
type BtData struct {
|
||||||
|
Callback string `json:"callback"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Task describes a task returned in TasksResponse
|
||||||
|
type Task struct {
|
||||||
|
BtID string `json:"bt_id"`
|
||||||
|
UsID string `json:"us_id"`
|
||||||
|
BtType string `json:"bt_type"`
|
||||||
|
BtData BtData `json:"bt_data"`
|
||||||
|
BtStatustext string `json:"bt_statustext"`
|
||||||
|
BtStatusdata string `json:"bt_statusdata"`
|
||||||
|
BtMessage string `json:"bt_message"`
|
||||||
|
BtProcent string `json:"bt_procent"`
|
||||||
|
BtAdded string `json:"bt_added"`
|
||||||
|
BtStatus string `json:"bt_status"`
|
||||||
|
BtCompleted string `json:"bt_completed"`
|
||||||
|
BtTitle string `json:"bt_title"`
|
||||||
|
BtCredentials string `json:"bt_credentials"`
|
||||||
|
BtHidden string `json:"bt_hidden"`
|
||||||
|
BtAutoremove string `json:"bt_autoremove"`
|
||||||
|
BtDevsite string `json:"bt_devsite"`
|
||||||
|
BtPriority string `json:"bt_priority"`
|
||||||
|
BtReport string `json:"bt_report"`
|
||||||
|
BtSitemarker string `json:"bt_sitemarker"`
|
||||||
|
BtExecuteafter string `json:"bt_executeafter"`
|
||||||
|
BtCompletestatus string `json:"bt_completestatus"`
|
||||||
|
BtSubtype string `json:"bt_subtype"`
|
||||||
|
BtCanceled string `json:"bt_canceled"`
|
||||||
|
Callback string `json:"callback"`
|
||||||
|
CanBeCanceled bool `json:"canbecanceled"`
|
||||||
|
CanBeRestarted bool `json:"canberestarted"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
Settings string `json:"settings"`
|
||||||
|
}
|
||||||
1352
backend/filefabric/filefabric.go
Normal file
1352
backend/filefabric/filefabric.go
Normal file
File diff suppressed because it is too large
Load Diff
17
backend/filefabric/filefabric_test.go
Normal file
17
backend/filefabric/filefabric_test.go
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
// Test filefabric filesystem interface
|
||||||
|
package filefabric_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/backend/filefabric"
|
||||||
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestIntegration runs integration tests against the remote
|
||||||
|
func TestIntegration(t *testing.T) {
|
||||||
|
fstests.Run(t, &fstests.Opt{
|
||||||
|
RemoteName: "TestFileFabric:",
|
||||||
|
NilObject: (*filefabric.Object)(nil),
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -5,8 +5,8 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"io"
|
"io"
|
||||||
|
"net"
|
||||||
"net/textproto"
|
"net/textproto"
|
||||||
"os"
|
|
||||||
"path"
|
"path"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -16,16 +16,30 @@ import (
|
|||||||
"github.com/jlaffaye/ftp"
|
"github.com/jlaffaye/ftp"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
|
"github.com/rclone/rclone/lib/env"
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
"github.com/rclone/rclone/lib/readers"
|
"github.com/rclone/rclone/lib/readers"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
currentUser = env.CurrentUser()
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
minSleep = 10 * time.Millisecond
|
||||||
|
maxSleep = 2 * time.Second
|
||||||
|
decayConstant = 2 // bigger for slower decay, exponential
|
||||||
|
)
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
func init() {
|
func init() {
|
||||||
fs.Register(&fs.RegInfo{
|
fs.Register(&fs.RegInfo{
|
||||||
@@ -42,7 +56,7 @@ func init() {
|
|||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "user",
|
Name: "user",
|
||||||
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
|
Help: "FTP username, leave blank for current username, " + currentUser,
|
||||||
}, {
|
}, {
|
||||||
Name: "port",
|
Name: "port",
|
||||||
Help: "FTP port, leave blank to use default (21)",
|
Help: "FTP port, leave blank to use default (21)",
|
||||||
@@ -53,16 +67,16 @@ func init() {
|
|||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "tls",
|
Name: "tls",
|
||||||
Help: `Use FTPS over TLS (Implicit)
|
Help: `Use Implicit FTPS (FTP over TLS)
|
||||||
When using implicit FTP over TLS the client will connect using TLS
|
When using implicit FTP over TLS the client connects using TLS
|
||||||
right from the start, which in turn breaks the compatibility with
|
right from the start which breaks compatibility with
|
||||||
non-TLS-aware servers. This is usually served over port 990 rather
|
non-TLS-aware servers. This is usually served over port 990 rather
|
||||||
than port 21. Cannot be used in combination with explicit FTP.`,
|
than port 21. Cannot be used in combination with explicit FTP.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "explicit_tls",
|
Name: "explicit_tls",
|
||||||
Help: `Use FTP over TLS (Explicit)
|
Help: `Use Explicit FTPS (FTP over TLS)
|
||||||
When using explicit FTP over TLS the client explicitly request
|
When using explicit FTP over TLS the client explicitly requests
|
||||||
security from the server in order to upgrade a plain text connection
|
security from the server in order to upgrade a plain text connection
|
||||||
to an encrypted one. Cannot be used in combination with implicit FTP.`,
|
to an encrypted one. Cannot be used in combination with implicit FTP.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
@@ -81,6 +95,27 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
|
|||||||
Help: "Disable using EPSV even if server advertises support",
|
Help: "Disable using EPSV even if server advertises support",
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "disable_mlsd",
|
||||||
|
Help: "Disable using MLSD even if server advertises support",
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "idle_timeout",
|
||||||
|
Default: fs.Duration(60 * time.Second),
|
||||||
|
Help: `Max time before closing idle connections
|
||||||
|
|
||||||
|
If no connections have been returned to the connection pool in the time
|
||||||
|
given, rclone will empty the connection pool.
|
||||||
|
|
||||||
|
Set to 0 to keep connections indefinitely.
|
||||||
|
`,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "close_timeout",
|
||||||
|
Help: "Maximum time to wait for a response to close.",
|
||||||
|
Default: fs.Duration(60 * time.Second),
|
||||||
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
@@ -107,22 +142,29 @@ type Options struct {
|
|||||||
Concurrency int `config:"concurrency"`
|
Concurrency int `config:"concurrency"`
|
||||||
SkipVerifyTLSCert bool `config:"no_check_certificate"`
|
SkipVerifyTLSCert bool `config:"no_check_certificate"`
|
||||||
DisableEPSV bool `config:"disable_epsv"`
|
DisableEPSV bool `config:"disable_epsv"`
|
||||||
|
DisableMLSD bool `config:"disable_mlsd"`
|
||||||
|
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||||
|
CloseTimeout fs.Duration `config:"close_timeout"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote FTP server
|
// Fs represents a remote FTP server
|
||||||
type Fs struct {
|
type Fs struct {
|
||||||
name string // name of this remote
|
name string // name of this remote
|
||||||
root string // the path we are working on if any
|
root string // the path we are working on if any
|
||||||
opt Options // parsed options
|
opt Options // parsed options
|
||||||
features *fs.Features // optional features
|
ci *fs.ConfigInfo // global config
|
||||||
|
features *fs.Features // optional features
|
||||||
url string
|
url string
|
||||||
user string
|
user string
|
||||||
pass string
|
pass string
|
||||||
dialAddr string
|
dialAddr string
|
||||||
poolMu sync.Mutex
|
poolMu sync.Mutex
|
||||||
pool []*ftp.ServerConn
|
pool []*ftp.ServerConn
|
||||||
|
drain *time.Timer // used to drain the pool when we stop using the connections
|
||||||
tokens *pacer.TokenDispenser
|
tokens *pacer.TokenDispenser
|
||||||
|
tlsConf *tls.Config
|
||||||
|
pacer *fs.Pacer // pacer for FTP connections
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes an FTP file
|
// Object describes an FTP file
|
||||||
@@ -199,51 +241,82 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
|
|||||||
return len(p), nil
|
return len(p), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// shouldRetry returns a boolean as to whether this err deserve to be
|
||||||
|
// retried. It returns the err as a convenience
|
||||||
|
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||||
|
if fserrors.ContextError(ctx, &err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
switch errX := err.(type) {
|
||||||
|
case *textproto.Error:
|
||||||
|
switch errX.Code {
|
||||||
|
case ftp.StatusNotAvailable:
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fserrors.ShouldRetry(err), err
|
||||||
|
}
|
||||||
|
|
||||||
// Open a new connection to the FTP server.
|
// Open a new connection to the FTP server.
|
||||||
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
|
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||||
fs.Debugf(f, "Connecting to FTP server")
|
fs.Debugf(f, "Connecting to FTP server")
|
||||||
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
|
|
||||||
if f.opt.TLS && f.opt.ExplicitTLS {
|
// Make ftp library dial with fshttp dialer optionally using TLS
|
||||||
fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
|
dial := func(network, address string) (conn net.Conn, err error) {
|
||||||
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
|
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
|
||||||
} else if f.opt.TLS {
|
if f.tlsConf != nil && err == nil {
|
||||||
tlsConfig := &tls.Config{
|
conn = tls.Client(conn, f.tlsConf)
|
||||||
ServerName: f.opt.Host,
|
|
||||||
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
|
|
||||||
}
|
}
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
|
return
|
||||||
|
}
|
||||||
|
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
|
||||||
|
|
||||||
|
if f.opt.TLS {
|
||||||
|
// Our dialer takes care of TLS but ftp library also needs tlsConf
|
||||||
|
// as a trigger for sending PSBZ and PROT options to server.
|
||||||
|
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
|
||||||
} else if f.opt.ExplicitTLS {
|
} else if f.opt.ExplicitTLS {
|
||||||
tlsConfig := &tls.Config{
|
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
|
||||||
ServerName: f.opt.Host,
|
// Initial connection needs to be cleartext for explicit TLS
|
||||||
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
|
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
|
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
|
||||||
}
|
}
|
||||||
if f.opt.DisableEPSV {
|
if f.opt.DisableEPSV {
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
|
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
|
||||||
}
|
}
|
||||||
if fs.Config.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
|
if f.opt.DisableMLSD {
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: fs.Config.Dump&fs.DumpAuth != 0}))
|
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
|
||||||
}
|
}
|
||||||
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
|
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
|
||||||
|
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
|
||||||
|
}
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
c, err = ftp.Dial(f.dialAddr, ftpConfig...)
|
||||||
|
if err != nil {
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
}
|
||||||
|
err = c.Login(f.user, f.pass)
|
||||||
|
if err != nil {
|
||||||
|
_ = c.Quit()
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
|
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
|
||||||
return nil, errors.Wrap(err, "ftpConnection Dial")
|
|
||||||
}
|
}
|
||||||
err = c.Login(f.user, f.pass)
|
return c, err
|
||||||
if err != nil {
|
|
||||||
_ = c.Quit()
|
|
||||||
fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
|
|
||||||
return nil, errors.Wrap(err, "ftpConnection Login")
|
|
||||||
}
|
|
||||||
return c, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get an FTP connection from the pool, or open a new one
|
// Get an FTP connection from the pool, or open a new one
|
||||||
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
|
func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||||
if f.opt.Concurrency > 0 {
|
if f.opt.Concurrency > 0 {
|
||||||
f.tokens.Get()
|
f.tokens.Get()
|
||||||
}
|
}
|
||||||
|
accounting.LimitTPS(ctx)
|
||||||
f.poolMu.Lock()
|
f.poolMu.Lock()
|
||||||
if len(f.pool) > 0 {
|
if len(f.pool) > 0 {
|
||||||
c = f.pool[0]
|
c = f.pool[0]
|
||||||
@@ -253,7 +326,7 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
|
|||||||
if c != nil {
|
if c != nil {
|
||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
c, err = f.ftpConnection()
|
c, err = f.ftpConnection(ctx)
|
||||||
if err != nil && f.opt.Concurrency > 0 {
|
if err != nil && f.opt.Concurrency > 0 {
|
||||||
f.tokens.Put()
|
f.tokens.Put()
|
||||||
}
|
}
|
||||||
@@ -292,12 +365,34 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
|||||||
}
|
}
|
||||||
f.poolMu.Lock()
|
f.poolMu.Lock()
|
||||||
f.pool = append(f.pool, c)
|
f.pool = append(f.pool, c)
|
||||||
|
if f.opt.IdleTimeout > 0 {
|
||||||
|
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
|
||||||
|
}
|
||||||
f.poolMu.Unlock()
|
f.poolMu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Drain the pool of any connections
|
||||||
|
func (f *Fs) drainPool(ctx context.Context) (err error) {
|
||||||
|
f.poolMu.Lock()
|
||||||
|
defer f.poolMu.Unlock()
|
||||||
|
if f.opt.IdleTimeout > 0 {
|
||||||
|
f.drain.Stop()
|
||||||
|
}
|
||||||
|
if len(f.pool) != 0 {
|
||||||
|
fs.Debugf(f, "closing %d unused connections", len(f.pool))
|
||||||
|
}
|
||||||
|
for i, c := range f.pool {
|
||||||
|
if cErr := c.Quit(); cErr != nil {
|
||||||
|
err = cErr
|
||||||
|
}
|
||||||
|
f.pool[i] = nil
|
||||||
|
}
|
||||||
|
f.pool = nil
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||||
ctx := context.Background()
|
|
||||||
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
|
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
@@ -311,7 +406,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
|||||||
}
|
}
|
||||||
user := opt.User
|
user := opt.User
|
||||||
if user == "" {
|
if user == "" {
|
||||||
user = os.Getenv("USER")
|
user = currentUser
|
||||||
}
|
}
|
||||||
port := opt.Port
|
port := opt.Port
|
||||||
if port == "" {
|
if port == "" {
|
||||||
@@ -323,22 +418,40 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
|||||||
if opt.TLS {
|
if opt.TLS {
|
||||||
protocol = "ftps://"
|
protocol = "ftps://"
|
||||||
}
|
}
|
||||||
|
if opt.TLS && opt.ExplicitTLS {
|
||||||
|
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
|
||||||
|
}
|
||||||
|
var tlsConfig *tls.Config
|
||||||
|
if opt.TLS || opt.ExplicitTLS {
|
||||||
|
tlsConfig = &tls.Config{
|
||||||
|
ServerName: opt.Host,
|
||||||
|
InsecureSkipVerify: opt.SkipVerifyTLSCert,
|
||||||
|
}
|
||||||
|
}
|
||||||
u := protocol + path.Join(dialAddr+"/", root)
|
u := protocol + path.Join(dialAddr+"/", root)
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
root: root,
|
root: root,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
|
ci: ci,
|
||||||
url: u,
|
url: u,
|
||||||
user: user,
|
user: user,
|
||||||
pass: pass,
|
pass: pass,
|
||||||
dialAddr: dialAddr,
|
dialAddr: dialAddr,
|
||||||
tokens: pacer.NewTokenDispenser(opt.Concurrency),
|
tokens: pacer.NewTokenDispenser(opt.Concurrency),
|
||||||
|
tlsConf: tlsConfig,
|
||||||
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
|
// set the pool drainer timer going
|
||||||
|
if f.opt.IdleTimeout > 0 {
|
||||||
|
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
|
||||||
|
}
|
||||||
// Make a connection and pool it to return errors early
|
// Make a connection and pool it to return errors early
|
||||||
c, err := f.getFtpConnection()
|
c, err := f.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "NewFs")
|
return nil, errors.Wrap(err, "NewFs")
|
||||||
}
|
}
|
||||||
@@ -365,6 +478,12 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
|||||||
return f, err
|
return f, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Shutdown the backend, closing any background tasks and any
|
||||||
|
// cached connections.
|
||||||
|
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||||
|
return f.drainPool(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
// translateErrorFile turns FTP errors into rclone errors if possible for a file
|
// translateErrorFile turns FTP errors into rclone errors if possible for a file
|
||||||
func translateErrorFile(err error) error {
|
func translateErrorFile(err error) error {
|
||||||
switch errX := err.(type) {
|
switch errX := err.(type) {
|
||||||
@@ -409,7 +528,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// findItem finds a directory entry for the name in its parent directory
|
// findItem finds a directory entry for the name in its parent directory
|
||||||
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
|
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
|
||||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||||
fullPath := path.Join(f.root, remote)
|
fullPath := path.Join(f.root, remote)
|
||||||
if fullPath == "" || fullPath == "." || fullPath == "/" {
|
if fullPath == "" || fullPath == "." || fullPath == "/" {
|
||||||
@@ -423,7 +542,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
|
|||||||
dir := path.Dir(fullPath)
|
dir := path.Dir(fullPath)
|
||||||
base := path.Base(fullPath)
|
base := path.Base(fullPath)
|
||||||
|
|
||||||
c, err := f.getFtpConnection()
|
c, err := f.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "findItem")
|
return nil, errors.Wrap(err, "findItem")
|
||||||
}
|
}
|
||||||
@@ -445,7 +564,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
|
|||||||
// it returns the error fs.ErrorObjectNotFound.
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
||||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||||
entry, err := f.findItem(remote)
|
entry, err := f.findItem(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -467,8 +586,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
|
|||||||
}
|
}
|
||||||
|
|
||||||
// dirExists checks the directory pointed to by remote exists or not
|
// dirExists checks the directory pointed to by remote exists or not
|
||||||
func (f *Fs) dirExists(remote string) (exists bool, err error) {
|
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
|
||||||
entry, err := f.findItem(remote)
|
entry, err := f.findItem(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, errors.Wrap(err, "dirExists")
|
return false, errors.Wrap(err, "dirExists")
|
||||||
}
|
}
|
||||||
@@ -489,7 +608,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
|
|||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
||||||
c, err := f.getFtpConnection()
|
c, err := f.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "list")
|
return nil, errors.Wrap(err, "list")
|
||||||
}
|
}
|
||||||
@@ -510,7 +629,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
// Wait for List for up to Timeout seconds
|
// Wait for List for up to Timeout seconds
|
||||||
timer := time.NewTimer(fs.Config.Timeout)
|
timer := time.NewTimer(f.ci.TimeoutOrInfinite())
|
||||||
select {
|
select {
|
||||||
case listErr = <-errchan:
|
case listErr = <-errchan:
|
||||||
timer.Stop()
|
timer.Stop()
|
||||||
@@ -527,7 +646,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
// doesn't exist, so check it really doesn't exist if no
|
// doesn't exist, so check it really doesn't exist if no
|
||||||
// entries found.
|
// entries found.
|
||||||
if len(files) == 0 {
|
if len(files) == 0 {
|
||||||
exists, err := f.dirExists(dir)
|
exists, err := f.dirExists(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "list")
|
return nil, errors.Wrap(err, "list")
|
||||||
}
|
}
|
||||||
@@ -580,7 +699,7 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
// nil and the error
|
// nil and the error
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
// fs.Debugf(f, "Trying to put file %s", src.Remote())
|
// fs.Debugf(f, "Trying to put file %s", src.Remote())
|
||||||
err := f.mkParentDir(src.Remote())
|
err := f.mkParentDir(ctx, src.Remote())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "Put mkParentDir failed")
|
return nil, errors.Wrap(err, "Put mkParentDir failed")
|
||||||
}
|
}
|
||||||
@@ -598,12 +717,12 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
|||||||
}
|
}
|
||||||
|
|
||||||
// getInfo reads the FileInfo for a path
|
// getInfo reads the FileInfo for a path
|
||||||
func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
|
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
|
||||||
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
|
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
|
||||||
dir := path.Dir(remote)
|
dir := path.Dir(remote)
|
||||||
base := path.Base(remote)
|
base := path.Base(remote)
|
||||||
|
|
||||||
c, err := f.getFtpConnection()
|
c, err := f.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "getInfo")
|
return nil, errors.Wrap(err, "getInfo")
|
||||||
}
|
}
|
||||||
@@ -630,12 +749,12 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// mkdir makes the directory and parents using unrooted paths
|
// mkdir makes the directory and parents using unrooted paths
|
||||||
func (f *Fs) mkdir(abspath string) error {
|
func (f *Fs) mkdir(ctx context.Context, abspath string) error {
|
||||||
abspath = path.Clean(abspath)
|
abspath = path.Clean(abspath)
|
||||||
if abspath == "." || abspath == "/" {
|
if abspath == "." || abspath == "/" {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
fi, err := f.getInfo(abspath)
|
fi, err := f.getInfo(ctx, abspath)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if fi.IsDir {
|
if fi.IsDir {
|
||||||
return nil
|
return nil
|
||||||
@@ -645,11 +764,11 @@ func (f *Fs) mkdir(abspath string) error {
|
|||||||
return errors.Wrapf(err, "mkdir %q failed", abspath)
|
return errors.Wrapf(err, "mkdir %q failed", abspath)
|
||||||
}
|
}
|
||||||
parent := path.Dir(abspath)
|
parent := path.Dir(abspath)
|
||||||
err = f.mkdir(parent)
|
err = f.mkdir(ctx, parent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
c, connErr := f.getFtpConnection()
|
c, connErr := f.getFtpConnection(ctx)
|
||||||
if connErr != nil {
|
if connErr != nil {
|
||||||
return errors.Wrap(connErr, "mkdir")
|
return errors.Wrap(connErr, "mkdir")
|
||||||
}
|
}
|
||||||
@@ -669,23 +788,23 @@ func (f *Fs) mkdir(abspath string) error {
|
|||||||
|
|
||||||
// mkParentDir makes the parent of remote if necessary and any
|
// mkParentDir makes the parent of remote if necessary and any
|
||||||
// directories above that
|
// directories above that
|
||||||
func (f *Fs) mkParentDir(remote string) error {
|
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
|
||||||
parent := path.Dir(remote)
|
parent := path.Dir(remote)
|
||||||
return f.mkdir(path.Join(f.root, parent))
|
return f.mkdir(ctx, path.Join(f.root, parent))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mkdir creates the directory if it doesn't exist
|
// Mkdir creates the directory if it doesn't exist
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||||
// defer fs.Trace(dir, "")("err=%v", &err)
|
// defer fs.Trace(dir, "")("err=%v", &err)
|
||||||
root := path.Join(f.root, dir)
|
root := path.Join(f.root, dir)
|
||||||
return f.mkdir(root)
|
return f.mkdir(ctx, root)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
// Rmdir removes the directory (container, bucket) if empty
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist or isn't empty
|
// Return an error if it doesn't exist or isn't empty
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
c, err := f.getFtpConnection()
|
c, err := f.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(translateErrorFile(err), "Rmdir")
|
return errors.Wrap(translateErrorFile(err), "Rmdir")
|
||||||
}
|
}
|
||||||
@@ -701,11 +820,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
fs.Debugf(src, "Can't move - not same remote type")
|
fs.Debugf(src, "Can't move - not same remote type")
|
||||||
return nil, fs.ErrorCantMove
|
return nil, fs.ErrorCantMove
|
||||||
}
|
}
|
||||||
err := f.mkParentDir(remote)
|
err := f.mkParentDir(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "Move mkParentDir failed")
|
return nil, errors.Wrap(err, "Move mkParentDir failed")
|
||||||
}
|
}
|
||||||
c, err := f.getFtpConnection()
|
c, err := f.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "Move")
|
return nil, errors.Wrap(err, "Move")
|
||||||
}
|
}
|
||||||
@@ -725,7 +844,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
// using server side move operations.
|
// using server-side move operations.
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -742,7 +861,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
dstPath := path.Join(f.root, dstRemote)
|
dstPath := path.Join(f.root, dstRemote)
|
||||||
|
|
||||||
// Check if destination exists
|
// Check if destination exists
|
||||||
fi, err := f.getInfo(dstPath)
|
fi, err := f.getInfo(ctx, dstPath)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if fi.IsDir {
|
if fi.IsDir {
|
||||||
return fs.ErrorDirExists
|
return fs.ErrorDirExists
|
||||||
@@ -753,13 +872,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Make sure the parent directory exists
|
// Make sure the parent directory exists
|
||||||
err = f.mkdir(path.Dir(dstPath))
|
err = f.mkdir(ctx, path.Dir(dstPath))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "DirMove mkParentDir dst failed")
|
return errors.Wrap(err, "DirMove mkParentDir dst failed")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Do the move
|
// Do the move
|
||||||
c, err := f.getFtpConnection()
|
c, err := f.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "DirMove")
|
return errors.Wrap(err, "DirMove")
|
||||||
}
|
}
|
||||||
@@ -843,8 +962,8 @@ func (f *ftpReadCloser) Close() error {
|
|||||||
go func() {
|
go func() {
|
||||||
errchan <- f.rc.Close()
|
errchan <- f.rc.Close()
|
||||||
}()
|
}()
|
||||||
// Wait for Close for up to 60 seconds
|
// Wait for Close for up to 60 seconds by default
|
||||||
timer := time.NewTimer(60 * time.Second)
|
timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
|
||||||
select {
|
select {
|
||||||
case err = <-errchan:
|
case err = <-errchan:
|
||||||
timer.Stop()
|
timer.Stop()
|
||||||
@@ -891,7 +1010,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
c, err := o.fs.getFtpConnection()
|
c, err := o.fs.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "open")
|
return nil, errors.Wrap(err, "open")
|
||||||
}
|
}
|
||||||
@@ -926,11 +1045,21 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
fs.Debugf(o, "Removed after failed upload: %v", err)
|
fs.Debugf(o, "Removed after failed upload: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
c, err := o.fs.getFtpConnection()
|
c, err := o.fs.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "Update")
|
return errors.Wrap(err, "Update")
|
||||||
}
|
}
|
||||||
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
|
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
|
||||||
|
// Ignore error 250 here - send by some servers
|
||||||
|
if err != nil {
|
||||||
|
switch errX := err.(type) {
|
||||||
|
case *textproto.Error:
|
||||||
|
switch errX.Code {
|
||||||
|
case ftp.StatusRequestedFileActionOK:
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = c.Quit() // toss this connection to avoid sync errors
|
_ = c.Quit() // toss this connection to avoid sync errors
|
||||||
remove()
|
remove()
|
||||||
@@ -938,7 +1067,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
return errors.Wrap(err, "update stor")
|
return errors.Wrap(err, "update stor")
|
||||||
}
|
}
|
||||||
o.fs.putFtpConnection(&c, nil)
|
o.fs.putFtpConnection(&c, nil)
|
||||||
o.info, err = o.fs.getInfo(path)
|
o.info, err = o.fs.getInfo(ctx, path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "update getinfo")
|
return errors.Wrap(err, "update getinfo")
|
||||||
}
|
}
|
||||||
@@ -950,14 +1079,14 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
|||||||
// defer fs.Trace(o, "")("err=%v", &err)
|
// defer fs.Trace(o, "")("err=%v", &err)
|
||||||
path := path.Join(o.fs.root, o.remote)
|
path := path.Join(o.fs.root, o.remote)
|
||||||
// Check if it's a directory or a file
|
// Check if it's a directory or a file
|
||||||
info, err := o.fs.getInfo(path)
|
info, err := o.fs.getInfo(ctx, path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if info.IsDir {
|
if info.IsDir {
|
||||||
err = o.fs.Rmdir(ctx, o.remote)
|
err = o.fs.Rmdir(ctx, o.remote)
|
||||||
} else {
|
} else {
|
||||||
c, err := o.fs.getFtpConnection()
|
c, err := o.fs.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "Remove")
|
return errors.Wrap(err, "Remove")
|
||||||
}
|
}
|
||||||
@@ -973,5 +1102,6 @@ var (
|
|||||||
_ fs.Mover = &Fs{}
|
_ fs.Mover = &Fs{}
|
||||||
_ fs.DirMover = &Fs{}
|
_ fs.DirMover = &Fs{}
|
||||||
_ fs.PutStreamer = &Fs{}
|
_ fs.PutStreamer = &Fs{}
|
||||||
|
_ fs.Shutdowner = &Fs{}
|
||||||
_ fs.Object = &Object{}
|
_ fs.Object = &Object{}
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -19,9 +19,9 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"path"
|
"path"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -51,10 +51,10 @@ import (
|
|||||||
const (
|
const (
|
||||||
rcloneClientID = "202264815644.apps.googleusercontent.com"
|
rcloneClientID = "202264815644.apps.googleusercontent.com"
|
||||||
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
|
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
|
||||||
timeFormatIn = time.RFC3339
|
timeFormat = time.RFC3339Nano
|
||||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
metaMtime = "mtime" // key to store mtime in metadata
|
||||||
metaMtime = "mtime" // key to store mtime under in metadata
|
metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
|
||||||
listChunks = 1000 // chunk size to read directory listings
|
listChunks = 1000 // chunk size to read directory listings
|
||||||
minSleep = 10 * time.Millisecond
|
minSleep = 10 * time.Millisecond
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -76,25 +76,18 @@ func init() {
|
|||||||
Prefix: "gcs",
|
Prefix: "gcs",
|
||||||
Description: "Google Cloud Storage (this is not Google Drive)",
|
Description: "Google Cloud Storage (this is not Google Drive)",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(name string, m configmap.Mapper) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||||
saFile, _ := m.Get("service_account_file")
|
saFile, _ := m.Get("service_account_file")
|
||||||
saCreds, _ := m.Get("service_account_credentials")
|
saCreds, _ := m.Get("service_account_credentials")
|
||||||
anonymous, _ := m.Get("anonymous")
|
anonymous, _ := m.Get("anonymous")
|
||||||
if saFile != "" || saCreds != "" || anonymous == "true" {
|
if saFile != "" || saCreds != "" || anonymous == "true" {
|
||||||
return
|
return nil, nil
|
||||||
}
|
|
||||||
err := oauthutil.Config("google cloud storage", name, m, storageConfig, nil)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to configure token: %v", err)
|
|
||||||
}
|
}
|
||||||
|
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||||
|
OAuth2Config: storageConfig,
|
||||||
|
})
|
||||||
},
|
},
|
||||||
Options: []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: config.ConfigClientID,
|
|
||||||
Help: "Google Application Client Id\nLeave blank normally.",
|
|
||||||
}, {
|
|
||||||
Name: config.ConfigClientSecret,
|
|
||||||
Help: "Google Application Client Secret\nLeave blank normally.",
|
|
||||||
}, {
|
|
||||||
Name: "project_number",
|
Name: "project_number",
|
||||||
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||||
}, {
|
}, {
|
||||||
@@ -261,7 +254,7 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
|||||||
Default: (encoder.Base |
|
Default: (encoder.Base |
|
||||||
encoder.EncodeCrLf |
|
encoder.EncodeCrLf |
|
||||||
encoder.EncodeInvalidUtf8),
|
encoder.EncodeInvalidUtf8),
|
||||||
}},
|
}}...),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -335,7 +328,10 @@ func (f *Fs) Features() *fs.Features {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// shouldRetry determines whether a given err rates being retried
|
// shouldRetry determines whether a given err rates being retried
|
||||||
func shouldRetry(err error) (again bool, errOut error) {
|
func shouldRetry(ctx context.Context, err error) (again bool, errOut error) {
|
||||||
|
if fserrors.ContextError(ctx, &err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
again = false
|
again = false
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if fserrors.ShouldRetry(err) {
|
if fserrors.ShouldRetry(err) {
|
||||||
@@ -376,12 +372,12 @@ func (o *Object) split() (bucket, bucketPath string) {
|
|||||||
return o.fs.split(o.remote)
|
return o.fs.split(o.remote)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
|
||||||
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
|
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "error processing credentials")
|
return nil, errors.Wrap(err, "error processing credentials")
|
||||||
}
|
}
|
||||||
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
|
ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
|
||||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -392,8 +388,7 @@ func (f *Fs) setRoot(root string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, bucket:path
|
// NewFs constructs an Fs from the path, bucket:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
ctx := context.TODO()
|
|
||||||
var oAuthClient *http.Client
|
var oAuthClient *http.Client
|
||||||
|
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
@@ -418,14 +413,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
opt.ServiceAccountCredentials = string(loadedCreds)
|
opt.ServiceAccountCredentials = string(loadedCreds)
|
||||||
}
|
}
|
||||||
if opt.Anonymous {
|
if opt.Anonymous {
|
||||||
oAuthClient = &http.Client{}
|
oAuthClient = fshttp.NewClient(ctx)
|
||||||
} else if opt.ServiceAccountCredentials != "" {
|
} else if opt.ServiceAccountCredentials != "" {
|
||||||
oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
|
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
|
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
|
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
|
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
|
||||||
@@ -439,7 +434,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
name: name,
|
name: name,
|
||||||
root: root,
|
root: root,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
|
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
|
||||||
cache: bucket.NewCache(),
|
cache: bucket.NewCache(),
|
||||||
}
|
}
|
||||||
f.setRoot(root)
|
f.setRoot(root)
|
||||||
@@ -448,7 +443,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
WriteMimeType: true,
|
WriteMimeType: true,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
BucketBasedRootOK: true,
|
BucketBasedRootOK: true,
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
|
|
||||||
// Create a new authorized Drive client.
|
// Create a new authorized Drive client.
|
||||||
f.client = oAuthClient
|
f.client = oAuthClient
|
||||||
@@ -462,7 +457,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
|
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
|
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
newRoot := path.Dir(f.root)
|
newRoot := path.Dir(f.root)
|
||||||
@@ -528,7 +523,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
|||||||
var objects *storage.Objects
|
var objects *storage.Objects
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
objects, err = list.Context(ctx).Do()
|
objects, err = list.Context(ctx).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gErr, ok := err.(*googleapi.Error); ok {
|
if gErr, ok := err.(*googleapi.Error); ok {
|
||||||
@@ -571,7 +566,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
|||||||
remote = path.Join(bucket, remote)
|
remote = path.Join(bucket, remote)
|
||||||
}
|
}
|
||||||
// is this a directory marker?
|
// is this a directory marker?
|
||||||
if isDirectory && object.Size == 0 {
|
if isDirectory {
|
||||||
continue // skip directory marker
|
continue // skip directory marker
|
||||||
}
|
}
|
||||||
err = fn(remote, object, false)
|
err = fn(remote, object, false)
|
||||||
@@ -631,7 +626,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
|
|||||||
var buckets *storage.Buckets
|
var buckets *storage.Buckets
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
buckets, err = listBuckets.Context(ctx).Do()
|
buckets, err = listBuckets.Context(ctx).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -757,7 +752,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
|||||||
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
|
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
// Bucket already exists
|
// Bucket already exists
|
||||||
@@ -792,7 +787,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
|||||||
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
||||||
}
|
}
|
||||||
_, err = insertBucket.Context(ctx).Do()
|
_, err = insertBucket.Context(ctx).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
}, nil)
|
}, nil)
|
||||||
}
|
}
|
||||||
@@ -809,7 +804,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
|||||||
return f.cache.Remove(bucket, func() error {
|
return f.cache.Remove(bucket, func() error {
|
||||||
return f.pacer.Call(func() (bool, error) {
|
return f.pacer.Call(func() (bool, error) {
|
||||||
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
|
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -819,7 +814,7 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
return time.Nanosecond
|
return time.Nanosecond
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -847,20 +842,27 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
remote: remote,
|
remote: remote,
|
||||||
}
|
}
|
||||||
|
|
||||||
var newObject *storage.Object
|
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
if !f.opt.BucketPolicyOnly {
|
||||||
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
|
rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
|
||||||
if !f.opt.BucketPolicyOnly {
|
}
|
||||||
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
|
var rewriteResponse *storage.RewriteResponse
|
||||||
|
for {
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
|
||||||
|
return shouldRetry(ctx, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
newObject, err = copyObject.Context(ctx).Do()
|
if rewriteResponse.Done {
|
||||||
return shouldRetry(err)
|
break
|
||||||
})
|
}
|
||||||
if err != nil {
|
rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
|
||||||
return nil, err
|
fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
|
||||||
}
|
}
|
||||||
// Set the metadata for the new object while we have it
|
// Set the metadata for the new object while we have it
|
||||||
dstObj.setMetaData(newObject)
|
dstObj.setMetaData(rewriteResponse.Resource)
|
||||||
return dstObj, nil
|
return dstObj, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -919,7 +921,7 @@ func (o *Object) setMetaData(info *storage.Object) {
|
|||||||
// read mtime out of metadata if available
|
// read mtime out of metadata if available
|
||||||
mtimeString, ok := info.Metadata[metaMtime]
|
mtimeString, ok := info.Metadata[metaMtime]
|
||||||
if ok {
|
if ok {
|
||||||
modTime, err := time.Parse(timeFormatIn, mtimeString)
|
modTime, err := time.Parse(timeFormat, mtimeString)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
o.modTime = modTime
|
o.modTime = modTime
|
||||||
return
|
return
|
||||||
@@ -927,8 +929,19 @@ func (o *Object) setMetaData(info *storage.Object) {
|
|||||||
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
|
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Fallback to GSUtil mtime
|
||||||
|
mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
|
||||||
|
if ok {
|
||||||
|
unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
|
||||||
|
if err == nil {
|
||||||
|
o.modTime = time.Unix(unixTimeSec, 0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
// Fallback to the Updated time
|
// Fallback to the Updated time
|
||||||
modTime, err := time.Parse(timeFormatIn, info.Updated)
|
modTime, err := time.Parse(timeFormat, info.Updated)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Logf(o, "Bad time decode: %v", err)
|
fs.Logf(o, "Bad time decode: %v", err)
|
||||||
} else {
|
} else {
|
||||||
@@ -941,7 +954,7 @@ func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, er
|
|||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
|
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gErr, ok := err.(*googleapi.Error); ok {
|
if gErr, ok := err.(*googleapi.Error); ok {
|
||||||
@@ -985,7 +998,8 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
|||||||
// Returns metadata for an object
|
// Returns metadata for an object
|
||||||
func metadataFromModTime(modTime time.Time) map[string]string {
|
func metadataFromModTime(modTime time.Time) map[string]string {
|
||||||
metadata := make(map[string]string, 1)
|
metadata := make(map[string]string, 1)
|
||||||
metadata[metaMtime] = modTime.Format(timeFormatOut)
|
metadata[metaMtime] = modTime.Format(timeFormat)
|
||||||
|
metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
|
||||||
return metadata
|
return metadata
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -997,11 +1011,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Add the mtime to the existing metadata
|
// Add the mtime to the existing metadata
|
||||||
mtime := modTime.Format(timeFormatOut)
|
|
||||||
if object.Metadata == nil {
|
if object.Metadata == nil {
|
||||||
object.Metadata = make(map[string]string, 1)
|
object.Metadata = make(map[string]string, 1)
|
||||||
}
|
}
|
||||||
object.Metadata[metaMtime] = mtime
|
object.Metadata[metaMtime] = modTime.Format(timeFormat)
|
||||||
|
object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
|
||||||
// Copy the object to itself to update the metadata
|
// Copy the object to itself to update the metadata
|
||||||
// Using PATCH requires too many permissions
|
// Using PATCH requires too many permissions
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
@@ -1012,7 +1026,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
|
|||||||
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
|
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
|
||||||
}
|
}
|
||||||
newObject, err = copyObject.Context(ctx).Do()
|
newObject, err = copyObject.Context(ctx).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1028,11 +1042,10 @@ func (o *Object) Storable() bool {
|
|||||||
|
|
||||||
// Open an object for read
|
// Open an object for read
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
req, err := http.NewRequest("GET", o.url, nil)
|
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
|
||||||
fs.FixRangeOption(options, o.bytes)
|
fs.FixRangeOption(options, o.bytes)
|
||||||
fs.OpenOptionAddHTTPHeaders(req.Header, options)
|
fs.OpenOptionAddHTTPHeaders(req.Header, options)
|
||||||
var res *http.Response
|
var res *http.Response
|
||||||
@@ -1044,7 +1057,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
_ = res.Body.Close() // ignore error
|
_ = res.Body.Close() // ignore error
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1091,6 +1104,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
object.ContentLanguage = value
|
object.ContentLanguage = value
|
||||||
case "content-type":
|
case "content-type":
|
||||||
object.ContentType = value
|
object.ContentType = value
|
||||||
|
case "x-goog-storage-class":
|
||||||
|
object.StorageClass = value
|
||||||
default:
|
default:
|
||||||
const googMetaPrefix = "x-goog-meta-"
|
const googMetaPrefix = "x-goog-meta-"
|
||||||
if strings.HasPrefix(lowerKey, googMetaPrefix) {
|
if strings.HasPrefix(lowerKey, googMetaPrefix) {
|
||||||
@@ -1108,7 +1123,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
|
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
|
||||||
}
|
}
|
||||||
newObject, err = insertObject.Context(ctx).Do()
|
newObject, err = insertObject.Context(ctx).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1123,7 +1138,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
|||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
|
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,7 +8,6 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
golog "log"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
@@ -21,7 +20,6 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/backend/googlephotos/api"
|
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/config"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
@@ -55,6 +53,7 @@ const (
|
|||||||
minSleep = 10 * time.Millisecond
|
minSleep = 10 * time.Millisecond
|
||||||
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
|
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
|
||||||
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
|
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
|
||||||
|
scopeAccess = 2 // position of access scope in list
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -63,7 +62,7 @@ var (
|
|||||||
Scopes: []string{
|
Scopes: []string{
|
||||||
"openid",
|
"openid",
|
||||||
"profile",
|
"profile",
|
||||||
scopeReadWrite,
|
scopeReadWrite, // this must be at position scopeAccess
|
||||||
},
|
},
|
||||||
Endpoint: google.Endpoint,
|
Endpoint: google.Endpoint,
|
||||||
ClientID: rcloneClientID,
|
ClientID: rcloneClientID,
|
||||||
@@ -79,44 +78,38 @@ func init() {
|
|||||||
Prefix: "gphotos",
|
Prefix: "gphotos",
|
||||||
Description: "Google Photos",
|
Description: "Google Photos",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(name string, m configmap.Mapper) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
|
return nil, errors.Wrap(err, "couldn't parse config into struct")
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fill in the scopes
|
switch config.State {
|
||||||
if opt.ReadOnly {
|
case "":
|
||||||
oauthConfig.Scopes[0] = scopeReadOnly
|
// Fill in the scopes
|
||||||
} else {
|
if opt.ReadOnly {
|
||||||
oauthConfig.Scopes[0] = scopeReadWrite
|
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
|
||||||
|
} else {
|
||||||
|
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
|
||||||
|
}
|
||||||
|
return oauthutil.ConfigOut("warning", &oauthutil.Options{
|
||||||
|
OAuth2Config: oauthConfig,
|
||||||
|
})
|
||||||
|
case "warning":
|
||||||
|
// Warn the user as required by google photos integration
|
||||||
|
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
|
||||||
|
|
||||||
|
IMPORTANT: All media items uploaded to Google Photos with rclone
|
||||||
|
are stored in full resolution at original quality. These uploads
|
||||||
|
will count towards storage in your Google Account.`)
|
||||||
|
case "warning_done":
|
||||||
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||||
// Do the oauth
|
|
||||||
err = oauthutil.Config("google photos", name, m, oauthConfig, nil)
|
|
||||||
if err != nil {
|
|
||||||
golog.Fatalf("Failed to configure token: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warn the user
|
|
||||||
fmt.Print(`
|
|
||||||
*** IMPORTANT: All media items uploaded to Google Photos with rclone
|
|
||||||
*** are stored in full resolution at original quality. These uploads
|
|
||||||
*** will count towards storage in your Google Account.
|
|
||||||
|
|
||||||
`)
|
|
||||||
|
|
||||||
},
|
},
|
||||||
Options: []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: config.ConfigClientID,
|
|
||||||
Help: "Google Application Client Id\nLeave blank normally.",
|
|
||||||
}, {
|
|
||||||
Name: config.ConfigClientSecret,
|
|
||||||
Help: "Google Application Client Secret\nLeave blank normally.",
|
|
||||||
}, {
|
|
||||||
Name: "read_only",
|
Name: "read_only",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: `Set to make the Google Photos backend read only.
|
Help: `Set to make the Google Photos backend read only.
|
||||||
@@ -139,15 +132,33 @@ you want to read the media.`,
|
|||||||
Default: 2000,
|
Default: 2000,
|
||||||
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
|
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}},
|
}, {
|
||||||
|
Name: "include_archived",
|
||||||
|
Default: false,
|
||||||
|
Help: `Also view and download archived media.
|
||||||
|
|
||||||
|
By default rclone does not request archived media. Thus, when syncing,
|
||||||
|
archived media is not visible in directory listings or transferred.
|
||||||
|
|
||||||
|
Note that media in albums is always visible and synced, no matter
|
||||||
|
their archive status.
|
||||||
|
|
||||||
|
With this flag, archived media are always visible in directory
|
||||||
|
listings and transferred.
|
||||||
|
|
||||||
|
Without this flag, archived media will not be visible in directory
|
||||||
|
listings and won't be transferred.`,
|
||||||
|
Advanced: true,
|
||||||
|
}}...),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
ReadOnly bool `config:"read_only"`
|
ReadOnly bool `config:"read_only"`
|
||||||
ReadSize bool `config:"read_size"`
|
ReadSize bool `config:"read_size"`
|
||||||
StartYear int `config:"start_year"`
|
StartYear int `config:"start_year"`
|
||||||
|
IncludeArchived bool `config:"include_archived"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote storage server
|
// Fs represents a remote storage server
|
||||||
@@ -213,6 +224,10 @@ func (f *Fs) startYear() int {
|
|||||||
return f.opt.StartYear
|
return f.opt.StartYear
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f *Fs) includeArchived() bool {
|
||||||
|
return f.opt.IncludeArchived
|
||||||
|
}
|
||||||
|
|
||||||
// retryErrorCodes is a slice of error codes that we will retry
|
// retryErrorCodes is a slice of error codes that we will retry
|
||||||
var retryErrorCodes = []int{
|
var retryErrorCodes = []int{
|
||||||
429, // Too Many Requests.
|
429, // Too Many Requests.
|
||||||
@@ -225,7 +240,10 @@ var retryErrorCodes = []int{
|
|||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this resp and err
|
// shouldRetry returns a boolean as to whether this resp and err
|
||||||
// deserve to be retried. It returns the err as a convenience
|
// deserve to be retried. It returns the err as a convenience
|
||||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||||
|
if fserrors.ContextError(ctx, &err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -253,7 +271,7 @@ func errorHandler(resp *http.Response) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, bucket:path
|
// NewFs constructs an Fs from the path, bucket:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -261,8 +279,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
baseClient := fshttp.NewClient(fs.Config)
|
baseClient := fshttp.NewClient(ctx)
|
||||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
|
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to configure Box")
|
return nil, errors.Wrap(err, "failed to configure Box")
|
||||||
}
|
}
|
||||||
@@ -279,14 +297,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
unAuth: rest.NewClient(baseClient),
|
unAuth: rest.NewClient(baseClient),
|
||||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||||
ts: ts,
|
ts: ts,
|
||||||
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
|
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
|
||||||
startTime: time.Now(),
|
startTime: time.Now(),
|
||||||
albums: map[bool]*albums{},
|
albums: map[bool]*albums{},
|
||||||
uploaded: dirtree.New(),
|
uploaded: dirtree.New(),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
f.srv.SetErrorHandler(errorHandler)
|
f.srv.SetErrorHandler(errorHandler)
|
||||||
|
|
||||||
_, _, pattern := patterns.match(f.root, "", true)
|
_, _, pattern := patterns.match(f.root, "", true)
|
||||||
@@ -295,7 +313,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
var leaf string
|
var leaf string
|
||||||
f.root, leaf = path.Split(f.root)
|
f.root, leaf = path.Split(f.root)
|
||||||
f.root = strings.TrimRight(f.root, "/")
|
f.root = strings.TrimRight(f.root, "/")
|
||||||
_, err := f.NewObject(context.TODO(), leaf)
|
_, err := f.NewObject(ctx, leaf)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return f, fs.ErrorIsFile
|
return f, fs.ErrorIsFile
|
||||||
}
|
}
|
||||||
@@ -314,7 +332,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
|
|||||||
var openIDconfig map[string]interface{}
|
var openIDconfig map[string]interface{}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
|
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", errors.Wrap(err, "couldn't read openID config")
|
return "", errors.Wrap(err, "couldn't read openID config")
|
||||||
@@ -343,7 +361,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
|
|||||||
}
|
}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
|
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't read user info")
|
return nil, errors.Wrap(err, "couldn't read user info")
|
||||||
@@ -374,7 +392,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
|
|||||||
var res interface{}
|
var res interface{}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
|
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't revoke token")
|
return errors.Wrap(err, "couldn't revoke token")
|
||||||
@@ -461,7 +479,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't list albums")
|
return nil, errors.Wrap(err, "couldn't list albums")
|
||||||
@@ -504,13 +522,19 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
|
|||||||
}
|
}
|
||||||
filter.PageSize = listChunks
|
filter.PageSize = listChunks
|
||||||
filter.PageToken = ""
|
filter.PageToken = ""
|
||||||
|
if filter.AlbumID == "" { // album ID and filters cannot be set together, else error 400 INVALID_ARGUMENT
|
||||||
|
if filter.Filters == nil {
|
||||||
|
filter.Filters = &api.Filters{}
|
||||||
|
}
|
||||||
|
filter.Filters.IncludeArchivedMedia = &f.opt.IncludeArchived
|
||||||
|
}
|
||||||
lastID := ""
|
lastID := ""
|
||||||
for {
|
for {
|
||||||
var result api.MediaItems
|
var result api.MediaItems
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
|
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't list files")
|
return errors.Wrap(err, "couldn't list files")
|
||||||
@@ -654,7 +678,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
|
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't create album")
|
return nil, errors.Wrap(err, "couldn't create album")
|
||||||
@@ -789,7 +813,7 @@ func (o *Object) Size() int64 {
|
|||||||
}
|
}
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(o, "Reading size failed: %v", err)
|
fs.Debugf(o, "Reading size failed: %v", err)
|
||||||
@@ -840,7 +864,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't get media item")
|
return errors.Wrap(err, "couldn't get media item")
|
||||||
@@ -917,7 +941,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
}
|
}
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -972,10 +996,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
}
|
}
|
||||||
token, err = rest.ReadBody(resp)
|
token, err = rest.ReadBody(resp)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't upload file")
|
return errors.Wrap(err, "couldn't upload file")
|
||||||
@@ -1003,7 +1027,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
var result api.BatchCreateResponse
|
var result api.BatchCreateResponse
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "failed to create media item")
|
return errors.Wrap(err, "failed to create media item")
|
||||||
@@ -1048,7 +1072,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't delete item from album")
|
return errors.Wrap(err, "couldn't delete item from album")
|
||||||
|
|||||||
@@ -35,14 +35,14 @@ func TestIntegration(t *testing.T) {
|
|||||||
if *fstest.RemoteName == "" {
|
if *fstest.RemoteName == "" {
|
||||||
*fstest.RemoteName = "TestGooglePhotos:"
|
*fstest.RemoteName = "TestGooglePhotos:"
|
||||||
}
|
}
|
||||||
f, err := fs.NewFs(*fstest.RemoteName)
|
f, err := fs.NewFs(ctx, *fstest.RemoteName)
|
||||||
if err == fs.ErrorNotFoundInConfigFile {
|
if err == fs.ErrorNotFoundInConfigFile {
|
||||||
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
|
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
|
||||||
}
|
}
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Create local Fs pointing at testfiles
|
// Create local Fs pointing at testfiles
|
||||||
localFs, err := fs.NewFs("testfiles")
|
localFs, err := fs.NewFs(ctx, "testfiles")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
t.Run("CreateAlbum", func(t *testing.T) {
|
t.Run("CreateAlbum", func(t *testing.T) {
|
||||||
@@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) {
|
|||||||
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
|
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
|
||||||
})
|
})
|
||||||
|
|
||||||
// Check it is there in the date/month/year heirachy
|
// Check it is there in the date/month/year hierarchy
|
||||||
// 2013-07-13 is the creation date of the folder
|
// 2013-07-13 is the creation date of the folder
|
||||||
checkPresent := func(t *testing.T, objPath string) {
|
checkPresent := func(t *testing.T, objPath string) {
|
||||||
entries, err := f.List(ctx, objPath)
|
entries, err := f.List(ctx, objPath)
|
||||||
@@ -155,7 +155,7 @@ func TestIntegration(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("NewFsIsFile", func(t *testing.T) {
|
t.Run("NewFsIsFile", func(t *testing.T) {
|
||||||
fNew, err := fs.NewFs(*fstest.RemoteName + remote)
|
fNew, err := fs.NewFs(ctx, *fstest.RemoteName+remote)
|
||||||
assert.Equal(t, fs.ErrorIsFile, err)
|
assert.Equal(t, fs.ErrorIsFile, err)
|
||||||
leaf := path.Base(remote)
|
leaf := path.Base(remote)
|
||||||
o, err := fNew.NewObject(ctx, leaf)
|
o, err := fNew.NewObject(ctx, leaf)
|
||||||
|
|||||||
@@ -24,6 +24,7 @@ type lister interface {
|
|||||||
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
|
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
|
||||||
dirTime() time.Time
|
dirTime() time.Time
|
||||||
startYear() int
|
startYear() int
|
||||||
|
includeArchived() bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// dirPattern describes a single directory pattern
|
// dirPattern describes a single directory pattern
|
||||||
|
|||||||
@@ -64,6 +64,11 @@ func (f *testLister) startYear() int {
|
|||||||
return 2000
|
return 2000
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// mock includeArchived for testing
|
||||||
|
func (f *testLister) includeArchived() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
func TestPatternMatch(t *testing.T) {
|
func TestPatternMatch(t *testing.T) {
|
||||||
for testNumber, test := range []struct {
|
for testNumber, test := range []struct {
|
||||||
// input
|
// input
|
||||||
|
|||||||
320
backend/hdfs/fs.go
Normal file
320
backend/hdfs/fs.go
Normal file
@@ -0,0 +1,320 @@
|
|||||||
|
// +build !plan9
|
||||||
|
|
||||||
|
package hdfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"os/user"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/colinmarc/hdfs/v2"
|
||||||
|
krb "github.com/jcmturner/gokrb5/v8/client"
|
||||||
|
"github.com/jcmturner/gokrb5/v8/config"
|
||||||
|
"github.com/jcmturner/gokrb5/v8/credentials"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Fs represents a HDFS server
|
||||||
|
type Fs struct {
|
||||||
|
name string
|
||||||
|
root string
|
||||||
|
features *fs.Features // optional features
|
||||||
|
opt Options // options for this backend
|
||||||
|
ci *fs.ConfigInfo // global config
|
||||||
|
client *hdfs.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
|
||||||
|
func getKerberosClient() (*krb.Client, error) {
|
||||||
|
configPath := os.Getenv("KRB5_CONFIG")
|
||||||
|
if configPath == "" {
|
||||||
|
configPath = "/etc/krb5.conf"
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err := config.Load(configPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine the ccache location from the environment, falling back to the
|
||||||
|
// default location.
|
||||||
|
ccachePath := os.Getenv("KRB5CCNAME")
|
||||||
|
if strings.Contains(ccachePath, ":") {
|
||||||
|
if strings.HasPrefix(ccachePath, "FILE:") {
|
||||||
|
ccachePath = strings.SplitN(ccachePath, ":", 2)[1]
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("unusable ccache: %s", ccachePath)
|
||||||
|
}
|
||||||
|
} else if ccachePath == "" {
|
||||||
|
u, err := user.Current()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)
|
||||||
|
}
|
||||||
|
|
||||||
|
ccache, err := credentials.LoadCCache(ccachePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := krb.NewFromCCache(ccache, cfg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFs constructs an Fs from the path
|
||||||
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
|
opt := new(Options)
|
||||||
|
err := configstruct.Set(m, opt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
options := hdfs.ClientOptions{
|
||||||
|
Addresses: []string{opt.Namenode},
|
||||||
|
UseDatanodeHostname: false,
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.ServicePrincipalName != "" {
|
||||||
|
options.KerberosClient, err = getKerberosClient()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
|
||||||
|
}
|
||||||
|
options.KerberosServicePrincipleName = opt.ServicePrincipalName
|
||||||
|
|
||||||
|
if opt.DataTransferProtection != "" {
|
||||||
|
options.DataTransferProtection = opt.DataTransferProtection
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
options.User = opt.Username
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := hdfs.NewClient(options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
f := &Fs{
|
||||||
|
name: name,
|
||||||
|
root: root,
|
||||||
|
opt: *opt,
|
||||||
|
ci: fs.GetConfig(ctx),
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
|
||||||
|
f.features = (&fs.Features{
|
||||||
|
CanHaveEmptyDirectories: true,
|
||||||
|
}).Fill(ctx, f)
|
||||||
|
|
||||||
|
info, err := f.client.Stat(f.realpath(""))
|
||||||
|
if err == nil && !info.IsDir() {
|
||||||
|
f.root = path.Dir(f.root)
|
||||||
|
return f, fs.ErrorIsFile
|
||||||
|
}
|
||||||
|
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name of this fs
|
||||||
|
func (f *Fs) Name() string {
|
||||||
|
return f.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Root of the remote (as passed into NewFs)
|
||||||
|
func (f *Fs) Root() string {
|
||||||
|
return f.root
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a description of the FS
|
||||||
|
func (f *Fs) String() string {
|
||||||
|
return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Features returns the optional features of this Fs
|
||||||
|
func (f *Fs) Features() *fs.Features {
|
||||||
|
return f.features
|
||||||
|
}
|
||||||
|
|
||||||
|
// Precision return the precision of this Fs
|
||||||
|
func (f *Fs) Precision() time.Duration {
|
||||||
|
return time.Second
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hashes are not supported
|
||||||
|
func (f *Fs) Hashes() hash.Set {
|
||||||
|
return hash.Set(hash.None)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewObject finds file at remote or return fs.ErrorObjectNotFound
|
||||||
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
|
realpath := f.realpath(remote)
|
||||||
|
fs.Debugf(f, "new [%s]", realpath)
|
||||||
|
|
||||||
|
info, err := f.ensureFile(realpath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Object{
|
||||||
|
fs: f,
|
||||||
|
remote: remote,
|
||||||
|
size: info.Size(),
|
||||||
|
modTime: info.ModTime(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List the objects and directories in dir into entries.
|
||||||
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
|
realpath := f.realpath(dir)
|
||||||
|
fs.Debugf(f, "list [%s]", realpath)
|
||||||
|
|
||||||
|
err = f.ensureDirectory(realpath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
list, err := f.client.ReadDir(realpath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, x := range list {
|
||||||
|
stdName := f.opt.Enc.ToStandardName(x.Name())
|
||||||
|
remote := path.Join(dir, stdName)
|
||||||
|
if x.IsDir() {
|
||||||
|
entries = append(entries, fs.NewDir(remote, x.ModTime()))
|
||||||
|
} else {
|
||||||
|
entries = append(entries, &Object{
|
||||||
|
fs: f,
|
||||||
|
remote: remote,
|
||||||
|
size: x.Size(),
|
||||||
|
modTime: x.ModTime()})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put the object
|
||||||
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
|
o := &Object{
|
||||||
|
fs: f,
|
||||||
|
remote: src.Remote(),
|
||||||
|
}
|
||||||
|
err := o.Update(ctx, in, src, options...)
|
||||||
|
return o, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
|
return f.Put(ctx, in, src, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mkdir makes a directory
|
||||||
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
|
fs.Debugf(f, "mkdir [%s]", f.realpath(dir))
|
||||||
|
return f.client.MkdirAll(f.realpath(dir), 0755)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rmdir deletes the directory
|
||||||
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
|
realpath := f.realpath(dir)
|
||||||
|
fs.Debugf(f, "rmdir [%s]", realpath)
|
||||||
|
|
||||||
|
err := f.ensureDirectory(realpath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// do not remove empty directory
|
||||||
|
list, err := f.client.ReadDir(realpath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(list) > 0 {
|
||||||
|
return fs.ErrorDirectoryNotEmpty
|
||||||
|
}
|
||||||
|
|
||||||
|
return f.client.Remove(realpath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Purge deletes all the files in the directory
|
||||||
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
|
realpath := f.realpath(dir)
|
||||||
|
fs.Debugf(f, "purge [%s]", realpath)
|
||||||
|
|
||||||
|
err := f.ensureDirectory(realpath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return f.client.RemoveAll(realpath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// About gets quota information from the Fs
|
||||||
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
|
info, err := f.client.StatFs()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &fs.Usage{
|
||||||
|
Total: fs.NewUsageValue(int64(info.Capacity)),
|
||||||
|
Used: fs.NewUsageValue(int64(info.Used)),
|
||||||
|
Free: fs.NewUsageValue(int64(info.Remaining)),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) ensureDirectory(realpath string) error {
|
||||||
|
info, err := f.client.Stat(realpath)
|
||||||
|
|
||||||
|
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
|
||||||
|
return fs.ErrorDirNotFound
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !info.IsDir() {
|
||||||
|
return fs.ErrorDirNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) ensureFile(realpath string) (os.FileInfo, error) {
|
||||||
|
info, err := f.client.Stat(realpath)
|
||||||
|
|
||||||
|
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
|
||||||
|
return nil, fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if info.IsDir() {
|
||||||
|
return nil, fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// realpath returns the encoded absolute HDFS path for dir, rooted at
// the Fs root.
func (f *Fs) realpath(dir string) string {
	return f.opt.Enc.FromStandardPath(xPath(f.Root(), dir))
}
|
||||||
|
|
||||||
|
// Check the interfaces are satisfied
var (
	_ fs.Fs          = (*Fs)(nil)
	_ fs.Purger      = (*Fs)(nil)
	_ fs.PutStreamer = (*Fs)(nil)
	_ fs.Abouter     = (*Fs)(nil)
)
|
||||||
86
backend/hdfs/hdfs.go
Normal file
86
backend/hdfs/hdfs.go
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
// +build !plan9
|
||||||
|
|
||||||
|
package hdfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
fsi := &fs.RegInfo{
|
||||||
|
Name: "hdfs",
|
||||||
|
Description: "Hadoop distributed file system",
|
||||||
|
NewFs: NewFs,
|
||||||
|
Options: []fs.Option{{
|
||||||
|
Name: "namenode",
|
||||||
|
Help: "hadoop name node and port",
|
||||||
|
Required: true,
|
||||||
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "namenode:8020",
|
||||||
|
Help: "Connect to host namenode at port 8020",
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
Name: "username",
|
||||||
|
Help: "hadoop user name",
|
||||||
|
Required: false,
|
||||||
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "root",
|
||||||
|
Help: "Connect to hdfs as root",
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
Name: "service_principal_name",
|
||||||
|
Help: `Kerberos service principal name for the namenode
|
||||||
|
|
||||||
|
Enables KERBEROS authentication. Specifies the Service Principal Name
|
||||||
|
(SERVICE/FQDN) for the namenode.`,
|
||||||
|
Required: false,
|
||||||
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "hdfs/namenode.hadoop.docker",
|
||||||
|
Help: "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
|
||||||
|
}},
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "data_transfer_protection",
|
||||||
|
Help: `Kerberos data transfer protection: authentication|integrity|privacy
|
||||||
|
|
||||||
|
Specifies whether or not authentication, data signature integrity
|
||||||
|
checks, and wire encryption is required when communicating the the
|
||||||
|
datanodes. Possible values are 'authentication', 'integrity' and
|
||||||
|
'privacy'. Used only with KERBEROS enabled.`,
|
||||||
|
Required: false,
|
||||||
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "privacy",
|
||||||
|
Help: "Ensure authentication, integrity and encryption enabled.",
|
||||||
|
}},
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: config.ConfigEncoding,
|
||||||
|
Help: config.ConfigEncodingHelp,
|
||||||
|
Advanced: true,
|
||||||
|
Default: (encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeColon),
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
fs.Register(fsi)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Options defines the configuration for this backend
type Options struct {
	Namenode               string               `config:"namenode"`                 // namenode address, e.g. "namenode:8020"
	Username               string               `config:"username"`                 // user to connect as
	ServicePrincipalName   string               `config:"service_principal_name"`   // Kerberos SPN for the namenode
	DataTransferProtection string               `config:"data_transfer_protection"` // Kerberos data transfer protection level
	Enc                    encoder.MultiEncoder `config:"encoding"`                 // filename encoding
}
|
||||||
|
|
||||||
|
// xPath joins root and tail into a cleaned absolute path, forcing a
// leading '/' on the result.
func xPath(root string, tail string) string {
	if strings.HasPrefix(root, "/") {
		return path.Join(root, tail)
	}
	return path.Join("/"+root, tail)
}
|
||||||
20
backend/hdfs/hdfs_test.go
Normal file
20
backend/hdfs/hdfs_test.go
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
// Test HDFS filesystem interface
|
||||||
|
|
||||||
|
// +build !plan9
|
||||||
|
|
||||||
|
package hdfs_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/backend/hdfs"
|
||||||
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestIntegration runs integration tests against the remote named
// "TestHdfs:" using the standard fstests harness.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestHdfs:",
		NilObject:  (*hdfs.Object)(nil),
	})
}
|
||||||
6
backend/hdfs/hdfs_unsupported.go
Normal file
6
backend/hdfs/hdfs_unsupported.go
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
// Build for hdfs for unsupported platforms to stop go complaining
|
||||||
|
// about "no buildable Go source files "
|
||||||
|
|
||||||
|
// +build plan9
|
||||||
|
|
||||||
|
package hdfs
|
||||||
177
backend/hdfs/object.go
Normal file
177
backend/hdfs/object.go
Normal file
@@ -0,0 +1,177 @@
|
|||||||
|
// +build !plan9
|
||||||
|
|
||||||
|
package hdfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"path"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/readers"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Object describes an HDFS file
type Object struct {
	fs      *Fs       // parent filesystem
	remote  string    // remote path of the object
	size    int64     // size in bytes
	modTime time.Time // last modification time
}
|
||||||
|
|
||||||
|
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}
|
||||||
|
|
||||||
|
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
|
||||||
|
|
||||||
|
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}
|
||||||
|
|
||||||
|
// ModTime returns the cached modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}
|
||||||
|
|
||||||
|
// SetModTime sets the modification time of the object on HDFS.
//
// Both the access and modification times are set to modTime; the
// cached modTime is only updated after the remote call succeeds.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	realpath := o.fs.realpath(o.Remote())
	err := o.fs.client.Chtimes(realpath, modTime, modTime)
	if err != nil {
		return err
	}
	o.modTime = modTime
	return nil
}
|
||||||
|
|
||||||
|
// Storable returns whether this object is storable (always true)
func (o *Object) Storable() bool {
	return true
}
|
||||||
|
|
||||||
|
// String returns a string version of the object - the remote path,
// or "<nil>" for a nil receiver.
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}
|
||||||
|
|
||||||
|
// Hash is not supported - HDFS exposes no usable checksum here, so
// every hash type returns hash.ErrUnsupported.
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}
|
||||||
|
|
||||||
|
// Open an object for read
|
||||||
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
|
realpath := o.realpath()
|
||||||
|
fs.Debugf(o.fs, "open [%s]", realpath)
|
||||||
|
f, err := o.fs.client.Open(realpath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var offset, limit int64 = 0, -1
|
||||||
|
for _, option := range options {
|
||||||
|
switch x := option.(type) {
|
||||||
|
case *fs.SeekOption:
|
||||||
|
offset = x.Offset
|
||||||
|
case *fs.RangeOption:
|
||||||
|
offset, limit = x.Decode(o.Size())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = f.Seek(offset, io.SeekStart)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if limit != -1 {
|
||||||
|
in = readers.NewLimitedReadCloser(f, limit)
|
||||||
|
} else {
|
||||||
|
in = f
|
||||||
|
}
|
||||||
|
|
||||||
|
return in, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update object
//
// Replaces the contents of the object with the data read from in.
// The sequence is: ensure the parent directory exists, remove any
// existing file at the path, create the file and copy the new data,
// re-stat to learn the size written, then restore the modification
// time from src. If the copy or close fails, the partially written
// file is removed.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	realpath := o.fs.realpath(src.Remote())
	dirname := path.Dir(realpath)
	fs.Debugf(o.fs, "update [%s]", realpath)

	err := o.fs.client.MkdirAll(dirname, 0755)
	if err != nil {
		return err
	}

	// Remove any existing file before creating the new one.
	// NOTE(review): presumably client.Create cannot overwrite an
	// existing file - confirm against the HDFS client docs.
	info, err := o.fs.client.Stat(realpath)
	if err == nil {
		err = o.fs.client.Remove(realpath)
		if err != nil {
			return err
		}
	}

	out, err := o.fs.client.Create(realpath)
	if err != nil {
		return err
	}

	// cleanup removes the partially written file after a failure.
	cleanup := func() {
		rerr := o.fs.client.Remove(realpath)
		if rerr != nil {
			fs.Errorf(o.fs, "failed to remove [%v]: %v", realpath, rerr)
		}
	}

	_, err = io.Copy(out, in)
	if err != nil {
		cleanup()
		return err
	}

	err = out.Close()
	if err != nil {
		cleanup()
		return err
	}

	// Re-stat to pick up the actual size written.
	info, err = o.fs.client.Stat(realpath)
	if err != nil {
		return err
	}

	err = o.SetModTime(ctx, src.ModTime(ctx))
	if err != nil {
		return err
	}
	o.size = info.Size()

	return nil
}
|
||||||
|
|
||||||
|
// Remove an object from the remote
func (o *Object) Remove(ctx context.Context) error {
	realpath := o.fs.realpath(o.remote)
	fs.Debugf(o.fs, "remove [%s]", realpath)
	return o.fs.client.Remove(realpath)
}
|
||||||
|
|
||||||
|
// realpath returns the encoded absolute HDFS path for this object.
func (o *Object) realpath() string {
	return o.fs.opt.Enc.FromStandardPath(xPath(o.Fs().Root(), o.remote))
}
|
||||||
|
|
||||||
|
// Check the interfaces are satisfied
var (
	_ fs.Object = (*Object)(nil)
)
|
||||||
@@ -58,7 +58,7 @@ The input format is comma separated list of key,value pairs. Standard
|
|||||||
|
|
||||||
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
|
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
|
||||||
|
|
||||||
You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
|
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
|
||||||
`,
|
`,
|
||||||
Default: fs.CommaSepList{},
|
Default: fs.CommaSepList{},
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
@@ -115,8 +115,9 @@ type Options struct {
|
|||||||
type Fs struct {
|
type Fs struct {
|
||||||
name string
|
name string
|
||||||
root string
|
root string
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
opt Options // options for this backend
|
opt Options // options for this backend
|
||||||
|
ci *fs.ConfigInfo // global config
|
||||||
endpoint *url.URL
|
endpoint *url.URL
|
||||||
endpointURL string // endpoint as a string
|
endpointURL string // endpoint as a string
|
||||||
httpClient *http.Client
|
httpClient *http.Client
|
||||||
@@ -145,8 +146,7 @@ func statusError(res *http.Response, err error) error {
|
|||||||
|
|
||||||
// NewFs creates a new Fs object from the name and root. It connects to
|
// NewFs creates a new Fs object from the name and root. It connects to
|
||||||
// the host specified in the config file.
|
// the host specified in the config file.
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
ctx := context.TODO()
|
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -172,7 +172,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
client := fshttp.NewClient(fs.Config)
|
client := fshttp.NewClient(ctx)
|
||||||
|
|
||||||
var isFile = false
|
var isFile = false
|
||||||
if !strings.HasSuffix(u.String(), "/") {
|
if !strings.HasSuffix(u.String(), "/") {
|
||||||
@@ -183,9 +183,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return http.ErrUseLastResponse
|
return http.ErrUseLastResponse
|
||||||
}
|
}
|
||||||
// check to see if points to a file
|
// check to see if points to a file
|
||||||
req, err := http.NewRequest("HEAD", u.String(), nil)
|
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
|
||||||
addHeaders(req, opt)
|
addHeaders(req, opt)
|
||||||
res, err := noRedir.Do(req)
|
res, err := noRedir.Do(req)
|
||||||
err = statusError(res, err)
|
err = statusError(res, err)
|
||||||
@@ -210,17 +209,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
root: root,
|
root: root,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
|
ci: ci,
|
||||||
httpClient: client,
|
httpClient: client,
|
||||||
endpoint: u,
|
endpoint: u,
|
||||||
endpointURL: u.String(),
|
endpointURL: u.String(),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
if isFile {
|
if isFile {
|
||||||
return f, fs.ErrorIsFile
|
return f, fs.ErrorIsFile
|
||||||
}
|
}
|
||||||
@@ -389,11 +390,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
|
|||||||
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
|
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
|
||||||
}
|
}
|
||||||
// Do the request
|
// Do the request
|
||||||
req, err := http.NewRequest("GET", URL, nil)
|
req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "readDir failed")
|
return nil, errors.Wrap(err, "readDir failed")
|
||||||
}
|
}
|
||||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
|
||||||
f.addHeaders(req)
|
f.addHeaders(req)
|
||||||
res, err := f.httpClient.Do(req)
|
res, err := f.httpClient.Do(req)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -440,14 +440,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
var (
|
var (
|
||||||
entriesMu sync.Mutex // to protect entries
|
entriesMu sync.Mutex // to protect entries
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
in = make(chan string, fs.Config.Checkers)
|
checkers = f.ci.Checkers
|
||||||
|
in = make(chan string, checkers)
|
||||||
)
|
)
|
||||||
add := func(entry fs.DirEntry) {
|
add := func(entry fs.DirEntry) {
|
||||||
entriesMu.Lock()
|
entriesMu.Lock()
|
||||||
entries = append(entries, entry)
|
entries = append(entries, entry)
|
||||||
entriesMu.Unlock()
|
entriesMu.Unlock()
|
||||||
}
|
}
|
||||||
for i := 0; i < fs.Config.Checkers; i++ {
|
for i := 0; i < checkers; i++ {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
@@ -544,11 +545,10 @@ func (o *Object) stat(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
url := o.url()
|
url := o.url()
|
||||||
req, err := http.NewRequest("HEAD", url, nil)
|
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "stat failed")
|
return errors.Wrap(err, "stat failed")
|
||||||
}
|
}
|
||||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
|
||||||
o.fs.addHeaders(req)
|
o.fs.addHeaders(req)
|
||||||
res, err := o.fs.httpClient.Do(req)
|
res, err := o.fs.httpClient.Do(req)
|
||||||
if err == nil && res.StatusCode == http.StatusNotFound {
|
if err == nil && res.StatusCode == http.StatusNotFound {
|
||||||
@@ -585,7 +585,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
|||||||
return errorReadOnly
|
return errorReadOnly
|
||||||
}
|
}
|
||||||
|
|
||||||
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
|
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
|
||||||
func (o *Object) Storable() bool {
|
func (o *Object) Storable() bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -593,11 +593,10 @@ func (o *Object) Storable() bool {
|
|||||||
// Open a remote http file object for reading. Seek is supported
|
// Open a remote http file object for reading. Seek is supported
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
url := o.url()
|
url := o.url()
|
||||||
req, err := http.NewRequest("GET", url, nil)
|
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "Open failed")
|
return nil, errors.Wrap(err, "Open failed")
|
||||||
}
|
}
|
||||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
|
||||||
|
|
||||||
// Add optional headers
|
// Add optional headers
|
||||||
for k, v := range fs.OpenOptionHeaders(options) {
|
for k, v := range fs.OpenOptionHeaders(options) {
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config/configfile"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/rclone/rclone/lib/rest"
|
"github.com/rclone/rclone/lib/rest"
|
||||||
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
|||||||
ts := httptest.NewServer(handler)
|
ts := httptest.NewServer(handler)
|
||||||
|
|
||||||
// Configure the remote
|
// Configure the remote
|
||||||
config.LoadConfig()
|
configfile.Install()
|
||||||
// fs.Config.LogLevel = fs.LogLevelDebug
|
// fs.Config.LogLevel = fs.LogLevelDebug
|
||||||
// fs.Config.DumpHeaders = true
|
// fs.Config.DumpHeaders = true
|
||||||
// fs.Config.DumpBodies = true
|
// fs.Config.DumpBodies = true
|
||||||
@@ -69,7 +69,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
|
|||||||
m, tidy := prepareServer(t)
|
m, tidy := prepareServer(t)
|
||||||
|
|
||||||
// Instantiate it
|
// Instantiate it
|
||||||
f, err := NewFs(remoteName, "", m)
|
f, err := NewFs(context.Background(), remoteName, "", m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
return f, tidy
|
return f, tidy
|
||||||
@@ -214,7 +214,7 @@ func TestIsAFileRoot(t *testing.T) {
|
|||||||
m, tidy := prepareServer(t)
|
m, tidy := prepareServer(t)
|
||||||
defer tidy()
|
defer tidy()
|
||||||
|
|
||||||
f, err := NewFs(remoteName, "one%.txt", m)
|
f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
|
||||||
assert.Equal(t, err, fs.ErrorIsFile)
|
assert.Equal(t, err, fs.ErrorIsFile)
|
||||||
|
|
||||||
testListRoot(t, f, false)
|
testListRoot(t, f, false)
|
||||||
@@ -224,7 +224,7 @@ func TestIsAFileSubDir(t *testing.T) {
|
|||||||
m, tidy := prepareServer(t)
|
m, tidy := prepareServer(t)
|
||||||
defer tidy()
|
defer tidy()
|
||||||
|
|
||||||
f, err := NewFs(remoteName, "three/underthree.txt", m)
|
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
|
||||||
assert.Equal(t, err, fs.ErrorIsFile)
|
assert.Equal(t, err, fs.ErrorIsFile)
|
||||||
|
|
||||||
entries, err := f.List(context.Background(), "")
|
entries, err := f.List(context.Background(), "")
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ncw/swift"
|
"github.com/ncw/swift/v2"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -24,7 +24,7 @@ func newAuth(f *Fs) *auth {
|
|||||||
// Request constructs an http.Request for authentication
|
// Request constructs an http.Request for authentication
|
||||||
//
|
//
|
||||||
// returns nil for not needed
|
// returns nil for not needed
|
||||||
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
|
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
|
||||||
const retries = 10
|
const retries = 10
|
||||||
for try := 1; try <= retries; try++ {
|
for try := 1; try <= retries; try++ {
|
||||||
err = a.f.getCredentials(context.TODO())
|
err = a.f.getCredentials(context.TODO())
|
||||||
@@ -38,7 +38,7 @@ func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Response parses the result of an http request
|
// Response parses the result of an http request
|
||||||
func (a *auth) Response(resp *http.Response) error {
|
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,23 +4,21 @@ package hubic
|
|||||||
|
|
||||||
// This uses the normal swift mechanism to update the credentials and
|
// This uses the normal swift mechanism to update the credentials and
|
||||||
// ignores the expires field returned by the Hubic API. This may need
|
// ignores the expires field returned by the Hubic API. This may need
|
||||||
// to be revisted after some actual experience.
|
// to be revisited after some actual experience.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
swiftLib "github.com/ncw/swift"
|
swiftLib "github.com/ncw/swift/v2"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/backend/swift"
|
"github.com/rclone/rclone/backend/swift"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/config"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
@@ -57,19 +55,12 @@ func init() {
|
|||||||
Name: "hubic",
|
Name: "hubic",
|
||||||
Description: "Hubic",
|
Description: "Hubic",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(name string, m configmap.Mapper) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||||
err := oauthutil.Config("hubic", name, m, oauthConfig, nil)
|
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||||
if err != nil {
|
OAuth2Config: oauthConfig,
|
||||||
log.Fatalf("Failed to configure token: %v", err)
|
})
|
||||||
}
|
|
||||||
},
|
},
|
||||||
Options: append([]fs.Option{{
|
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
|
||||||
Name: config.ConfigClientID,
|
|
||||||
Help: "Hubic Client Id\nLeave blank normally.",
|
|
||||||
}, {
|
|
||||||
Name: config.ConfigClientSecret,
|
|
||||||
Help: "Hubic Client Secret\nLeave blank normally.",
|
|
||||||
}}, swift.SharedOptions...),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -78,7 +69,7 @@ func init() {
|
|||||||
type credentials struct {
|
type credentials struct {
|
||||||
Token string `json:"token"` // OpenStack token
|
Token string `json:"token"` // OpenStack token
|
||||||
Endpoint string `json:"endpoint"` // OpenStack endpoint
|
Endpoint string `json:"endpoint"` // OpenStack endpoint
|
||||||
Expires string `json:"expires"` // Expires date - eg "2015-11-09T14:24:56+01:00"
|
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote hubic
|
// Fs represents a remote hubic
|
||||||
@@ -117,11 +108,10 @@ func (f *Fs) String() string {
|
|||||||
//
|
//
|
||||||
// The credentials are read into the Fs
|
// The credentials are read into the Fs
|
||||||
func (f *Fs) getCredentials(ctx context.Context) (err error) {
|
func (f *Fs) getCredentials(ctx context.Context) (err error) {
|
||||||
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
|
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
|
||||||
resp, err := f.client.Do(req)
|
resp, err := f.client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -153,8 +143,8 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
client, _, err := oauthutil.NewClient(name, m, oauthConfig)
|
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to configure Hubic")
|
return nil, errors.Wrap(err, "failed to configure Hubic")
|
||||||
}
|
}
|
||||||
@@ -164,13 +154,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Make the swift Connection
|
// Make the swift Connection
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
c := &swiftLib.Connection{
|
c := &swiftLib.Connection{
|
||||||
Auth: newAuth(f),
|
Auth: newAuth(f),
|
||||||
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
|
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
|
||||||
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
|
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
|
||||||
Transport: fshttp.NewTransport(fs.Config),
|
Transport: fshttp.NewTransport(ctx),
|
||||||
}
|
}
|
||||||
err = c.Authenticate()
|
err = c.Authenticate(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "error authenticating swift connection")
|
return nil, errors.Wrap(err, "error authenticating swift connection")
|
||||||
}
|
}
|
||||||
@@ -183,7 +174,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Make inner swift Fs from the connection
|
// Make inner swift Fs from the connection
|
||||||
swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
|
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
|
||||||
if err != nil && err != fs.ErrorIsFile {
|
if err != nil && err != fs.ErrorIsFile {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -153,9 +153,9 @@ type CustomerInfo struct {
|
|||||||
AccountType string `json:"account_type"`
|
AccountType string `json:"account_type"`
|
||||||
SubscriptionType string `json:"subscription_type"`
|
SubscriptionType string `json:"subscription_type"`
|
||||||
Usage int64 `json:"usage"`
|
Usage int64 `json:"usage"`
|
||||||
Qouta int64 `json:"quota"`
|
Quota int64 `json:"quota"`
|
||||||
BusinessUsage int64 `json:"business_usage"`
|
BusinessUsage int64 `json:"business_usage"`
|
||||||
BusinessQouta int64 `json:"business_quota"`
|
BusinessQuota int64 `json:"business_quota"`
|
||||||
WriteLocked bool `json:"write_locked"`
|
WriteLocked bool `json:"write_locked"`
|
||||||
ReadLocked bool `json:"read_locked"`
|
ReadLocked bool `json:"read_locked"`
|
||||||
LockedCause interface{} `json:"locked_cause"`
|
LockedCause interface{} `json:"locked_cause"`
|
||||||
@@ -386,7 +386,7 @@ type Error struct {
|
|||||||
Cause string `xml:"cause"`
|
Cause string `xml:"cause"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Error returns a string for the error and statistifes the error interface
|
// Error returns a string for the error and satisfies the error interface
|
||||||
func (e *Error) Error() string {
|
func (e *Error) Error() string {
|
||||||
out := fmt.Sprintf("error %d", e.StatusCode)
|
out := fmt.Sprintf("error %d", e.StatusCode)
|
||||||
if e.Message != "" {
|
if e.Message != "" {
|
||||||
|
|||||||
@@ -10,7 +10,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
@@ -49,31 +48,27 @@ const (
|
|||||||
rootURL = "https://jfs.jottacloud.com/jfs/"
|
rootURL = "https://jfs.jottacloud.com/jfs/"
|
||||||
apiURL = "https://api.jottacloud.com/"
|
apiURL = "https://api.jottacloud.com/"
|
||||||
baseURL = "https://www.jottacloud.com/"
|
baseURL = "https://www.jottacloud.com/"
|
||||||
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
|
|
||||||
cachePrefix = "rclone-jcmd5-"
|
cachePrefix = "rclone-jcmd5-"
|
||||||
configDevice = "device"
|
configDevice = "device"
|
||||||
configMountpoint = "mountpoint"
|
configMountpoint = "mountpoint"
|
||||||
configTokenURL = "tokenURL"
|
configTokenURL = "tokenURL"
|
||||||
configClientID = "client_id"
|
configClientID = "client_id"
|
||||||
configClientSecret = "client_secret"
|
configClientSecret = "client_secret"
|
||||||
|
configUsername = "username"
|
||||||
configVersion = 1
|
configVersion = 1
|
||||||
|
|
||||||
v1tokenURL = "https://api.jottacloud.com/auth/v1/token"
|
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
|
||||||
v1registerURL = "https://api.jottacloud.com/auth/v1/register"
|
defaultClientID = "jottacli"
|
||||||
v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
|
|
||||||
v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
|
||||||
v1configVersion = 0
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
legacyTokenURL = "https://api.jottacloud.com/auth/v1/token"
|
||||||
// Description of how to auth for this app for a personal account
|
legacyRegisterURL = "https://api.jottacloud.com/auth/v1/register"
|
||||||
oauthConfig = &oauth2.Config{
|
legacyClientID = "nibfk8biu12ju7hpqomr8b1e40"
|
||||||
Endpoint: oauth2.Endpoint{
|
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
||||||
AuthURL: defaultTokenURL,
|
legacyConfigVersion = 0
|
||||||
TokenURL: defaultTokenURL,
|
|
||||||
},
|
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
|
||||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
|
||||||
}
|
teliaCloudClientID = "desktop"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
@@ -83,37 +78,7 @@ func init() {
|
|||||||
Name: "jottacloud",
|
Name: "jottacloud",
|
||||||
Description: "Jottacloud",
|
Description: "Jottacloud",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(name string, m configmap.Mapper) {
|
Config: Config,
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
refresh := false
|
|
||||||
if version, ok := m.Get("configVersion"); ok {
|
|
||||||
ver, err := strconv.Atoi(version)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to parse config version - corrupted config")
|
|
||||||
}
|
|
||||||
refresh = (ver != configVersion) && (ver != v1configVersion)
|
|
||||||
}
|
|
||||||
|
|
||||||
if refresh {
|
|
||||||
fmt.Printf("Config outdated - refreshing\n")
|
|
||||||
} else {
|
|
||||||
tokenString, ok := m.Get("token")
|
|
||||||
if ok && tokenString != "" {
|
|
||||||
fmt.Printf("Already have a token - refresh?\n")
|
|
||||||
if !config.Confirm(false) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
|
|
||||||
if config.Confirm(false) {
|
|
||||||
v1config(ctx, name, m)
|
|
||||||
} else {
|
|
||||||
v2config(ctx, name, m)
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "md5_memory_limit",
|
Name: "md5_memory_limit",
|
||||||
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
|
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
|
||||||
@@ -134,6 +99,11 @@ func init() {
|
|||||||
Help: "Files bigger than this can be resumed if the upload fail's.",
|
Help: "Files bigger than this can be resumed if the upload fail's.",
|
||||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "no_versions",
|
||||||
|
Help: "Avoid server side versioning by deleting files and recreating files instead of overwriting them.",
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
@@ -148,6 +118,183 @@ func init() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Config runs the backend configuration protocol
|
||||||
|
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||||
|
switch config.State {
|
||||||
|
case "":
|
||||||
|
return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type`, []fs.OptionExample{{
|
||||||
|
Value: "standard",
|
||||||
|
Help: "Standard authentication - use this if you're a normal Jottacloud user.",
|
||||||
|
}, {
|
||||||
|
Value: "legacy",
|
||||||
|
Help: "Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
|
||||||
|
}, {
|
||||||
|
Value: "telia",
|
||||||
|
Help: "Telia Cloud authentication - use this if you are using Telia Cloud.",
|
||||||
|
}})
|
||||||
|
case "auth_type_done":
|
||||||
|
// Jump to next state according to config chosen
|
||||||
|
return fs.ConfigGoto(config.Result)
|
||||||
|
case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
|
||||||
|
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||||
|
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\n\nGenerate here: https://www.jottacloud.com/web/secure")
|
||||||
|
case "standard_token":
|
||||||
|
loginToken := config.Result
|
||||||
|
m.Set(configClientID, defaultClientID)
|
||||||
|
m.Set(configClientSecret, "")
|
||||||
|
|
||||||
|
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||||
|
token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to get oauth token")
|
||||||
|
}
|
||||||
|
m.Set(configTokenURL, tokenEndpoint)
|
||||||
|
err = oauthutil.PutToken(name, m, &token, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "error while saving token")
|
||||||
|
}
|
||||||
|
return fs.ConfigGoto("choose_device")
|
||||||
|
case "legacy": // configure a jottacloud backend using legacy authentication
|
||||||
|
m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
|
||||||
|
return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?
|
||||||
|
|
||||||
|
Rclone has it's own Jottacloud API KEY which works fine as long as one
|
||||||
|
only uses rclone on a single machine. When you want to use rclone with
|
||||||
|
this account on more than one machine it's recommended to create a
|
||||||
|
machine specific API key. These keys can NOT be shared between
|
||||||
|
machines.`)
|
||||||
|
case "legacy_api":
|
||||||
|
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||||
|
if config.Result == "true" {
|
||||||
|
deviceRegistration, err := registerDevice(ctx, srv)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to register device")
|
||||||
|
}
|
||||||
|
m.Set(configClientID, deviceRegistration.ClientID)
|
||||||
|
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
|
||||||
|
fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
|
||||||
|
}
|
||||||
|
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
|
||||||
|
case "legacy_username":
|
||||||
|
m.Set(configUsername, config.Result)
|
||||||
|
return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
|
||||||
|
case "legacy_password":
|
||||||
|
m.Set("password", config.Result)
|
||||||
|
m.Set("auth_code", "")
|
||||||
|
return fs.ConfigGoto("legacy_do_auth")
|
||||||
|
case "legacy_auth_code":
|
||||||
|
authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
|
||||||
|
m.Set("auth_code", authCode)
|
||||||
|
return fs.ConfigGoto("legacy_do_auth")
|
||||||
|
case "legacy_do_auth":
|
||||||
|
username, _ := m.Get(configUsername)
|
||||||
|
password, _ := m.Get("password")
|
||||||
|
password = obscure.MustReveal(password)
|
||||||
|
authCode, _ := m.Get("auth_code")
|
||||||
|
|
||||||
|
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||||
|
clientID, ok := m.Get(configClientID)
|
||||||
|
if !ok {
|
||||||
|
clientID = legacyClientID
|
||||||
|
}
|
||||||
|
clientSecret, ok := m.Get(configClientSecret)
|
||||||
|
if !ok {
|
||||||
|
clientSecret = legacyEncryptedClientSecret
|
||||||
|
}
|
||||||
|
|
||||||
|
oauthConfig := &oauth2.Config{
|
||||||
|
Endpoint: oauth2.Endpoint{
|
||||||
|
AuthURL: legacyTokenURL,
|
||||||
|
},
|
||||||
|
ClientID: clientID,
|
||||||
|
ClientSecret: obscure.MustReveal(clientSecret),
|
||||||
|
}
|
||||||
|
token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
|
||||||
|
if err == errAuthCodeRequired {
|
||||||
|
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
|
||||||
|
}
|
||||||
|
m.Set("password", "")
|
||||||
|
m.Set("auth_code", "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to get oauth token")
|
||||||
|
}
|
||||||
|
err = oauthutil.PutToken(name, m, &token, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "error while saving token")
|
||||||
|
}
|
||||||
|
return fs.ConfigGoto("choose_device")
|
||||||
|
case "telia": // telia cloud config
|
||||||
|
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||||
|
m.Set(configClientID, teliaCloudClientID)
|
||||||
|
m.Set(configTokenURL, teliaCloudTokenURL)
|
||||||
|
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||||
|
OAuth2Config: &oauth2.Config{
|
||||||
|
Endpoint: oauth2.Endpoint{
|
||||||
|
AuthURL: teliaCloudAuthURL,
|
||||||
|
TokenURL: teliaCloudTokenURL,
|
||||||
|
},
|
||||||
|
ClientID: teliaCloudClientID,
|
||||||
|
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||||
|
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
case "choose_device":
|
||||||
|
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
|
||||||
|
case "choose_device_query":
|
||||||
|
if config.Result != "true" {
|
||||||
|
m.Set(configDevice, "")
|
||||||
|
m.Set(configMountpoint, "")
|
||||||
|
return fs.ConfigGoto("end")
|
||||||
|
}
|
||||||
|
oAuthClient, _, err := getOAuthClient(ctx, name, m)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||||
|
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
||||||
|
|
||||||
|
cust, err := getCustomerInfo(ctx, apiSrv)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
m.Set(configUsername, cust.Username)
|
||||||
|
|
||||||
|
acc, err := getDriveInfo(ctx, srv, cust.Username)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return fs.ConfigChoose("choose_device_result", "config_device", `Please select the device to use. Normally this will be Jotta`, len(acc.Devices), func(i int) (string, string) {
|
||||||
|
return acc.Devices[i].Name, ""
|
||||||
|
})
|
||||||
|
case "choose_device_result":
|
||||||
|
device := config.Result
|
||||||
|
m.Set(configDevice, device)
|
||||||
|
|
||||||
|
oAuthClient, _, err := getOAuthClient(ctx, name, m)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||||
|
|
||||||
|
username, _ := m.Get(configUsername)
|
||||||
|
dev, err := getDeviceInfo(ctx, srv, path.Join(username, device))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", `Please select the mountpoint to use. Normally this will be Archive.`, len(dev.MountPoints), func(i int) (string, string) {
|
||||||
|
return dev.MountPoints[i].Name, ""
|
||||||
|
})
|
||||||
|
case "choose_device_mountpoint":
|
||||||
|
mountpoint := config.Result
|
||||||
|
m.Set(configMountpoint, mountpoint)
|
||||||
|
return fs.ConfigGoto("end")
|
||||||
|
case "end":
|
||||||
|
// All the config flows end up here in case we need to carry on with something
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||||
|
}
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
Device string `config:"device"`
|
Device string `config:"device"`
|
||||||
@@ -155,6 +302,7 @@ type Options struct {
|
|||||||
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
|
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
|
||||||
TrashedOnly bool `config:"trashed_only"`
|
TrashedOnly bool `config:"trashed_only"`
|
||||||
HardDelete bool `config:"hard_delete"`
|
HardDelete bool `config:"hard_delete"`
|
||||||
|
NoVersions bool `config:"no_versions"`
|
||||||
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
|
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
}
|
}
|
||||||
@@ -208,10 +356,21 @@ func (f *Fs) Features() *fs.Features {
|
|||||||
return f.features
|
return f.features
|
||||||
}
|
}
|
||||||
|
|
||||||
// parsePath parses a box 'url'
|
// joinPath joins two path/url elements
|
||||||
func parsePath(path string) (root string) {
|
//
|
||||||
root = strings.Trim(path, "/")
|
// Does not perform clean on the result like path.Join does,
|
||||||
return
|
// which breaks urls by changing prefix "https://" into "https:/".
|
||||||
|
func joinPath(base string, rel string) string {
|
||||||
|
if rel == "" {
|
||||||
|
return base
|
||||||
|
}
|
||||||
|
if strings.HasSuffix(base, "/") {
|
||||||
|
return base + strings.TrimPrefix(rel, "/")
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(rel, "/") {
|
||||||
|
return strings.TrimSuffix(base, "/") + rel
|
||||||
|
}
|
||||||
|
return base + "/" + rel
|
||||||
}
|
}
|
||||||
|
|
||||||
// retryErrorCodes is a slice of error codes that we will retry
|
// retryErrorCodes is a slice of error codes that we will retry
|
||||||
@@ -226,74 +385,13 @@ var retryErrorCodes = []int{
|
|||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this resp and err
|
// shouldRetry returns a boolean as to whether this resp and err
|
||||||
// deserve to be retried. It returns the err as a convenience
|
// deserve to be retried. It returns the err as a convenience
|
||||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||||
|
if fserrors.ContextError(ctx, &err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||||
}
|
}
|
||||||
|
|
||||||
// v1config configure a jottacloud backend using legacy authentification
|
|
||||||
func v1config(ctx context.Context, name string, m configmap.Mapper) {
|
|
||||||
srv := rest.NewClient(fshttp.NewClient(fs.Config))
|
|
||||||
|
|
||||||
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
|
|
||||||
if config.Confirm(false) {
|
|
||||||
deviceRegistration, err := registerDevice(ctx, srv)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to register device: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
m.Set(configClientID, deviceRegistration.ClientID)
|
|
||||||
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
|
|
||||||
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
|
|
||||||
}
|
|
||||||
|
|
||||||
clientID, ok := m.Get(configClientID)
|
|
||||||
if !ok {
|
|
||||||
clientID = v1ClientID
|
|
||||||
}
|
|
||||||
clientSecret, ok := m.Get(configClientSecret)
|
|
||||||
if !ok {
|
|
||||||
clientSecret = v1EncryptedClientSecret
|
|
||||||
}
|
|
||||||
oauthConfig.ClientID = clientID
|
|
||||||
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
|
|
||||||
|
|
||||||
oauthConfig.Endpoint.AuthURL = v1tokenURL
|
|
||||||
oauthConfig.Endpoint.TokenURL = v1tokenURL
|
|
||||||
|
|
||||||
fmt.Printf("Username> ")
|
|
||||||
username := config.ReadLine()
|
|
||||||
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
|
|
||||||
|
|
||||||
token, err := doAuthV1(ctx, srv, username, password)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to get oauth token: %s", err)
|
|
||||||
}
|
|
||||||
err = oauthutil.PutToken(name, m, &token, true)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Error while saving token: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
|
|
||||||
if config.Confirm(false) {
|
|
||||||
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to load oAuthClient: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
|
|
||||||
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
|
||||||
|
|
||||||
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to setup mountpoint: %s", err)
|
|
||||||
}
|
|
||||||
m.Set(configDevice, device)
|
|
||||||
m.Set(configMountpoint, mountpoint)
|
|
||||||
}
|
|
||||||
|
|
||||||
m.Set("configVersion", strconv.Itoa(v1configVersion))
|
|
||||||
}
|
|
||||||
|
|
||||||
// registerDevice register a new device for use with the jottacloud API
|
// registerDevice register a new device for use with the jottacloud API
|
||||||
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
|
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
|
||||||
// random generator to generate random device names
|
// random generator to generate random device names
|
||||||
@@ -312,7 +410,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
|
|||||||
|
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
RootURL: v1registerURL,
|
RootURL: legacyRegisterURL,
|
||||||
ContentType: "application/x-www-form-urlencoded",
|
ContentType: "application/x-www-form-urlencoded",
|
||||||
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
|
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
|
||||||
Parameters: values,
|
Parameters: values,
|
||||||
@@ -323,8 +421,13 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
|
|||||||
return deviceRegistration, err
|
return deviceRegistration, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// doAuthV1 runs the actual token request for V1 authentification
|
var errAuthCodeRequired = errors.New("auth code required")
|
||||||
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
|
|
||||||
|
// doLegacyAuth runs the actual token request for V1 authentication
|
||||||
|
//
|
||||||
|
// Call this first with blank authCode. If errAuthCodeRequired is
|
||||||
|
// returned then call it again with an authCode
|
||||||
|
func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Config, username, password, authCode string) (token oauth2.Token, err error) {
|
||||||
// prepare out token request with username and password
|
// prepare out token request with username and password
|
||||||
values := url.Values{}
|
values := url.Values{}
|
||||||
values.Set("grant_type", "PASSWORD")
|
values.Set("grant_type", "PASSWORD")
|
||||||
@@ -338,22 +441,19 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
|
|||||||
ContentType: "application/x-www-form-urlencoded",
|
ContentType: "application/x-www-form-urlencoded",
|
||||||
Parameters: values,
|
Parameters: values,
|
||||||
}
|
}
|
||||||
|
if authCode != "" {
|
||||||
|
opts.ExtraHeaders = make(map[string]string)
|
||||||
|
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
|
||||||
|
}
|
||||||
|
|
||||||
// do the first request
|
// do the first request
|
||||||
var jsonToken api.TokenJSON
|
var jsonToken api.TokenJSON
|
||||||
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
|
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
|
||||||
if err != nil {
|
if err != nil && authCode == "" {
|
||||||
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
|
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
|
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
|
||||||
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
|
return token, errAuthCodeRequired
|
||||||
fmt.Printf("Enter verification code> ")
|
|
||||||
authCode := config.ReadLine()
|
|
||||||
|
|
||||||
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
|
|
||||||
opts.ExtraHeaders = make(map[string]string)
|
|
||||||
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
|
|
||||||
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -365,49 +465,11 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
|
|||||||
return token, err
|
return token, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// v2config configure a jottacloud backend using the modern JottaCli token based authentification
|
// doTokenAuth runs the actual token request for V2 authentication
|
||||||
func v2config(ctx context.Context, name string, m configmap.Mapper) {
|
func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, err error) {
|
||||||
srv := rest.NewClient(fshttp.NewClient(fs.Config))
|
|
||||||
|
|
||||||
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
|
|
||||||
fmt.Printf("Login Token> ")
|
|
||||||
loginToken := config.ReadLine()
|
|
||||||
|
|
||||||
token, err := doAuthV2(ctx, srv, loginToken, m)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to get oauth token: %s", err)
|
|
||||||
}
|
|
||||||
err = oauthutil.PutToken(name, m, &token, true)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Error while saving token: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
|
|
||||||
if config.Confirm(false) {
|
|
||||||
oauthConfig.ClientID = "jottacli"
|
|
||||||
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to load oAuthClient: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
|
|
||||||
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
|
||||||
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to setup mountpoint: %s", err)
|
|
||||||
}
|
|
||||||
m.Set(configDevice, device)
|
|
||||||
m.Set(configMountpoint, mountpoint)
|
|
||||||
}
|
|
||||||
|
|
||||||
m.Set("configVersion", strconv.Itoa(configVersion))
|
|
||||||
}
|
|
||||||
|
|
||||||
// doAuthV2 runs the actual token request for V2 authentification
|
|
||||||
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
|
|
||||||
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
|
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return token, err
|
return token, "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// decode login token
|
// decode login token
|
||||||
@@ -415,7 +477,7 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
|
|||||||
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
|
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
|
||||||
err = decoder.Decode(&loginToken)
|
err = decoder.Decode(&loginToken)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return token, err
|
return token, "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// retrieve endpoint urls
|
// retrieve endpoint urls
|
||||||
@@ -424,19 +486,14 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
|
|||||||
RootURL: loginToken.WellKnownLink,
|
RootURL: loginToken.WellKnownLink,
|
||||||
}
|
}
|
||||||
var wellKnown api.WellKnown
|
var wellKnown api.WellKnown
|
||||||
_, err = srv.CallJSON(ctx, &opts, nil, &wellKnown)
|
_, err = apiSrv.CallJSON(ctx, &opts, nil, &wellKnown)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return token, err
|
return token, "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// save the tokenurl
|
|
||||||
oauthConfig.Endpoint.AuthURL = wellKnown.TokenEndpoint
|
|
||||||
oauthConfig.Endpoint.TokenURL = wellKnown.TokenEndpoint
|
|
||||||
m.Set(configTokenURL, wellKnown.TokenEndpoint)
|
|
||||||
|
|
||||||
// prepare out token request with username and password
|
// prepare out token request with username and password
|
||||||
values := url.Values{}
|
values := url.Values{}
|
||||||
values.Set("client_id", "jottacli")
|
values.Set("client_id", defaultClientID)
|
||||||
values.Set("grant_type", "password")
|
values.Set("grant_type", "password")
|
||||||
values.Set("password", loginToken.AuthToken)
|
values.Set("password", loginToken.AuthToken)
|
||||||
values.Set("scope", "offline_access+openid")
|
values.Set("scope", "offline_access+openid")
|
||||||
@@ -444,68 +501,33 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
|
|||||||
values.Encode()
|
values.Encode()
|
||||||
opts = rest.Opts{
|
opts = rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
RootURL: oauthConfig.Endpoint.AuthURL,
|
RootURL: wellKnown.TokenEndpoint,
|
||||||
ContentType: "application/x-www-form-urlencoded",
|
ContentType: "application/x-www-form-urlencoded",
|
||||||
Body: strings.NewReader(values.Encode()),
|
Body: strings.NewReader(values.Encode()),
|
||||||
}
|
}
|
||||||
|
|
||||||
// do the first request
|
// do the first request
|
||||||
var jsonToken api.TokenJSON
|
var jsonToken api.TokenJSON
|
||||||
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
|
_, err = apiSrv.CallJSON(ctx, &opts, nil, &jsonToken)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return token, err
|
return token, "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
token.AccessToken = jsonToken.AccessToken
|
token.AccessToken = jsonToken.AccessToken
|
||||||
token.RefreshToken = jsonToken.RefreshToken
|
token.RefreshToken = jsonToken.RefreshToken
|
||||||
token.TokenType = jsonToken.TokenType
|
token.TokenType = jsonToken.TokenType
|
||||||
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
|
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
|
||||||
return token, err
|
return token, wellKnown.TokenEndpoint, err
|
||||||
}
|
|
||||||
|
|
||||||
// setupMountpoint sets up a custom device and mountpoint if desired by the user
|
|
||||||
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
|
|
||||||
cust, err := getCustomerInfo(ctx, apiSrv)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
acc, err := getDriveInfo(ctx, srv, cust.Username)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
var deviceNames []string
|
|
||||||
for i := range acc.Devices {
|
|
||||||
deviceNames = append(deviceNames, acc.Devices[i].Name)
|
|
||||||
}
|
|
||||||
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
|
|
||||||
device = config.Choose("Devices", deviceNames, nil, false)
|
|
||||||
|
|
||||||
dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
if len(dev.MountPoints) == 0 {
|
|
||||||
return "", "", errors.New("no mountpoints for selected device")
|
|
||||||
}
|
|
||||||
var mountpointNames []string
|
|
||||||
for i := range dev.MountPoints {
|
|
||||||
mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
|
|
||||||
}
|
|
||||||
fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
|
|
||||||
mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)
|
|
||||||
|
|
||||||
return device, mountpoint, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// getCustomerInfo queries general information about the account
|
// getCustomerInfo queries general information about the account
|
||||||
func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
|
func getCustomerInfo(ctx context.Context, apiSrv *rest.Client) (info *api.CustomerInfo, err error) {
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
Path: "account/v1/customer",
|
Path: "account/v1/customer",
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = srv.CallJSON(ctx, &opts, nil, &info)
|
_, err = apiSrv.CallJSON(ctx, &opts, nil, &info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't get customer info")
|
return nil, errors.Wrap(err, "couldn't get customer info")
|
||||||
}
|
}
|
||||||
@@ -551,7 +573,7 @@ func (f *Fs) setEndpointURL() {
|
|||||||
if f.opt.Mountpoint == "" {
|
if f.opt.Mountpoint == "" {
|
||||||
f.opt.Mountpoint = defaultMountpoint
|
f.opt.Mountpoint = defaultMountpoint
|
||||||
}
|
}
|
||||||
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
|
f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
|
||||||
}
|
}
|
||||||
|
|
||||||
// readMetaDataForPath reads the metadata from the path
|
// readMetaDataForPath reads the metadata from the path
|
||||||
@@ -564,7 +586,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
if apiErr, ok := err.(*api.Error); ok {
|
if apiErr, ok := err.(*api.Error); ok {
|
||||||
@@ -622,7 +644,7 @@ func (f *Fs) filePath(file string) string {
|
|||||||
// This filter catches all refresh requests, reads the body,
|
// This filter catches all refresh requests, reads the body,
|
||||||
// changes the case and then sends it on
|
// changes the case and then sends it on
|
||||||
func grantTypeFilter(req *http.Request) {
|
func grantTypeFilter(req *http.Request) {
|
||||||
if v1tokenURL == req.URL.String() {
|
if legacyTokenURL == req.URL.String() {
|
||||||
// read the entire body
|
// read the entire body
|
||||||
refreshBody, err := ioutil.ReadAll(req.Body)
|
refreshBody, err := ioutil.ReadAll(req.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -638,54 +660,50 @@ func grantTypeFilter(req *http.Request) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuthClient *http.Client, ts *oauthutil.TokenSource, err error) {
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|
||||||
ctx := context.TODO()
|
|
||||||
// Parse config into Options struct
|
|
||||||
opt := new(Options)
|
|
||||||
err := configstruct.Set(m, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check config version
|
// Check config version
|
||||||
var ver int
|
var ver int
|
||||||
version, ok := m.Get("configVersion")
|
version, ok := m.Get("configVersion")
|
||||||
if ok {
|
if ok {
|
||||||
ver, err = strconv.Atoi(version)
|
ver, err = strconv.Atoi(version)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.New("Failed to parse config version")
|
return nil, nil, errors.New("Failed to parse config version")
|
||||||
}
|
}
|
||||||
ok = (ver == configVersion) || (ver == v1configVersion)
|
ok = (ver == configVersion) || (ver == legacyConfigVersion)
|
||||||
}
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, errors.New("Outdated config - please reconfigure this backend")
|
return nil, nil, errors.New("Outdated config - please reconfigure this backend")
|
||||||
}
|
}
|
||||||
|
|
||||||
baseClient := fshttp.NewClient(fs.Config)
|
baseClient := fshttp.NewClient(ctx)
|
||||||
|
oauthConfig := &oauth2.Config{
|
||||||
|
Endpoint: oauth2.Endpoint{
|
||||||
|
AuthURL: defaultTokenURL,
|
||||||
|
TokenURL: defaultTokenURL,
|
||||||
|
},
|
||||||
|
}
|
||||||
if ver == configVersion {
|
if ver == configVersion {
|
||||||
oauthConfig.ClientID = "jottacli"
|
oauthConfig.ClientID = defaultClientID
|
||||||
// if custom endpoints are set use them else stick with defaults
|
// if custom endpoints are set use them else stick with defaults
|
||||||
if tokenURL, ok := m.Get(configTokenURL); ok {
|
if tokenURL, ok := m.Get(configTokenURL); ok {
|
||||||
oauthConfig.Endpoint.TokenURL = tokenURL
|
oauthConfig.Endpoint.TokenURL = tokenURL
|
||||||
// jottacloud is weird. we need to use the tokenURL as authURL
|
// jottacloud is weird. we need to use the tokenURL as authURL
|
||||||
oauthConfig.Endpoint.AuthURL = tokenURL
|
oauthConfig.Endpoint.AuthURL = tokenURL
|
||||||
}
|
}
|
||||||
} else if ver == v1configVersion {
|
} else if ver == legacyConfigVersion {
|
||||||
clientID, ok := m.Get(configClientID)
|
clientID, ok := m.Get(configClientID)
|
||||||
if !ok {
|
if !ok {
|
||||||
clientID = v1ClientID
|
clientID = legacyClientID
|
||||||
}
|
}
|
||||||
clientSecret, ok := m.Get(configClientSecret)
|
clientSecret, ok := m.Get(configClientSecret)
|
||||||
if !ok {
|
if !ok {
|
||||||
clientSecret = v1EncryptedClientSecret
|
clientSecret = legacyEncryptedClientSecret
|
||||||
}
|
}
|
||||||
oauthConfig.ClientID = clientID
|
oauthConfig.ClientID = clientID
|
||||||
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
|
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
|
||||||
|
|
||||||
oauthConfig.Endpoint.TokenURL = v1tokenURL
|
oauthConfig.Endpoint.TokenURL = legacyTokenURL
|
||||||
oauthConfig.Endpoint.AuthURL = v1tokenURL
|
oauthConfig.Endpoint.AuthURL = legacyTokenURL
|
||||||
|
|
||||||
// add the request filter to fix token refresh
|
// add the request filter to fix token refresh
|
||||||
if do, ok := baseClient.Transport.(interface {
|
if do, ok := baseClient.Transport.(interface {
|
||||||
@@ -698,13 +716,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Create OAuth Client
|
// Create OAuth Client
|
||||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
|
oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
|
return nil, nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
|
||||||
|
}
|
||||||
|
return oAuthClient, ts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFs constructs an Fs from the path, container:path
|
||||||
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
|
// Parse config into Options struct
|
||||||
|
opt := new(Options)
|
||||||
|
err := configstruct.Set(m, opt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
oAuthClient, ts, err := getOAuthClient(ctx, name, m)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
rootIsDir := strings.HasSuffix(root, "/")
|
rootIsDir := strings.HasSuffix(root, "/")
|
||||||
root = parsePath(root)
|
root = strings.Trim(root, "/")
|
||||||
|
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
@@ -712,14 +746,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
opt: *opt,
|
opt: *opt,
|
||||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||||
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
|
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
|
||||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: true,
|
CaseInsensitive: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
WriteMimeType: true,
|
WriteMimeType: false,
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
f.srv.SetErrorHandler(errorHandler)
|
f.srv.SetErrorHandler(errorHandler)
|
||||||
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
|
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
|
||||||
f.features.ListR = nil
|
f.features.ListR = nil
|
||||||
@@ -728,6 +762,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
// Renew the token in the background
|
// Renew the token in the background
|
||||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||||
_, err := f.readMetaDataForPath(ctx, "")
|
_, err := f.readMetaDataForPath(ctx, "")
|
||||||
|
if err == fs.ErrorNotAFile {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -801,7 +838,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
|
|||||||
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
|
resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
//fmt.Printf("...Error %v\n", err)
|
//fmt.Printf("...Error %v\n", err)
|
||||||
@@ -830,7 +867,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
var result api.JottaFolder
|
var result api.JottaFolder
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -942,7 +979,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
|||||||
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
|
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if apiErr, ok := err.(*api.Error); ok {
|
if apiErr, ok := err.(*api.Error); ok {
|
||||||
@@ -1048,7 +1085,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.Call(ctx, &opts)
|
resp, err = f.srv.Call(ctx, &opts)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't purge directory")
|
return errors.Wrap(err, "couldn't purge directory")
|
||||||
@@ -1070,8 +1107,8 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Purge deletes all the files and the container
|
// Purge deletes all the files and the container
|
||||||
func (f *Fs) Purge(ctx context.Context) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
return f.purgeCheck(ctx, "", false)
|
return f.purgeCheck(ctx, dir, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// copyOrMoves copies or moves directories or files depending on the method parameter
|
// copyOrMoves copies or moves directories or files depending on the method parameter
|
||||||
@@ -1087,8 +1124,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
|
|||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
|
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
|
||||||
retry, _ := shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
return (retry && resp.StatusCode != 500), err
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1096,7 +1132,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
|
|||||||
return info, nil
|
return info, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -1126,7 +1162,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
//return f.newObjectWithInfo(remote, &result)
|
//return f.newObjectWithInfo(remote, &result)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move src to this remote using server side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -1157,7 +1193,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
// using server side move operations.
|
// using server-side move operations.
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -1192,18 +1228,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
|
|
||||||
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
|
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
|
||||||
|
|
||||||
// surprise! jottacloud fucked up dirmove - the api spits out an error but
|
|
||||||
// dir gets moved regardless
|
|
||||||
if apiErr, ok := err.(*api.Error); ok {
|
|
||||||
if apiErr.StatusCode == 500 {
|
|
||||||
_, err := f.NewObject(ctx, dstRemote)
|
|
||||||
if err == fs.ErrorNotAFile {
|
|
||||||
log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't move directory")
|
return errors.Wrap(err, "couldn't move directory")
|
||||||
}
|
}
|
||||||
@@ -1228,7 +1252,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
|||||||
var result api.JottaFile
|
var result api.JottaFile
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
if apiErr, ok := err.(*api.Error); ok {
|
if apiErr, ok := err.(*api.Error); ok {
|
||||||
@@ -1252,8 +1276,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
|||||||
if result.PublicSharePath == "" {
|
if result.PublicSharePath == "" {
|
||||||
return "", errors.New("couldn't create public link - no link path received")
|
return "", errors.New("couldn't create public link - no link path received")
|
||||||
}
|
}
|
||||||
link = path.Join(baseURL, result.PublicSharePath)
|
return joinPath(baseURL, result.PublicSharePath), nil
|
||||||
return link, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// About gets quota information
|
// About gets quota information
|
||||||
@@ -1406,7 +1429,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
|
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -1477,6 +1500,22 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
|
|||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||||
|
if o.fs.opt.NoVersions {
|
||||||
|
err := o.readMetaData(ctx, false)
|
||||||
|
if err == nil {
|
||||||
|
// if the object exists delete it
|
||||||
|
err = o.remove(ctx, true)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to remove old object")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// if the object does not exist we can just continue but if the error is something different we should report that
|
||||||
|
if err != fs.ErrorObjectNotFound {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
o.fs.tokenRenewer.Start()
|
||||||
|
defer o.fs.tokenRenewer.Stop()
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
md5String, err := src.Hash(ctx, hash.MD5)
|
md5String, err := src.Hash(ctx, hash.MD5)
|
||||||
if err != nil || md5String == "" {
|
if err != nil || md5String == "" {
|
||||||
@@ -1517,13 +1556,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
var response api.AllocateFileResponse
|
var response api.AllocateFileResponse
|
||||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||||
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
|
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the file state is INCOMPLETE and CORRPUT, try to upload a then
|
// If the file state is INCOMPLETE and CORRUPT, try to upload a then
|
||||||
if response.State != "COMPLETED" {
|
if response.State != "COMPLETED" {
|
||||||
// how much do we still have to upload?
|
// how much do we still have to upload?
|
||||||
remainingBytes := size - response.ResumePos
|
remainingBytes := size - response.ResumePos
|
||||||
@@ -1565,8 +1604,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
func (o *Object) remove(ctx context.Context, hard bool) error {
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
Path: o.filePath(),
|
Path: o.filePath(),
|
||||||
@@ -1574,7 +1612,7 @@ func (o *Object) Remove(ctx context.Context) error {
|
|||||||
NoResponse: true,
|
NoResponse: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
if o.fs.opt.HardDelete {
|
if hard {
|
||||||
opts.Parameters.Set("rm", "true")
|
opts.Parameters.Set("rm", "true")
|
||||||
} else {
|
} else {
|
||||||
opts.Parameters.Set("dl", "true")
|
opts.Parameters.Set("dl", "true")
|
||||||
@@ -1582,10 +1620,15 @@ func (o *Object) Remove(ctx context.Context) error {
|
|||||||
|
|
||||||
return o.fs.pacer.Call(func() (bool, error) {
|
return o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
|
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Remove an object
|
||||||
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
|
return o.remove(ctx, o.fs.opt.HardDelete)
|
||||||
|
}
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
// Check the interfaces are satisfied
|
||||||
var (
|
var (
|
||||||
_ fs.Fs = (*Fs)(nil)
|
_ fs.Fs = (*Fs)(nil)
|
||||||
|
|||||||
@@ -256,7 +256,7 @@ func (f *Fs) fullPath(part string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs a new filesystem given a root path and configuration options
|
// NewFs constructs a new filesystem given a root path and configuration options
|
||||||
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err = configstruct.Set(m, opt)
|
err = configstruct.Set(m, opt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -267,7 +267,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
httpClient := httpclient.New()
|
httpClient := httpclient.New()
|
||||||
httpClient.Client = fshttp.NewClient(fs.Config)
|
httpClient.Client = fshttp.NewClient(ctx)
|
||||||
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
|
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
|
||||||
basicAuth := fmt.Sprintf("Basic %s",
|
basicAuth := fmt.Sprintf("Basic %s",
|
||||||
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
|
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
|
||||||
@@ -287,7 +287,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
|||||||
DuplicateFiles: false,
|
DuplicateFiles: false,
|
||||||
BucketBased: false,
|
BucketBased: false,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
for _, m := range mounts {
|
for _, m := range mounts {
|
||||||
if opt.MountID != "" {
|
if opt.MountID != "" {
|
||||||
if m.Id == opt.MountID {
|
if m.Id == opt.MountID {
|
||||||
@@ -534,7 +534,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// About reports space usage (with a MB precision)
|
// About reports space usage (with a MiB precision)
|
||||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
mount, err := f.client.MountsDetails(f.mountID)
|
mount, err := f.client.MountsDetails(f.mountID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ package local
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"os"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -15,6 +16,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
|||||||
var s syscall.Statfs_t
|
var s syscall.Statfs_t
|
||||||
err := syscall.Statfs(f.root, &s)
|
err := syscall.Statfs(f.root, &s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil, fs.ErrorDirNotFound
|
||||||
|
}
|
||||||
return nil, errors.Wrap(err, "failed to read disk usage")
|
return nil, errors.Wrap(err, "failed to read disk usage")
|
||||||
}
|
}
|
||||||
bs := int64(s.Bsize) // nolint: unconvert
|
bs := int64(s.Bsize) // nolint: unconvert
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// +build windows plan9
|
// +build windows plan9 js
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// +build !windows,!plan9
|
// +build !windows,!plan9,!js
|
||||||
|
|
||||||
package local
|
package local
|
||||||
|
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ import (
|
|||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
"github.com/rclone/rclone/lib/file"
|
"github.com/rclone/rclone/lib/file"
|
||||||
"github.com/rclone/rclone/lib/readers"
|
"github.com/rclone/rclone/lib/readers"
|
||||||
|
"golang.org/x/text/unicode/norm"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Constants
|
// Constants
|
||||||
@@ -42,8 +43,9 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
CommandHelp: commandHelp,
|
CommandHelp: commandHelp,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "nounc",
|
Name: "nounc",
|
||||||
Help: "Disable UNC (long path names) conversion on Windows",
|
Help: "Disable UNC (long path names) conversion on Windows",
|
||||||
|
Advanced: runtime.GOOS != "windows",
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "true",
|
Value: "true",
|
||||||
Help: "Disables long file names",
|
Help: "Disables long file names",
|
||||||
@@ -71,12 +73,35 @@ points, as you explicitly acknowledge that they should be skipped.`,
|
|||||||
NoPrefix: true,
|
NoPrefix: true,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "no_unicode_normalization",
|
Name: "zero_size_links",
|
||||||
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
|
Help: `Assume the Stat size of links is zero (and read them instead) (Deprecated)
|
||||||
|
|
||||||
This flag is deprecated now. Rclone no longer normalizes unicode file
|
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places
|
||||||
names, but it compares them with unicode normalization in the sync
|
|
||||||
routine instead.`,
|
- Windows
|
||||||
|
- On some virtual filesystems (such ash LucidLink)
|
||||||
|
- Android
|
||||||
|
|
||||||
|
So rclone now always reads the link
|
||||||
|
`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "unicode_normalization",
|
||||||
|
Help: `Apply unicode NFC normalization to paths and filenames
|
||||||
|
|
||||||
|
This flag can be used to normalize file names into unicode NFC form
|
||||||
|
that are read from the local filesystem.
|
||||||
|
|
||||||
|
Rclone does not normally touch the encoding of file names it reads from
|
||||||
|
the file system.
|
||||||
|
|
||||||
|
This can be useful when using macOS as it normally provides decomposed (NFD)
|
||||||
|
unicode which in some language (eg Korean) doesn't display properly on
|
||||||
|
some OSes.
|
||||||
|
|
||||||
|
Note that rclone compares filenames with unicode normalization in the sync
|
||||||
|
routine so this flag shouldn't normally be used.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
@@ -87,13 +112,13 @@ Normally rclone checks the size and modification time of files as they
|
|||||||
are being uploaded and aborts with a message which starts "can't copy
|
are being uploaded and aborts with a message which starts "can't copy
|
||||||
- source file is being updated" if the file changes during upload.
|
- source file is being updated" if the file changes during upload.
|
||||||
|
|
||||||
However on some file systems this modification time check may fail (eg
|
However on some file systems this modification time check may fail (e.g.
|
||||||
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
|
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
|
||||||
check can be disabled with this flag.
|
check can be disabled with this flag.
|
||||||
|
|
||||||
If this flag is set, rclone will use its best efforts to transfer a
|
If this flag is set, rclone will use its best efforts to transfer a
|
||||||
file which is being updated. If the file is only having things
|
file which is being updated. If the file is only having things
|
||||||
appended to it (eg a log) then rclone will transfer the log file with
|
appended to it (e.g. a log) then rclone will transfer the log file with
|
||||||
the size it had the first time rclone saw it.
|
the size it had the first time rclone saw it.
|
||||||
|
|
||||||
If the file is being modified throughout (not just appended to) then
|
If the file is being modified throughout (not just appended to) then
|
||||||
@@ -134,6 +159,17 @@ Windows/macOS and case sensitive for everything else. Use this flag
|
|||||||
to override the default choice.`,
|
to override the default choice.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "no_preallocate",
|
||||||
|
Help: `Disable preallocation of disk space for transferred files
|
||||||
|
|
||||||
|
Preallocation of disk space helps prevent filesystem fragmentation.
|
||||||
|
However, some virtual filesystem layers (such as Google Drive File
|
||||||
|
Stream) may incorrectly set the actual file size equal to the
|
||||||
|
preallocated space, causing checksum and file size checks to fail.
|
||||||
|
Use this flag to disable preallocation.`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "no_sparse",
|
Name: "no_sparse",
|
||||||
Help: `Disable sparse files for multi-thread downloads
|
Help: `Disable sparse files for multi-thread downloads
|
||||||
@@ -144,6 +180,17 @@ the OS zeros the file. However sparse files may be undesirable as they
|
|||||||
cause disk fragmentation and can be slow to work with.`,
|
cause disk fragmentation and can be slow to work with.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "no_set_modtime",
|
||||||
|
Help: `Disable setting modtime
|
||||||
|
|
||||||
|
Normally rclone updates modification time of files after they are done
|
||||||
|
uploading. This can cause permissions issues on Linux platforms when
|
||||||
|
the user rclone is running as does not own the file uploaded, such as
|
||||||
|
when copying to a CIFS mount owned by another user. If this option is
|
||||||
|
enabled, rclone will no longer update the modtime after copying a file.`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
@@ -159,13 +206,15 @@ type Options struct {
|
|||||||
FollowSymlinks bool `config:"copy_links"`
|
FollowSymlinks bool `config:"copy_links"`
|
||||||
TranslateSymlinks bool `config:"links"`
|
TranslateSymlinks bool `config:"links"`
|
||||||
SkipSymlinks bool `config:"skip_links"`
|
SkipSymlinks bool `config:"skip_links"`
|
||||||
NoUTFNorm bool `config:"no_unicode_normalization"`
|
UTFNorm bool `config:"unicode_normalization"`
|
||||||
NoCheckUpdated bool `config:"no_check_updated"`
|
NoCheckUpdated bool `config:"no_check_updated"`
|
||||||
NoUNC bool `config:"nounc"`
|
NoUNC bool `config:"nounc"`
|
||||||
OneFileSystem bool `config:"one_file_system"`
|
OneFileSystem bool `config:"one_file_system"`
|
||||||
CaseSensitive bool `config:"case_sensitive"`
|
CaseSensitive bool `config:"case_sensitive"`
|
||||||
CaseInsensitive bool `config:"case_insensitive"`
|
CaseInsensitive bool `config:"case_insensitive"`
|
||||||
|
NoPreAllocate bool `config:"no_preallocate"`
|
||||||
NoSparse bool `config:"no_sparse"`
|
NoSparse bool `config:"no_sparse"`
|
||||||
|
NoSetModTime bool `config:"no_set_modtime"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -205,7 +254,7 @@ type Object struct {
|
|||||||
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path
|
// NewFs constructs an Fs from the path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -216,10 +265,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return nil, errLinksAndCopyLinks
|
return nil, errLinksAndCopyLinks
|
||||||
}
|
}
|
||||||
|
|
||||||
if opt.NoUTFNorm {
|
|
||||||
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
|
||||||
}
|
|
||||||
|
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
@@ -233,7 +278,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
IsLocal: true,
|
IsLocal: true,
|
||||||
SlowHash: true,
|
SlowHash: true,
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
if opt.FollowSymlinks {
|
if opt.FollowSymlinks {
|
||||||
f.lstat = os.Stat
|
f.lstat = os.Stat
|
||||||
}
|
}
|
||||||
@@ -422,6 +467,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
for _, name := range names {
|
for _, name := range names {
|
||||||
namepath := filepath.Join(fsDirPath, name)
|
namepath := filepath.Join(fsDirPath, name)
|
||||||
fi, fierr := os.Lstat(namepath)
|
fi, fierr := os.Lstat(namepath)
|
||||||
|
if os.IsNotExist(fierr) {
|
||||||
|
// skip entry removed by a concurrent goroutine
|
||||||
|
continue
|
||||||
|
}
|
||||||
if fierr != nil {
|
if fierr != nil {
|
||||||
err = errors.Wrapf(err, "failed to read directory %q", namepath)
|
err = errors.Wrapf(err, "failed to read directory %q", namepath)
|
||||||
fs.Errorf(dir, "%v", fierr)
|
fs.Errorf(dir, "%v", fierr)
|
||||||
@@ -444,8 +493,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||||
localPath := filepath.Join(fsDirPath, name)
|
localPath := filepath.Join(fsDirPath, name)
|
||||||
fi, err = os.Stat(localPath)
|
fi, err = os.Stat(localPath)
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) || isCircularSymlinkError(err) {
|
||||||
// Skip bad symlinks
|
// Skip bad symlinks and circular symlinks
|
||||||
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
|
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
|
||||||
fs.Errorf(newRemote, "Listing error: %v", err)
|
fs.Errorf(newRemote, "Listing error: %v", err)
|
||||||
err = accounting.Stats(ctx).Error(err)
|
err = accounting.Stats(ctx).Error(err)
|
||||||
@@ -482,6 +531,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
|
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
|
||||||
|
if f.opt.UTFNorm {
|
||||||
|
filename = norm.NFC.String(filename)
|
||||||
|
}
|
||||||
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
|
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
|
||||||
|
|
||||||
if !utf8.ValidString(filename) {
|
if !utf8.ValidString(filename) {
|
||||||
@@ -542,6 +594,10 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|||||||
|
|
||||||
// Precision of the file system
|
// Precision of the file system
|
||||||
func (f *Fs) Precision() (precision time.Duration) {
|
func (f *Fs) Precision() (precision time.Duration) {
|
||||||
|
if f.opt.NoSetModTime {
|
||||||
|
return fs.ModTimeNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
f.precisionOk.Do(func() {
|
f.precisionOk.Do(func() {
|
||||||
f.precision = f.readPrecision()
|
f.precision = f.readPrecision()
|
||||||
})
|
})
|
||||||
@@ -600,23 +656,28 @@ func (f *Fs) readPrecision() (precision time.Duration) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Purge deletes all the files and directories
|
// Purge deletes all the files in the directory
|
||||||
//
|
//
|
||||||
// Optional interface: Only implement this if you have a way of
|
// Optional interface: Only implement this if you have a way of
|
||||||
// deleting all the files quicker than just running Remove() on the
|
// deleting all the files quicker than just running Remove() on the
|
||||||
// result of List()
|
// result of List()
|
||||||
func (f *Fs) Purge(ctx context.Context) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
fi, err := f.lstat(f.root)
|
dir = f.localPath(dir)
|
||||||
|
fi, err := f.lstat(dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// already purged
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return fs.ErrorDirNotFound
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !fi.Mode().IsDir() {
|
if !fi.Mode().IsDir() {
|
||||||
return errors.Errorf("can't purge non directory: %q", f.root)
|
return errors.Errorf("can't purge non directory: %q", dir)
|
||||||
}
|
}
|
||||||
return os.RemoveAll(f.root)
|
return os.RemoveAll(dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move src to this remote using server side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -680,7 +741,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
// using server side move operations.
|
// using server-side move operations.
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -878,6 +939,9 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
|||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
// SetModTime sets the modification time of the local fs object
|
||||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||||
|
if o.fs.opt.NoSetModTime {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
var err error
|
var err error
|
||||||
if o.translatedLink {
|
if o.translatedLink {
|
||||||
err = lChtimes(o.path, modTime, modTime)
|
err = lChtimes(o.path, modTime, modTime)
|
||||||
@@ -1088,10 +1152,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Pre-allocate the file for performance reasons
|
if !o.fs.opt.NoPreAllocate {
|
||||||
err = file.PreAllocate(src.Size(), f)
|
// Pre-allocate the file for performance reasons
|
||||||
if err != nil {
|
err = file.PreAllocate(src.Size(), f)
|
||||||
fs.Debugf(o, "Failed to pre-allocate: %v", err)
|
if err != nil {
|
||||||
|
fs.Debugf(o, "Failed to pre-allocate: %v", err)
|
||||||
|
if err == file.ErrDiskFull {
|
||||||
|
_ = f.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
out = f
|
out = f
|
||||||
} else {
|
} else {
|
||||||
@@ -1178,9 +1248,11 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Pre-allocate the file for performance reasons
|
// Pre-allocate the file for performance reasons
|
||||||
err = file.PreAllocate(size, out)
|
if !f.opt.NoPreAllocate {
|
||||||
if err != nil {
|
err = file.PreAllocate(size, out)
|
||||||
fs.Debugf(o, "Failed to pre-allocate: %v", err)
|
if err != nil {
|
||||||
|
fs.Debugf(o, "Failed to pre-allocate: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if !f.opt.NoSparse && file.SetSparseImplemented {
|
if !f.opt.NoSparse && file.SetSparseImplemented {
|
||||||
sparseWarning.Do(func() {
|
sparseWarning.Do(func() {
|
||||||
@@ -1189,7 +1261,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
|
|||||||
// Set the file to be a sparse file (important on Windows)
|
// Set the file to be a sparse file (important on Windows)
|
||||||
err = file.SetSparse(out)
|
err = file.SetSparse(out)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(o, "Failed to set sparse: %v", err)
|
fs.Errorf(o, "Failed to set sparse: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1207,6 +1279,20 @@ func (o *Object) setMetadata(info os.FileInfo) {
|
|||||||
o.modTime = info.ModTime()
|
o.modTime = info.ModTime()
|
||||||
o.mode = info.Mode()
|
o.mode = info.Mode()
|
||||||
o.fs.objectMetaMu.Unlock()
|
o.fs.objectMetaMu.Unlock()
|
||||||
|
// Read the size of the link.
|
||||||
|
//
|
||||||
|
// The value in info.Size() is not always correct
|
||||||
|
// - Windows links read as 0 size
|
||||||
|
// - Some virtual filesystems (such ash LucidLink) links read as 0 size
|
||||||
|
// - Android - some versions the links are larger than readlink suggests
|
||||||
|
if o.translatedLink {
|
||||||
|
linkdst, err := os.Readlink(o.path)
|
||||||
|
if err != nil {
|
||||||
|
fs.Errorf(o, "Failed to read link size: %v", err)
|
||||||
|
} else {
|
||||||
|
o.size = int64(len(linkdst))
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stat an Object into info
|
// Stat an Object into info
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -89,9 +88,6 @@ func TestSymlink(t *testing.T) {
|
|||||||
|
|
||||||
// Object viewed as symlink
|
// Object viewed as symlink
|
||||||
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
|
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
file2.Size = 0 // symlinks are 0 length under Windows
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object viewed as destination
|
// Object viewed as destination
|
||||||
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
|
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
|
||||||
@@ -121,9 +117,6 @@ func TestSymlink(t *testing.T) {
|
|||||||
// Create a symlink
|
// Create a symlink
|
||||||
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
|
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
|
||||||
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
|
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
file3.Size = 0 // symlinks are 0 length under Windows
|
|
||||||
}
|
|
||||||
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
|
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
|
||||||
if haveLChtimes {
|
if haveLChtimes {
|
||||||
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
|
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
|
||||||
@@ -142,9 +135,7 @@ func TestSymlink(t *testing.T) {
|
|||||||
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
|
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
|
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
|
||||||
if runtime.GOOS != "windows" {
|
assert.Equal(t, int64(8), o.Size())
|
||||||
assert.Equal(t, int64(8), o.Size())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check that NewObject doesn't see the non suffixed version
|
// Check that NewObject doesn't see the non suffixed version
|
||||||
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
|
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
|
||||||
@@ -172,6 +163,6 @@ func TestSymlinkError(t *testing.T) {
|
|||||||
"links": "true",
|
"links": "true",
|
||||||
"copy_links": "true",
|
"copy_links": "true",
|
||||||
}
|
}
|
||||||
_, err := NewFs("local", "/", m)
|
_, err := NewFs(context.Background(), "local", "/", m)
|
||||||
assert.Equal(t, errLinksAndCopyLinks, err)
|
assert.Equal(t, errLinksAndCopyLinks, err)
|
||||||
}
|
}
|
||||||
|
|||||||
22
backend/local/symlink.go
Normal file
22
backend/local/symlink.go
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
// +build !windows,!plan9,!js
|
||||||
|
|
||||||
|
package local
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// isCircularSymlinkError checks if the current error code is because of a circular symlink
|
||||||
|
func isCircularSymlinkError(err error) bool {
|
||||||
|
if err != nil {
|
||||||
|
if newerr, ok := err.(*os.PathError); ok {
|
||||||
|
if errcode, ok := newerr.Err.(syscall.Errno); ok {
|
||||||
|
if errcode == syscall.ELOOP {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
17
backend/local/symlink_other.go
Normal file
17
backend/local/symlink_other.go
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
// +build windows plan9 js
|
||||||
|
|
||||||
|
package local
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// isCircularSymlinkError checks if the current error code is because of a circular symlink
|
||||||
|
func isCircularSymlinkError(err error) bool {
|
||||||
|
if err != nil {
|
||||||
|
if strings.Contains(err.Error(), "The name of the file cannot be resolved by the system") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
@@ -6,8 +6,8 @@ import (
|
|||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -48,7 +48,7 @@ func (w *BinWriter) Reader() io.Reader {
|
|||||||
// WritePu16 writes a short as unsigned varint
|
// WritePu16 writes a short as unsigned varint
|
||||||
func (w *BinWriter) WritePu16(val int) {
|
func (w *BinWriter) WritePu16(val int) {
|
||||||
if val < 0 || val > 65535 {
|
if val < 0 || val > 65535 {
|
||||||
log.Fatalf("Invalid UInt16 %v", val)
|
panic(fmt.Sprintf("Invalid UInt16 %v", val))
|
||||||
}
|
}
|
||||||
w.WritePu64(int64(val))
|
w.WritePu64(int64(val))
|
||||||
}
|
}
|
||||||
@@ -56,7 +56,7 @@ func (w *BinWriter) WritePu16(val int) {
|
|||||||
// WritePu32 writes a signed long as unsigned varint
|
// WritePu32 writes a signed long as unsigned varint
|
||||||
func (w *BinWriter) WritePu32(val int64) {
|
func (w *BinWriter) WritePu32(val int64) {
|
||||||
if val < 0 || val > 4294967295 {
|
if val < 0 || val > 4294967295 {
|
||||||
log.Fatalf("Invalid UInt32 %v", val)
|
panic(fmt.Sprintf("Invalid UInt32 %v", val))
|
||||||
}
|
}
|
||||||
w.WritePu64(val)
|
w.WritePu64(val)
|
||||||
}
|
}
|
||||||
@@ -64,7 +64,7 @@ func (w *BinWriter) WritePu32(val int64) {
|
|||||||
// WritePu64 writes an unsigned (actually, signed) long as unsigned varint
|
// WritePu64 writes an unsigned (actually, signed) long as unsigned varint
|
||||||
func (w *BinWriter) WritePu64(val int64) {
|
func (w *BinWriter) WritePu64(val int64) {
|
||||||
if val < 0 {
|
if val < 0 {
|
||||||
log.Fatalf("Invalid UInt64 %v", val)
|
panic(fmt.Sprintf("Invalid UInt64 %v", val))
|
||||||
}
|
}
|
||||||
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
|
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
|
||||||
}
|
}
|
||||||
@@ -123,7 +123,7 @@ func (r *BinReader) check(err error) bool {
|
|||||||
r.err = err
|
r.err = err
|
||||||
}
|
}
|
||||||
if err != io.EOF {
|
if err != io.EOF {
|
||||||
log.Fatalf("Error parsing response: %v", err)
|
panic(fmt.Sprintf("Error parsing response: %v", err))
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -117,7 +117,7 @@ type ListItem struct {
|
|||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Home string `json:"home"`
|
Home string `json:"home"`
|
||||||
Size int64 `json:"size"`
|
Size int64 `json:"size"`
|
||||||
Mtime int64 `json:"mtime,omitempty"`
|
Mtime uint64 `json:"mtime,omitempty"`
|
||||||
Hash string `json:"hash,omitempty"`
|
Hash string `json:"hash,omitempty"`
|
||||||
VirusScan string `json:"virus_scan,omitempty"`
|
VirusScan string `json:"virus_scan,omitempty"`
|
||||||
Tree string `json:"tree,omitempty"`
|
Tree string `json:"tree,omitempty"`
|
||||||
@@ -159,71 +159,6 @@ type FolderInfoResponse struct {
|
|||||||
Email string `json:"email"`
|
Email string `json:"email"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ShardInfoResponse ...
|
|
||||||
type ShardInfoResponse struct {
|
|
||||||
Email string `json:"email"`
|
|
||||||
Body struct {
|
|
||||||
Video []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"video"`
|
|
||||||
ViewDirect []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"view_direct"`
|
|
||||||
WeblinkView []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"weblink_view"`
|
|
||||||
WeblinkVideo []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"weblink_video"`
|
|
||||||
WeblinkGet []struct {
|
|
||||||
Count int `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"weblink_get"`
|
|
||||||
Stock []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"stock"`
|
|
||||||
WeblinkThumbnails []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"weblink_thumbnails"`
|
|
||||||
PublicUpload []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"public_upload"`
|
|
||||||
Auth []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"auth"`
|
|
||||||
Web []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"web"`
|
|
||||||
View []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"view"`
|
|
||||||
Upload []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"upload"`
|
|
||||||
Get []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"get"`
|
|
||||||
Thumbnails []struct {
|
|
||||||
Count string `json:"count"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
} `json:"thumbnails"`
|
|
||||||
} `json:"body"`
|
|
||||||
Time int64 `json:"time"`
|
|
||||||
Status int `json:"status"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CleanupResponse ...
|
// CleanupResponse ...
|
||||||
type CleanupResponse struct {
|
type CleanupResponse struct {
|
||||||
Email string `json:"email"`
|
Email string `json:"email"`
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user