mirror of
https://github.com/rclone/rclone.git
synced 2025-12-15 15:53:41 +00:00
Compare commits
1146 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a73ecec11f | ||
|
|
c223464cd0 | ||
|
|
39d09c04a2 | ||
|
|
db5494b316 | ||
|
|
c3dab09a94 | ||
|
|
3ddcbce989 | ||
|
|
0cf19ef66a | ||
|
|
655891170f | ||
|
|
93423a0812 | ||
|
|
78f33f5d6e | ||
|
|
209b7da3b2 | ||
|
|
6f71260acf | ||
|
|
ec6c3f2686 | ||
|
|
62e28d0a72 | ||
|
|
470642f2b7 | ||
|
|
b5002eb6a4 | ||
|
|
ee5698b3a9 | ||
|
|
728ff231ab | ||
|
|
542f938ce2 | ||
|
|
e24d0ac94d | ||
|
|
da2e2544ee | ||
|
|
72add5ab27 | ||
|
|
9ac72ee53f | ||
|
|
c3dac2e385 | ||
|
|
92294a4a92 | ||
|
|
69ff009264 | ||
|
|
27b157580e | ||
|
|
3f288bc9ea | ||
|
|
ce1b9a7daf | ||
|
|
f0512d1a52 | ||
|
|
51866fbd34 | ||
|
|
ee13bc6775 | ||
|
|
e86f62c3e8 | ||
|
|
6c3bf629a1 | ||
|
|
575e779b55 | ||
|
|
dc56ad9816 | ||
|
|
e7d04fc103 | ||
|
|
e2d7d413ef | ||
|
|
e7e9aa0dfa | ||
|
|
f88300a153 | ||
|
|
e54087ece1 | ||
|
|
54561fd2bc | ||
|
|
479c5a514a | ||
|
|
f3c7e1a9dd | ||
|
|
70b5b2f5c6 | ||
|
|
d7811f72ad | ||
|
|
aa20486485 | ||
|
|
33f302a06b | ||
|
|
24cb739d1f | ||
|
|
f0abd6173d | ||
|
|
1817d8f631 | ||
|
|
a308ad5bd7 | ||
|
|
b360527931 | ||
|
|
52b042971a | ||
|
|
2d2778eabf | ||
|
|
d55f8f0492 | ||
|
|
b44d0ea088 | ||
|
|
d981456ddc | ||
|
|
b22c4c4307 | ||
|
|
afc8cc550a | ||
|
|
83b642e98f | ||
|
|
d5d635b7f3 | ||
|
|
6b89e6c381 | ||
|
|
be0dd09801 | ||
|
|
b76cd4abd2 | ||
|
|
0dbf1230bc | ||
|
|
4fd9570332 | ||
|
|
8d77e48190 | ||
|
|
dcce65b2b3 | ||
|
|
4ce31555b2 | ||
|
|
5ed4bc97f3 | ||
|
|
54e37be591 | ||
|
|
eaa717b88a | ||
|
|
bbbc202ee6 | ||
|
|
97364fd0b6 | ||
|
|
c34f11a92f | ||
|
|
e31fc877e2 | ||
|
|
e069fc439e | ||
|
|
5250fcdf08 | ||
|
|
9876ba53f8 | ||
|
|
64662bef8d | ||
|
|
0b8d9084fc | ||
|
|
7be49249d3 | ||
|
|
8a6a8b9623 | ||
|
|
6fc88ff32e | ||
|
|
50928a5027 | ||
|
|
3a431056e2 | ||
|
|
53c3e5f0ab | ||
|
|
0edb025257 | ||
|
|
fded4dbea2 | ||
|
|
7e20e16cff | ||
|
|
1e88f0702a | ||
|
|
68333d34a1 | ||
|
|
740b3f6ae2 | ||
|
|
28fcc53e45 | ||
|
|
2ca477c57f | ||
|
|
9a11d3efd9 | ||
|
|
10d5377ed8 | ||
|
|
ee14efd3c2 | ||
|
|
b4be7d65a6 | ||
|
|
52e1bfae2a | ||
|
|
9c1e703777 | ||
|
|
b49821956a | ||
|
|
a61ba1e7c4 | ||
|
|
d30cc1e119 | ||
|
|
74a3dfc4e1 | ||
|
|
3fe9448229 | ||
|
|
a5cfdfd233 | ||
|
|
bdc19b7c8a | ||
|
|
e92cc8fe2b | ||
|
|
6ee4c62cae | ||
|
|
b047402294 | ||
|
|
7693cecd17 | ||
|
|
558f014d43 | ||
|
|
48508cb5b7 | ||
|
|
44c98e8654 | ||
|
|
9782c264e9 | ||
|
|
9cede6b372 | ||
|
|
decd960867 | ||
|
|
71028e0f06 | ||
|
|
52e96bc0e2 | ||
|
|
178ff62d6a | ||
|
|
9d335eb5cb | ||
|
|
20da3e6352 | ||
|
|
6381959850 | ||
|
|
8916455e4f | ||
|
|
8e214e838e | ||
|
|
23acd3ce01 | ||
|
|
a2e3af0523 | ||
|
|
5455d34f8c | ||
|
|
84512ac77d | ||
|
|
1ec0327ed7 | ||
|
|
0f07b63fd1 | ||
|
|
88ef475629 | ||
|
|
ade61fa756 | ||
|
|
cfc5f7bb2d | ||
|
|
ae9f8304fa | ||
|
|
55755a8e5b | ||
|
|
080050fac2 | ||
|
|
a243ea6353 | ||
|
|
51d2174c0b | ||
|
|
e75db0b14d | ||
|
|
c59a292719 | ||
|
|
be5b8b8dff | ||
|
|
525220b14e | ||
|
|
a9d29c2264 | ||
|
|
8f54dc06a2 | ||
|
|
7daf97f90a | ||
|
|
2cae017738 | ||
|
|
e172f00e0e | ||
|
|
412dacf8be | ||
|
|
cdacf026e4 | ||
|
|
0ca6408580 | ||
|
|
9627a6142d | ||
|
|
6cc783f20b | ||
|
|
3136a75f4d | ||
|
|
a9101f8608 | ||
|
|
af043eda15 | ||
|
|
35c210d36f | ||
|
|
3ed0440bd2 | ||
|
|
c13cff37ef | ||
|
|
fce734662f | ||
|
|
e0ba1a2cd2 | ||
|
|
c72fca2711 | ||
|
|
ae17d88518 | ||
|
|
e19fc49a5f | ||
|
|
95c0378e3c | ||
|
|
7ee3cfd7c9 | ||
|
|
bd2cdeeeab | ||
|
|
77cd93ef89 | ||
|
|
5b063679b5 | ||
|
|
09093a9954 | ||
|
|
df0cfa9735 | ||
|
|
64d7489fd2 | ||
|
|
ecedcd0e7f | ||
|
|
3dff91d691 | ||
|
|
e131ef3714 | ||
|
|
ea0bc278ba | ||
|
|
b553c23d5b | ||
|
|
4f954896a8 | ||
|
|
b259f8b752 | ||
|
|
8be8a8e41b | ||
|
|
79aa060e21 | ||
|
|
f9500729b7 | ||
|
|
204a19e67f | ||
|
|
e6ffe3464c | ||
|
|
0384364c3e | ||
|
|
763facfd78 | ||
|
|
bc88f1dafa | ||
|
|
0c055a1215 | ||
|
|
938d7951ab | ||
|
|
b4466bd9b1 | ||
|
|
31f76aa464 | ||
|
|
c887c164dc | ||
|
|
115ac00222 | ||
|
|
50e79bc087 | ||
|
|
abda616f84 | ||
|
|
9c3048580a | ||
|
|
c1d5faa32a | ||
|
|
d127d8686a | ||
|
|
bc9856b570 | ||
|
|
855071cc19 | ||
|
|
b179540e80 | ||
|
|
6a8e4690d3 | ||
|
|
917ea6ac57 | ||
|
|
7b47a1e842 | ||
|
|
bcd87009e2 | ||
|
|
caf85737c3 | ||
|
|
e1516e0159 | ||
|
|
ee1111e4c9 | ||
|
|
268fe0004c | ||
|
|
0c92a64bb3 | ||
|
|
8b61692754 | ||
|
|
663e6f3ec0 | ||
|
|
17633f5460 | ||
|
|
98c2d2c41b | ||
|
|
5135ff73cb | ||
|
|
58a82cd578 | ||
|
|
d86ea8623b | ||
|
|
cdeeff988e | ||
|
|
930ff266f2 | ||
|
|
d5c0fe632f | ||
|
|
3c5c5eeec2 | ||
|
|
56f017c60c | ||
|
|
b6517840ca | ||
|
|
1ccfea5aa9 | ||
|
|
7e858f4b8d | ||
|
|
7b4f368307 | ||
|
|
06a3502ed8 | ||
|
|
a9a43144ca | ||
|
|
dd968a8ccf | ||
|
|
0d6e1afe54 | ||
|
|
7d9faffd4b | ||
|
|
d7df065320 | ||
|
|
84d4d7f9d9 | ||
|
|
733d6fe56c | ||
|
|
8350544092 | ||
|
|
6a63bc2788 | ||
|
|
66e8c1600e | ||
|
|
82b8d68ffb | ||
|
|
b86bbcd67e | ||
|
|
38b6d607aa | ||
|
|
e1647a5a08 | ||
|
|
bc25190fc7 | ||
|
|
e3a41321cc | ||
|
|
2fd86c93fc | ||
|
|
2b8c461e04 | ||
|
|
a54692d165 | ||
|
|
4b4c59a4bb | ||
|
|
81d688107e | ||
|
|
6e003934fc | ||
|
|
37e1b20ec1 | ||
|
|
d1787b50fd | ||
|
|
9dfc346998 | ||
|
|
9ab4c19945 | ||
|
|
3bab119fa5 | ||
|
|
1fdf3e2aae | ||
|
|
4810aa65a4 | ||
|
|
f798552cf1 | ||
|
|
4dc030d081 | ||
|
|
216499d78b | ||
|
|
60f636ee15 | ||
|
|
f0bf117a04 | ||
|
|
788b6ce821 | ||
|
|
503cd84919 | ||
|
|
118e26f8e2 | ||
|
|
5355881332 | ||
|
|
b94b50a808 | ||
|
|
9b07d32c02 | ||
|
|
986a2851bf | ||
|
|
6474f2c7c2 | ||
|
|
99f7fe736a | ||
|
|
e80d8db417 | ||
|
|
320c53eab0 | ||
|
|
4d5b73df85 | ||
|
|
0faf82702b | ||
|
|
194a8f56e1 | ||
|
|
f046c00d3b | ||
|
|
488353c977 | ||
|
|
c45c604997 | ||
|
|
b2a4ea9304 | ||
|
|
8dc7bf883d | ||
|
|
61f186c8a3 | ||
|
|
e88623e3c8 | ||
|
|
4652db34a4 | ||
|
|
05d72385b5 | ||
|
|
9bb408e1a9 | ||
|
|
10e532bce9 | ||
|
|
4ab7e05e02 | ||
|
|
1cc58e4e09 | ||
|
|
fdaac6df67 | ||
|
|
1d42a343d2 | ||
|
|
0ce34be41d | ||
|
|
5fba913207 | ||
|
|
f7252645ba | ||
|
|
e48d19f895 | ||
|
|
6bad0ad9c4 | ||
|
|
dc5b7dc102 | ||
|
|
55eafb3a9a | ||
|
|
5b6dd36307 | ||
|
|
175c39e1d0 | ||
|
|
84b12574de | ||
|
|
efbb040e3f | ||
|
|
79e3c67bbd | ||
|
|
527099ae72 | ||
|
|
e2f0feef3c | ||
|
|
30e97ad9ec | ||
|
|
07dc76eff0 | ||
|
|
e59dc81658 | ||
|
|
f40443359d | ||
|
|
6b0f2ef4bd | ||
|
|
12aa03f5b8 | ||
|
|
73a96dc588 | ||
|
|
980cd5bfd8 | ||
|
|
86cc9f3dfb | ||
|
|
1ae604fcf4 | ||
|
|
5e93fe96d3 | ||
|
|
31745320c8 | ||
|
|
2da6cd7f84 | ||
|
|
6e0e1ad9cb | ||
|
|
dd62c94d05 | ||
|
|
ee70b99143 | ||
|
|
b3a526814e | ||
|
|
69a15ae173 | ||
|
|
1d7f95da8e | ||
|
|
8ec57d145e | ||
|
|
3ef9f6f016 | ||
|
|
990b676e13 | ||
|
|
5cdfe9c7ae | ||
|
|
033d1eb7af | ||
|
|
ac62ef430d | ||
|
|
928be0f1fd | ||
|
|
6f75290678 | ||
|
|
8c2b50c7ed | ||
|
|
2b1695e09b | ||
|
|
ef604f6100 | ||
|
|
f3c5745468 | ||
|
|
e4835f535d | ||
|
|
33c2873ae9 | ||
|
|
dac4bb22d3 | ||
|
|
b52c80e85c | ||
|
|
f15c6b68b6 | ||
|
|
3f778d70f7 | ||
|
|
6fc114d681 | ||
|
|
9a9d09845c | ||
|
|
7fa687b3e1 | ||
|
|
493da54113 | ||
|
|
541929258b | ||
|
|
370f242fa2 | ||
|
|
7047c67a5e | ||
|
|
18c75a81f9 | ||
|
|
01c747e7db | ||
|
|
186aedda98 | ||
|
|
ca0e25b1a1 | ||
|
|
f87a694d10 | ||
|
|
006227baed | ||
|
|
4d28b5ed22 | ||
|
|
499475bb41 | ||
|
|
666dae4229 | ||
|
|
ac1c041377 | ||
|
|
0366ea39c5 | ||
|
|
80f53176d9 | ||
|
|
40c02989f1 | ||
|
|
50e190ff54 | ||
|
|
dd20a297d6 | ||
|
|
c0ad29c06c | ||
|
|
d091d4a8bb | ||
|
|
381b845307 | ||
|
|
48cdedc97b | ||
|
|
7c6cd3a9e1 | ||
|
|
bcdd73369f | ||
|
|
86bec20b56 | ||
|
|
c3b2b89473 | ||
|
|
85f05c57d1 | ||
|
|
16d91246c4 | ||
|
|
726cb43be9 | ||
|
|
288302c2cf | ||
|
|
609671aabc | ||
|
|
b9a8315696 | ||
|
|
27e18b6efa | ||
|
|
9d331ce04b | ||
|
|
916569102c | ||
|
|
28f9b9b611 | ||
|
|
7679620f4b | ||
|
|
8a11da4e14 | ||
|
|
f11867d810 | ||
|
|
6f8501e9a1 | ||
|
|
37fe6d56e5 | ||
|
|
ff8f11d79c | ||
|
|
cbc113492a | ||
|
|
74702554da | ||
|
|
bd29015022 | ||
|
|
2192805360 | ||
|
|
db0b93c0ad | ||
|
|
94947f2523 | ||
|
|
29c6e22024 | ||
|
|
390f3cf35b | ||
|
|
20c033b484 | ||
|
|
8068ef96b6 | ||
|
|
9d36258923 | ||
|
|
9fdeb82328 | ||
|
|
2abfae283c | ||
|
|
b6848a3edb | ||
|
|
e2bf9eb8e9 | ||
|
|
a77659e47d | ||
|
|
e9da14ac2e | ||
|
|
a4bf22e620 | ||
|
|
a6b4065e13 | ||
|
|
07ebf35987 | ||
|
|
47ebd0789c | ||
|
|
166fd50451 | ||
|
|
0604d3dbf2 | ||
|
|
1fa258c2b4 | ||
|
|
3745c526f1 | ||
|
|
c123c702ab | ||
|
|
4aae7bcca6 | ||
|
|
aa62e93094 | ||
|
|
45862f4c16 | ||
|
|
3b1e0b66bb | ||
|
|
a7d8ccd265 | ||
|
|
d4c923a5cc | ||
|
|
e426cb1d1a | ||
|
|
3c87a0d0dc | ||
|
|
499766f6ab | ||
|
|
35a6436983 | ||
|
|
341745d4d5 | ||
|
|
78c1f2839e | ||
|
|
de2d967abd | ||
|
|
6611d92e21 | ||
|
|
e1a49ca426 | ||
|
|
f73ee5eade | ||
|
|
0d75d2585f | ||
|
|
3b0f944e23 | ||
|
|
aaeab58ce6 | ||
|
|
5894c02a34 | ||
|
|
f1221b510b | ||
|
|
274ab349f4 | ||
|
|
86392fb800 | ||
|
|
adc156ab2a | ||
|
|
47d3a450a4 | ||
|
|
5c89fd679d | ||
|
|
1cad759306 | ||
|
|
5b8b379feb | ||
|
|
f538fd8eb4 | ||
|
|
4dd5428b13 | ||
|
|
64ec220d5d | ||
|
|
cbfec0d281 | ||
|
|
80c044f2d3 | ||
|
|
1b2dda8c4c | ||
|
|
473bdad00b | ||
|
|
4482e75f38 | ||
|
|
43c530922a | ||
|
|
dd60f088ed | ||
|
|
0117aeafbf | ||
|
|
442861581a | ||
|
|
4e809c951d | ||
|
|
215fd2a11d | ||
|
|
13b705e227 | ||
|
|
8083804575 | ||
|
|
ec0916c59d | ||
|
|
7392cd1a1a | ||
|
|
2656a0e070 | ||
|
|
5b5df9ae8e | ||
|
|
fafbcc8e2f | ||
|
|
c55402caa2 | ||
|
|
d132dc7640 | ||
|
|
48a2e3844d | ||
|
|
d911bf3889 | ||
|
|
dcf53a1d12 | ||
|
|
3bdfa284a9 | ||
|
|
cb9f1eefd2 | ||
|
|
dd99a4b3dc | ||
|
|
e79a5de7df | ||
|
|
c24da0b886 | ||
|
|
be4fd51289 | ||
|
|
2cbdb95ce5 | ||
|
|
716ce49ce9 | ||
|
|
34b9ac8a5d | ||
|
|
c265f451f2 | ||
|
|
2058652fa4 | ||
|
|
50b3cfccb1 | ||
|
|
5e35aeca9e | ||
|
|
05798672c8 | ||
|
|
7929b6e756 | ||
|
|
2756900749 | ||
|
|
539853df36 | ||
|
|
651db36674 | ||
|
|
f9df545e3c | ||
|
|
5e62ede8d0 | ||
|
|
7f41c9a015 | ||
|
|
ac7727861e | ||
|
|
943a0938e7 | ||
|
|
6580d9478e | ||
|
|
36d411c25d | ||
|
|
8aae166a5b | ||
|
|
aaad0354e6 | ||
|
|
f3365dd251 | ||
|
|
aaa1370a36 | ||
|
|
c41b67ea08 | ||
|
|
0b562bcabc | ||
|
|
1e41a015b5 | ||
|
|
8b82cc7073 | ||
|
|
e19b30bd26 | ||
|
|
09897c8d0d | ||
|
|
d4ddbcea96 | ||
|
|
00af021abb | ||
|
|
8118623680 | ||
|
|
2c594dd996 | ||
|
|
d8b7156b5c | ||
|
|
d4a609c6cd | ||
|
|
bf243f30d3 | ||
|
|
3ce82facac | ||
|
|
fb1458815a | ||
|
|
2243b065e8 | ||
|
|
718694d5ee | ||
|
|
77f38cb6f1 | ||
|
|
ca017980a3 | ||
|
|
4105da206a | ||
|
|
34e7ca90fc | ||
|
|
687abe7803 | ||
|
|
9b1820a7ad | ||
|
|
5f320cc540 | ||
|
|
23b8f008e0 | ||
|
|
d95288175f | ||
|
|
b83f7ac06b | ||
|
|
f7af730b50 | ||
|
|
01be5bff02 | ||
|
|
e825df6448 | ||
|
|
ff41b0d435 | ||
|
|
e162377ca3 | ||
|
|
d1080d5456 | ||
|
|
64b5a76bec | ||
|
|
7cfb1bdc70 | ||
|
|
441951a93b | ||
|
|
154e91bb23 | ||
|
|
cb40511807 | ||
|
|
452c68115f | ||
|
|
b35123ba48 | ||
|
|
978e06a623 | ||
|
|
15c9fed60f | ||
|
|
2302179237 | ||
|
|
318e335137 | ||
|
|
11301a64fb | ||
|
|
1c912de9cc | ||
|
|
d1759fdfa9 | ||
|
|
c102bf28e3 | ||
|
|
e65059e431 | ||
|
|
5454f2abd0 | ||
|
|
cc4f5ba7ba | ||
|
|
062616e4dd | ||
|
|
6846a1cc11 | ||
|
|
6fd5ef2d99 | ||
|
|
87107413f5 | ||
|
|
5986953317 | ||
|
|
9d2dd2c49a | ||
|
|
54d99d6ab2 | ||
|
|
77b975d16f | ||
|
|
c464cc6376 | ||
|
|
93e84403bb | ||
|
|
5b8327038a | ||
|
|
eba0a3633b | ||
|
|
de73063977 | ||
|
|
eca9e8eb70 | ||
|
|
a4a44a41ae | ||
|
|
a02edb9e69 | ||
|
|
368cce93ff | ||
|
|
d8d11023d3 | ||
|
|
4803ce010e | ||
|
|
b7875fc02a | ||
|
|
544ca6035a | ||
|
|
0238558a4b | ||
|
|
bc414b698d | ||
|
|
ace1e21894 | ||
|
|
8a56a6836a | ||
|
|
83849e0a36 | ||
|
|
618f2e33e8 | ||
|
|
fe53caf997 | ||
|
|
d83074ae05 | ||
|
|
0cef6bd0ac | ||
|
|
d42b38699b | ||
|
|
98804cb860 | ||
|
|
d033e92234 | ||
|
|
ec7cef98d8 | ||
|
|
aedad89560 | ||
|
|
f45b3c87bf | ||
|
|
e94850f322 | ||
|
|
de80a540a7 | ||
|
|
392a86f585 | ||
|
|
265f5b77a7 | ||
|
|
aef2ac5c04 | ||
|
|
75e5e59385 | ||
|
|
6c21009c76 | ||
|
|
9192e0a28d | ||
|
|
47e201837f | ||
|
|
4847c5695c | ||
|
|
391feb698e | ||
|
|
a4714e5b75 | ||
|
|
4dae5ee264 | ||
|
|
7e9739db57 | ||
|
|
1e557f4bd9 | ||
|
|
ca19204cf4 | ||
|
|
03977354cb | ||
|
|
c43395fafa | ||
|
|
7cf6fe2209 | ||
|
|
9ea20bac42 | ||
|
|
945f49ab5e | ||
|
|
6c9a258d82 | ||
|
|
f2eeb4301c | ||
|
|
c117eaf5a2 | ||
|
|
3e43ff7414 | ||
|
|
bb21cf6f0e | ||
|
|
bfe6f299d0 | ||
|
|
e19ba47875 | ||
|
|
7227a2653d | ||
|
|
61665ddd10 | ||
|
|
0caac70994 | ||
|
|
83ba59749f | ||
|
|
20a429c048 | ||
|
|
cf43ca2a7b | ||
|
|
4001e21624 | ||
|
|
bbf819e2d1 | ||
|
|
0cb9bb3b54 | ||
|
|
5c91623148 | ||
|
|
5b913884cf | ||
|
|
346d4c587c | ||
|
|
d5b16c8b1a | ||
|
|
e78eeedc75 | ||
|
|
87db3cfad3 | ||
|
|
54fdc6866e | ||
|
|
2eaac80c86 | ||
|
|
b3d0848d09 | ||
|
|
0c6990bc95 | ||
|
|
d9bba67d18 | ||
|
|
140a3d0aef | ||
|
|
31fe800d6a | ||
|
|
3996bbb8cb | ||
|
|
c2599cb116 | ||
|
|
2c13074f6c | ||
|
|
059743a1b0 | ||
|
|
73cd1f4e88 | ||
|
|
a54806e5c1 | ||
|
|
e6a0521ca2 | ||
|
|
43eadf278c | ||
|
|
5f375a182d | ||
|
|
663dd6ed8b | ||
|
|
226c2a0d83 | ||
|
|
b4b4b6cb1c | ||
|
|
9985fc40f4 | ||
|
|
b1de4c8cba | ||
|
|
6a4e424630 | ||
|
|
ebb67c135e | ||
|
|
326dcf2470 | ||
|
|
86eb80ecdc | ||
|
|
2003ba356b | ||
|
|
037a000cc8 | ||
|
|
8a771450d2 | ||
|
|
1e7dc06ab8 | ||
|
|
ca841c56a8 | ||
|
|
79eebf1993 | ||
|
|
bbccf4acd5 | ||
|
|
9e7ddd5efc | ||
|
|
6089f443b9 | ||
|
|
84eb7031bb | ||
|
|
f22029bf3d | ||
|
|
d7b79b4481 | ||
|
|
b5faaf7116 | ||
|
|
b4f2ada820 | ||
|
|
8a66930bd7 | ||
|
|
2ebeed6753 | ||
|
|
23d8ba41d5 | ||
|
|
4f9e805d44 | ||
|
|
3f7107839e | ||
|
|
bb62c49489 | ||
|
|
ae6018355c | ||
|
|
0805ec051f | ||
|
|
e27b91ffb8 | ||
|
|
0a7b34eefc | ||
|
|
549cac90af | ||
|
|
ba0b41dd92 | ||
|
|
2df261e42b | ||
|
|
38adb35abe | ||
|
|
520ded60e3 | ||
|
|
ae56df7d4f | ||
|
|
412591dfaf | ||
|
|
57f8f1ec92 | ||
|
|
f0434789cf | ||
|
|
c2f6decb9c | ||
|
|
9eeed25418 | ||
|
|
67562081f7 | ||
|
|
41917eb1f2 | ||
|
|
c3e996f10f | ||
|
|
63f6827a0d | ||
|
|
96e2271cce | ||
|
|
ac3c83f966 | ||
|
|
b9c8e61d39 | ||
|
|
a6056408dd | ||
|
|
b9479cf7ab | ||
|
|
452a5badc1 | ||
|
|
d645bf0966 | ||
|
|
50addaa91e | ||
|
|
02a3bbaa3d | ||
|
|
a20d80565b | ||
|
|
56adb52a21 | ||
|
|
8c2fc6daf8 | ||
|
|
4bd9932703 | ||
|
|
2a1d4b7563 | ||
|
|
b394431f18 | ||
|
|
cc628717d8 | ||
|
|
f3e00133a0 | ||
|
|
606961f49d | ||
|
|
13591c7c00 | ||
|
|
28f4061892 | ||
|
|
018fe80bcb | ||
|
|
0a43ff9c13 | ||
|
|
9aae143833 | ||
|
|
c8e2531c8b | ||
|
|
9290004bb8 | ||
|
|
cbebefebc4 | ||
|
|
6f3897ce2c | ||
|
|
ea5878f590 | ||
|
|
46f8e50614 | ||
|
|
70dc97231e | ||
|
|
f6a053df6e | ||
|
|
af4ef8ad8d | ||
|
|
13797a1fb8 | ||
|
|
3ad8fb8634 | ||
|
|
ab43005422 | ||
|
|
b1f131964e | ||
|
|
1a87b69376 | ||
|
|
5a3b109e25 | ||
|
|
a67c7461ee | ||
|
|
e0aa4bb492 | ||
|
|
ab0947ee37 | ||
|
|
bd0227450e | ||
|
|
f438f1e9ef | ||
|
|
3f7b2c1ade | ||
|
|
6e35a3b3ce | ||
|
|
d3dd672640 | ||
|
|
2a46be8cf3 | ||
|
|
1b4370bde1 | ||
|
|
cc6a776034 | ||
|
|
2cfb3834f2 | ||
|
|
46135d830e | ||
|
|
318e42e35b | ||
|
|
c7f04e24d3 | ||
|
|
e4650eff58 | ||
|
|
869d91269d | ||
|
|
df1092ef33 | ||
|
|
4c5b2833b3 | ||
|
|
7fe653c350 | ||
|
|
661715733a | ||
|
|
f17cb1bf50 | ||
|
|
9ec06df79f | ||
|
|
67d0375b98 | ||
|
|
4882b8ba67 | ||
|
|
108760e17b | ||
|
|
f15e7e89d2 | ||
|
|
e2788aa729 | ||
|
|
772f99fd74 | ||
|
|
9bbcdeefd0 | ||
|
|
a21cc161de | ||
|
|
e818b7c206 | ||
|
|
5723d788a4 | ||
|
|
1d6698a754 | ||
|
|
1fce83b936 | ||
|
|
ccdd1ea6c4 | ||
|
|
348734584b | ||
|
|
c6a79ff72d | ||
|
|
b6f1391da3 | ||
|
|
ce94c0e729 | ||
|
|
58befe280c | ||
|
|
4c0f4ccb65 | ||
|
|
085677d511 | ||
|
|
0a922ad1dc | ||
|
|
83c3bb2f1a | ||
|
|
83087a45f0 | ||
|
|
cadf202107 | ||
|
|
36700d36a7 | ||
|
|
ad85f6e413 | ||
|
|
536526cc92 | ||
|
|
ac9c20b048 | ||
|
|
2db35f0ce7 | ||
|
|
dbfa7031d2 | ||
|
|
c2d0e86431 | ||
|
|
68ec6a9f5b | ||
|
|
753b0717be | ||
|
|
3bdad260b0 | ||
|
|
d205dc23e9 | ||
|
|
bdd26d71b2 | ||
|
|
8b2f6faf18 | ||
|
|
7c01bbddf8 | ||
|
|
1752ee3c8b | ||
|
|
5c2d8ffe33 | ||
|
|
7fecd5c8c6 | ||
|
|
19b7ff12ad | ||
|
|
b053234eb1 | ||
|
|
640d7bd365 | ||
|
|
8af68e779f | ||
|
|
3a1198cac5 | ||
|
|
022ab4516d | ||
|
|
17aac9b15f | ||
|
|
6c0c9abd57 | ||
|
|
70496c15e1 | ||
|
|
8b61e68bb7 | ||
|
|
bb75d80d33 | ||
|
|
157d7d45f5 | ||
|
|
b5cba73cc3 | ||
|
|
dd36264aad | ||
|
|
ddb47758f3 | ||
|
|
9539bbf78a | ||
|
|
0f8e7c3843 | ||
|
|
b835330714 | ||
|
|
310db14ed6 | ||
|
|
7f2e9d9a6b | ||
|
|
6cc9c09610 | ||
|
|
93c60c34e1 | ||
|
|
02c11dd4a7 | ||
|
|
40dc575aa4 | ||
|
|
f8101771c9 | ||
|
|
8f4d6973fb | ||
|
|
ced3a4bc19 | ||
|
|
cb22583212 | ||
|
|
414b35ea56 | ||
|
|
f469905d07 | ||
|
|
20f4b2c91d | ||
|
|
37543bd1d9 | ||
|
|
0dc0052e93 | ||
|
|
bd27473762 | ||
|
|
9dccf91da7 | ||
|
|
a1323eb204 | ||
|
|
e57c4406f3 | ||
|
|
fdd4b4ee22 | ||
|
|
8ef551bf9c | ||
|
|
2119fb4314 | ||
|
|
0166544319 | ||
|
|
874a64e5f6 | ||
|
|
e0c03a11ab | ||
|
|
3c7f80f58f | ||
|
|
229ea3f86c | ||
|
|
41eb386063 | ||
|
|
dfc7cd97a3 | ||
|
|
280ac26464 | ||
|
|
88cca8a6eb | ||
|
|
9c263e3e2b | ||
|
|
7d4e143dee | ||
|
|
3343c1afa4 | ||
|
|
b279df2e67 | ||
|
|
e6f340d245 | ||
|
|
bfc66cceaa | ||
|
|
1105b6bd94 | ||
|
|
694d390710 | ||
|
|
6b6b43402b | ||
|
|
6f46270735 | ||
|
|
ee5e34a19c | ||
|
|
70902b4051 | ||
|
|
f46304e8ae | ||
|
|
40252f0aa6 | ||
|
|
e7b9cc4705 | ||
|
|
867a26fe4f | ||
|
|
3890105cdc | ||
|
|
d2219a800a | ||
|
|
ccb59480bd | ||
|
|
b5c5209162 | ||
|
|
835b6761b7 | ||
|
|
f30c836696 | ||
|
|
090ce00afc | ||
|
|
377986d599 | ||
|
|
95e4d837ef | ||
|
|
e08e35984c | ||
|
|
a3b4c8a0f2 | ||
|
|
700e47d6e2 | ||
|
|
ea11f5ff3d | ||
|
|
758c7f2d84 | ||
|
|
ef06371c93 | ||
|
|
85a0f25b95 | ||
|
|
84b00b362f | ||
|
|
bfd7601cf9 | ||
|
|
4676a89963 | ||
|
|
8cd3c25b41 | ||
|
|
5f97603684 | ||
|
|
f1debd4701 | ||
|
|
1cd0d9a1f2 | ||
|
|
a6320bbad3 | ||
|
|
b1dd8e998b | ||
|
|
c2e8f06bfa | ||
|
|
08a8f7174a | ||
|
|
ce4c1d4f35 | ||
|
|
a0b9bd527e | ||
|
|
ce05ef7110 | ||
|
|
6a47d966a4 | ||
|
|
85d99de26b | ||
|
|
4a82251c62 | ||
|
|
e62c0a58a7 | ||
|
|
1f3e48f18f | ||
|
|
bbbe11790b | ||
|
|
13edf62824 | ||
|
|
558bc2e132 | ||
|
|
0f73129ab7 | ||
|
|
1373efaa39 | ||
|
|
5c37b777fc | ||
|
|
d4df3f2154 | ||
|
|
8ae424c5a3 | ||
|
|
cae19df058 | ||
|
|
8c211fc8df | ||
|
|
74a71f7824 | ||
|
|
12b51c5eb8 | ||
|
|
14069fd8e6 | ||
|
|
cd62f41606 | ||
|
|
109d4ee490 | ||
|
|
18ebec8276 | ||
|
|
c47b4f828f | ||
|
|
c3a0c0c451 | ||
|
|
6cb0de43ce | ||
|
|
83f0d3e03d | ||
|
|
eda4130703 | ||
|
|
ccba859812 | ||
|
|
de3cf5e8d7 | ||
|
|
ce305321b6 | ||
|
|
e6117e978e | ||
|
|
4b40898743 | ||
|
|
ae3a0ec27e | ||
|
|
d9458fb4ee | ||
|
|
27f67edb1a | ||
|
|
3ffea738e6 | ||
|
|
a63dd6020c | ||
|
|
d0678bc3e5 | ||
|
|
ce04a073ef | ||
|
|
c337a367f3 | ||
|
|
7ae40cb352 | ||
|
|
e8daab7971 | ||
|
|
78c3a5ccfa | ||
|
|
2142c75846 | ||
|
|
c724d8f614 | ||
|
|
af5f4ee724 | ||
|
|
01aa4394a6 | ||
|
|
2646519712 | ||
|
|
5b2efd563a | ||
|
|
e7b7432079 | ||
|
|
ea2ef4443b | ||
|
|
25f22ec561 | ||
|
|
5189231a34 | ||
|
|
bcbd30bb8a | ||
|
|
c245183101 | ||
|
|
4ce2a84df0 | ||
|
|
3c31d711b3 | ||
|
|
3f5d8390ba | ||
|
|
78edafcaac | ||
|
|
1ce3673006 | ||
|
|
3423de65fa | ||
|
|
0c81439bc3 | ||
|
|
77fb8ac240 | ||
|
|
979dfb8cc6 | ||
|
|
fe0289f2f5 | ||
|
|
6a64567dd7 | ||
|
|
8de8cd62ca | ||
|
|
cba27d2920 | ||
|
|
9ade179407 | ||
|
|
82b85431bd | ||
|
|
98778b1870 | ||
|
|
dfd46c23f9 | ||
|
|
3ac4407b88 | ||
|
|
8ea0d5212f | ||
|
|
acd350d833 | ||
|
|
2f4b9f619d | ||
|
|
70efd0274c | ||
|
|
33b3eea6ec | ||
|
|
113624691a | ||
|
|
afaec1a2e9 | ||
|
|
ddf39f2d57 | ||
|
|
2df5d95d70 | ||
|
|
64a808ac76 | ||
|
|
05dc7183cb | ||
|
|
e69e181090 | ||
|
|
a1269fa669 | ||
|
|
8369b5209f | ||
|
|
2aa3c0a2af | ||
|
|
ac65d8369e | ||
|
|
7a24532224 | ||
|
|
8057d668bb | ||
|
|
36f1bc4a8a | ||
|
|
beb8098b0a | ||
|
|
6e64a71382 | ||
|
|
3cbd57d9ad | ||
|
|
4f50b26af0 | ||
|
|
cb651b5866 | ||
|
|
3c1069c815 | ||
|
|
7f0020a407 | ||
|
|
c270c1c80c | ||
|
|
29ecc2d8bb | ||
|
|
13da1b8d28 | ||
|
|
0b338eaa28 | ||
|
|
46696865fd | ||
|
|
fcea3777c0 | ||
|
|
5023050d95 | ||
|
|
bed01a303f | ||
|
|
2c2cb84ca7 | ||
|
|
e9dda25c60 | ||
|
|
80ffbade22 | ||
|
|
7beb50caa7 | ||
|
|
e8ba43c479 | ||
|
|
dcd6bedc27 | ||
|
|
5bb76cc35c | ||
|
|
3e68d485f2 | ||
|
|
1945f09d06 | ||
|
|
2c66bdd6bb | ||
|
|
a4f3548bbf | ||
|
|
4276abc58b | ||
|
|
a795d93bc3 | ||
|
|
5df04cb763 | ||
|
|
ef54167a4a | ||
|
|
d42cb11b84 | ||
|
|
b257de4aba | ||
|
|
365b4babae | ||
|
|
6d48dffa2f | ||
|
|
8f2999b6af | ||
|
|
be6115fbfa | ||
|
|
2fcb8f5db7 | ||
|
|
0ab3f020ab | ||
|
|
64c23c2f5b | ||
|
|
ff16e0f6df | ||
|
|
1a82ba196b | ||
|
|
ed72c678f8 | ||
|
|
4ed8836a71 | ||
|
|
5529978fa7 | ||
|
|
66d84c9914 | ||
|
|
b85ddc4e4f | ||
|
|
e4a9e27a55 | ||
|
|
22645eea2e | ||
|
|
345c98ed62 | ||
|
|
b872ff0237 | ||
|
|
1b95718460 | ||
|
|
6a3580c556 | ||
|
|
16c9fba5de | ||
|
|
4e952af614 | ||
|
|
6344c3051c | ||
|
|
ab9f521cbd | ||
|
|
3a900e5bb7 | ||
|
|
b4d7741611 | ||
|
|
95fd79faf9 | ||
|
|
b79dc01016 | ||
|
|
bf562d7373 | ||
|
|
2e9f2ea3d3 | ||
|
|
177dbbc29a | ||
|
|
4712043e26 | ||
|
|
852acd5e4e | ||
|
|
9f1daabb2c | ||
|
|
938dd24cc9 | ||
|
|
57aad81b68 | ||
|
|
a91bcaaeb0 | ||
|
|
d04c21b198 | ||
|
|
4a0a42c2f1 | ||
|
|
cc7b9af50e | ||
|
|
68fef49c55 | ||
|
|
5d4b149884 | ||
|
|
5f20ae707d | ||
|
|
e9c915e6fe | ||
|
|
2ed158aba3 | ||
|
|
05050d53ad | ||
|
|
e391311512 | ||
|
|
3234c28f7c | ||
|
|
6fbd9cf24b | ||
|
|
bc5b63ffef | ||
|
|
788ef76f1c | ||
|
|
0872ec3204 | ||
|
|
0a5870208e | ||
|
|
3219334c3e | ||
|
|
79fd662676 | ||
|
|
34193fd8d9 | ||
|
|
2203766f77 | ||
|
|
235cbe0e57 | ||
|
|
f50f353b5d | ||
|
|
00afe6cc96 | ||
|
|
dd48e62b7e | ||
|
|
a1a780e847 | ||
|
|
fa87077211 | ||
|
|
6ac7145d2d | ||
|
|
f1226f19b2 | ||
|
|
3ecbf2af25 | ||
|
|
79f2e95bf9 | ||
|
|
faee50b238 | ||
|
|
807d4a3c00 | ||
|
|
073d112204 | ||
|
|
14f814b806 | ||
|
|
a288c2b3a3 | ||
|
|
fec16b0ac8 | ||
|
|
dd8717797e | ||
|
|
7e7c239f09 | ||
|
|
edd0e8abb1 | ||
|
|
d2b537d9a1 | ||
|
|
8c3df224ef | ||
|
|
967fd2a778 | ||
|
|
ea12e446ca | ||
|
|
c8cd2b510f | ||
|
|
8b05a8322b | ||
|
|
c98a51b26c | ||
|
|
e2717a031e | ||
|
|
8d33ce0154 | ||
|
|
92745aa950 | ||
|
|
cbc6bf6a89 | ||
|
|
f72575e75f | ||
|
|
0168f55f3e | ||
|
|
8b60ab86a1 | ||
|
|
7463a7a509 | ||
|
|
9ed2de3d6e | ||
|
|
4f35fb59c8 | ||
|
|
59ba8f28c8 | ||
|
|
d298b578ab | ||
|
|
fabbc035c4 | ||
|
|
6530b07cde | ||
|
|
f8b7eaec93 | ||
|
|
5c226e91c0 | ||
|
|
8e3d45d2dc | ||
|
|
a96b522958 | ||
|
|
fedf81c2b7 | ||
|
|
0c6f816a49 | ||
|
|
dfe771fb0c | ||
|
|
bc19e2d84b | ||
|
|
8c4d91cff7 | ||
|
|
2fcc18779b | ||
|
|
96cc3e5a0b | ||
|
|
cc8fe0630c | ||
|
|
1d9e76bb0f | ||
|
|
337110b7a0 | ||
|
|
83733205f6 | ||
|
|
d8306938a1 | ||
|
|
88ea8b305d | ||
|
|
e2f4d7b5e3 | ||
|
|
8140869767 | ||
|
|
6a8de87116 | ||
|
|
0da6f24221 | ||
|
|
771e60bd07 | ||
|
|
40b3c4883f | ||
|
|
e320f4a988 | ||
|
|
5835f15f21 | ||
|
|
67c311233b | ||
|
|
3fcff32524 | ||
|
|
472f065ce7 | ||
|
|
6c6d7eb770 | ||
|
|
c646ada3c3 | ||
|
|
f55359b3ac | ||
|
|
9d9a17547a | ||
|
|
c6dc88766b | ||
|
|
754ce9dec6 | ||
|
|
bd5f685d0a | ||
|
|
c663e24669 | ||
|
|
5948764e9e |
46
.appveyor.yml
Normal file
46
.appveyor.yml
Normal file
@@ -0,0 +1,46 @@
|
||||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\ncw\rclone
|
||||
|
||||
environment:
|
||||
GOPATH: C:\gopath
|
||||
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
|
||||
ORIGPATH: '%PATH%'
|
||||
NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
|
||||
PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
|
||||
PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
|
||||
PATH: '%PATHCC64%'
|
||||
RCLONE_CONFIG_PASS:
|
||||
secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
|
||||
|
||||
install:
|
||||
- choco install winfsp -y
|
||||
- choco install zip -y
|
||||
- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
|
||||
|
||||
build_script:
|
||||
- echo %PATH%
|
||||
- echo %GOPATH%
|
||||
- go version
|
||||
- go env
|
||||
- go install
|
||||
- go build
|
||||
- make log_since_last_release > %TEMP%\git-log.txt
|
||||
- make version > %TEMP%\version
|
||||
- set /p RCLONE_VERSION=<%TEMP%\version
|
||||
- set PATH=%PATHCC32%
|
||||
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
|
||||
- set PATH=%PATHCC64%
|
||||
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%
|
||||
|
||||
test_script:
|
||||
- make GOTAGS=cmount quicktest
|
||||
|
||||
artifacts:
|
||||
- path: rclone.exe
|
||||
- path: build/*-v*.zip
|
||||
|
||||
deploy_script:
|
||||
- IF "%APPVEYOR_REPO_BRANCH%" == "master" make upload_beta
|
||||
31
.circleci/config.yml
Normal file
31
.circleci/config.yml
Normal file
@@ -0,0 +1,31 @@
|
||||
version: 2
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
machine: true
|
||||
|
||||
working_directory: ~/.go_workspace/src/github.com/ncw/rclone
|
||||
|
||||
steps:
|
||||
- checkout
|
||||
|
||||
- run:
|
||||
name: Cross-compile rclone
|
||||
command: |
|
||||
docker pull billziss/xgo-cgofuse
|
||||
go get -v github.com/karalabe/xgo
|
||||
xgo \
|
||||
--image=billziss/xgo-cgofuse \
|
||||
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
|
||||
-tags cmount \
|
||||
.
|
||||
|
||||
- run:
|
||||
name: Prepare artifacts
|
||||
command: |
|
||||
mkdir -p /tmp/rclone.dist
|
||||
cp -R rclone-* /tmp/rclone.dist
|
||||
|
||||
- store_artifacts:
|
||||
path: /tmp/rclone.dist
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -1,10 +1,5 @@
|
||||
*~
|
||||
_junk/
|
||||
rclone
|
||||
rclonetest/rclonetest
|
||||
build
|
||||
docs/public
|
||||
MANUAL.md
|
||||
MANUAL.html
|
||||
MANUAL.txt
|
||||
rclone.1
|
||||
|
||||
47
.travis.yml
47
.travis.yml
@@ -1,12 +1,41 @@
|
||||
language: go
|
||||
|
||||
sudo: false
|
||||
osx_image: xcode7.3
|
||||
os:
|
||||
- linux
|
||||
go:
|
||||
- 1.1.2
|
||||
- 1.2.2
|
||||
- 1.3.3
|
||||
- 1.4
|
||||
- tip
|
||||
|
||||
- 1.6.4
|
||||
- 1.7.4
|
||||
- 1.8.3
|
||||
- tip
|
||||
install:
|
||||
- git fetch --unshallow --tags
|
||||
- make vars
|
||||
- make build_dep
|
||||
script:
|
||||
- go get ./...
|
||||
- go test -v ./...
|
||||
- make check
|
||||
- make quicktest
|
||||
env:
|
||||
global:
|
||||
- GOTAGS=cmount
|
||||
matrix:
|
||||
secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- fuse
|
||||
- libfuse-dev
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
include:
|
||||
- os: osx
|
||||
go: 1.8.3
|
||||
env: GOTAGS=""
|
||||
deploy:
|
||||
provider: script
|
||||
script: make travis_beta
|
||||
on:
|
||||
branch: master
|
||||
go: 1.8.3
|
||||
condition: "`uname` == 'Linux'"
|
||||
|
||||
223
CONTRIBUTING.md
Normal file
223
CONTRIBUTING.md
Normal file
@@ -0,0 +1,223 @@
|
||||
# Contributing to rclone #
|
||||
|
||||
This is a short guide on how to contribute things to rclone.
|
||||
|
||||
## Reporting a bug ##
|
||||
|
||||
If you've just got a question or aren't sure if you've found a bug
|
||||
then please use the [rclone forum](https://forum.rclone.org/) instead
|
||||
of filing an issue.
|
||||
|
||||
When filing an issue, please include the following information if
|
||||
possible as well as a description of the problem. Make sure you test
|
||||
with the [latest beta of rclone](https://beta.rclone.org/):
|
||||
|
||||
* Rclone version (eg output from `rclone -V`)
|
||||
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
|
||||
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
|
||||
* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
|
||||
* if the log contains secrets then edit the file with a text editor first to obscure them
|
||||
|
||||
## Submitting a pull request ##
|
||||
|
||||
If you find a bug that you'd like to fix, or a new feature that you'd
|
||||
like to implement then please submit a pull request via Github.
|
||||
|
||||
If it is a big feature then make an issue first so it can be discussed.
|
||||
|
||||
You'll need a Go environment set up with GOPATH set. See [the Go
|
||||
getting started docs](https://golang.org/doc/install) for more info.
|
||||
|
||||
First in your web browser press the fork button on [rclone's Github
|
||||
page](https://github.com/ncw/rclone).
|
||||
|
||||
Now in your terminal
|
||||
|
||||
go get github.com/ncw/rclone
|
||||
cd $GOPATH/src/github.com/ncw/rclone
|
||||
git remote rename origin upstream
|
||||
git remote add origin git@github.com:YOURUSER/rclone.git
|
||||
|
||||
Make a branch to add your new feature
|
||||
|
||||
git checkout -b my-new-feature
|
||||
|
||||
And get hacking.
|
||||
|
||||
When ready - run the unit tests for the code you changed
|
||||
|
||||
go test -v
|
||||
|
||||
Note that you may need to make a test remote, eg `TestSwift` for some
|
||||
of the unit tests.
|
||||
|
||||
Note the top level Makefile targets
|
||||
|
||||
* make check
|
||||
* make test
|
||||
|
||||
Both of these will be run by Travis when you make a pull request but
|
||||
you can do this yourself locally too. These require some extra go
|
||||
packages which you can install with
|
||||
|
||||
* make build_dep
|
||||
|
||||
Make sure you
|
||||
|
||||
* Add documentation for a new feature (see below for where)
|
||||
* Add unit tests for a new feature
|
||||
* squash commits down to one per feature
|
||||
* rebase to master `git rebase master`
|
||||
|
||||
When you are done with that
|
||||
|
||||
git push origin my-new-feature
|
||||
|
||||
Go to the Github website and click [Create pull
|
||||
request](https://help.github.com/articles/creating-a-pull-request/).
|
||||
|
||||
You patch will get reviewed and you might get asked to fix some stuff.
|
||||
|
||||
If so, then make the changes in the same branch, squash the commits,
|
||||
rebase it to master then push it to Github with `--force`.
|
||||
|
||||
## Testing ##
|
||||
|
||||
rclone's tests are run from the go testing framework, so at the top
|
||||
level you can run this to run all the tests.
|
||||
|
||||
go test -v ./...
|
||||
|
||||
rclone contains a mixture of unit tests and integration tests.
|
||||
Because it is difficult (and in some respects pointless) to test cloud
|
||||
storage systems by mocking all their interfaces, rclone unit tests can
|
||||
run against any of the backends. This is done by making specially
|
||||
named remotes in the default config file.
|
||||
|
||||
If you wanted to test changes in the `drive` backend, then you would
|
||||
need to make a remote called `TestDrive`.
|
||||
|
||||
You can then run the unit tests in the drive directory. These tests
|
||||
are skipped if `TestDrive:` isn't defined.
|
||||
|
||||
cd drive
|
||||
go test -v
|
||||
|
||||
You can then run the integration tests which tests all of rclone's
|
||||
operations. Normally these get run against the local filing system,
|
||||
but they can be run against any of the remotes.
|
||||
|
||||
cd ../fs
|
||||
go test -v -remote TestDrive:
|
||||
go test -v -remote TestDrive: -subdir
|
||||
|
||||
If you want to run all the integration tests against all the remotes,
|
||||
then run in that directory
|
||||
|
||||
go run test_all.go
|
||||
|
||||
## Writing Documentation ##
|
||||
|
||||
If you are adding a new feature then please update the documentation.
|
||||
|
||||
If you add a new flag, then if it is a general flag, document it in
|
||||
`docs/content/docs.md` - the flags there are supposed to be in
|
||||
alphabetical order. If it is a remote specific flag, then document it
|
||||
in `docs/content/remote.md`.
|
||||
|
||||
The only documentation you need to edit are the `docs/content/*.md`
|
||||
files. The MANUAL.*, rclone.1, web site etc are all auto generated
|
||||
from those during the release process. See the `make doc` and `make
|
||||
website` targets in the Makefile if you are interested in how. You
|
||||
don't need to run these when adding a feature.
|
||||
|
||||
Documentation for rclone sub commands is with their code, eg
|
||||
`cmd/ls/ls.go`.
|
||||
|
||||
## Making a release ##
|
||||
|
||||
There are separate instructions for making a release in the RELEASE.md
|
||||
file.
|
||||
|
||||
## Adding a dependency ##
|
||||
|
||||
rclone uses the [dep](https://github.com/golang/dep) tool to manage
|
||||
its dependencies. All code that rclone needs for building is stored
|
||||
in the `vendor` directory for perfectly reproducible builds.
|
||||
|
||||
The `vendor` directory is entirely managed by the `dep` tool.
|
||||
|
||||
To add a new dependency
|
||||
|
||||
dep ensure github.com/pkg/errors
|
||||
|
||||
You can add constraints on that package (see the `dep` documentation),
|
||||
but don't unless you really need to.
|
||||
|
||||
Please check in the changes generated by dep including the `vendor`
|
||||
directory and `Gopkg.toml` and `Gopkg.lock` in a single commit
|
||||
separate from any other code changes. Watch out for new files in
|
||||
`vendor`.
|
||||
|
||||
## Updating a dependency ##
|
||||
|
||||
If you need to update a dependency then run
|
||||
|
||||
dep ensure -update github.com/pkg/errors
|
||||
|
||||
Check in the changes in a single commit as above.
|
||||
|
||||
## Updating all the dependencies ##
|
||||
|
||||
In order to update all the dependencies then run `make update`. This
|
||||
just runs `dep ensure -update`. Check in the changes in a single
|
||||
commit as above.
|
||||
|
||||
This should be done early in the release cycle to pick up new versions
|
||||
of packages in time for them to get some testing.
|
||||
|
||||
## Writing a new backend ##
|
||||
|
||||
Choose a name. The docs here will use `remote` as an example.
|
||||
|
||||
Note that in rclone terminology a file system backend is called a
|
||||
remote or an fs.
|
||||
|
||||
Research
|
||||
|
||||
* Look at the interfaces defined in `fs/fs.go`
|
||||
* Study one or more of the existing remotes
|
||||
|
||||
Getting going
|
||||
|
||||
* Create `remote/remote.go` (copy this from a similar remote)
|
||||
* Add your remote to the imports in `fs/all/all.go`
|
||||
|
||||
Unit tests
|
||||
|
||||
* Create a config entry called `TestRemote` for the unit tests to use
|
||||
* Add your fs to the end of `fstest/fstests/gen_tests.go`
|
||||
* generate `remote/remote_test.go` unit tests `cd fstest/fstests; go generate`
|
||||
* Make sure all tests pass with `go test -v`
|
||||
|
||||
Integration tests
|
||||
|
||||
* Add your fs to `fs/test_all.go`
|
||||
* Make sure integration tests pass with
|
||||
* `cd fs`
|
||||
* `go test -v -remote TestRemote:`
|
||||
* If you are making a bucket based remote, then check with this also
|
||||
* `go test -v -remote TestRemote: -subdir`
|
||||
* And if your remote defines `ListR` this also
|
||||
* `go test -v -remote TestRemote: -fast-list`
|
||||
|
||||
Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/).
|
||||
|
||||
* `README.md` - main Github page
|
||||
* `docs/content/remote.md` - main docs page
|
||||
* `docs/content/overview.md` - overview docs
|
||||
* `docs/content/docs.md` - list of remotes in config section
|
||||
* `docs/content/about.md` - front page of rclone.org
|
||||
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
|
||||
* `bin/make_manual.py` - add the page to the `docs` constant
|
||||
* `cmd/cmd.go` - the main help for rclone
|
||||
267
Gopkg.lock
generated
Normal file
267
Gopkg.lock
generated
Normal file
@@ -0,0 +1,267 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "bazil.org/fuse"
|
||||
packages = [".","fs","fuseutil"]
|
||||
revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
|
||||
|
||||
[[projects]]
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata","internal"]
|
||||
revision = "2e6a95edb1071d750f6d7db777bf66cd2997af6c"
|
||||
version = "v0.7.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/Unknwon/goconfig"
|
||||
packages = ["."]
|
||||
revision = "87a46d97951ee1ea20ed3b24c25646a79e87ba5d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/VividCortex/ewma"
|
||||
packages = ["."]
|
||||
revision = "c595cd886c223c6c28fc9ae2727a61b5e4693d85"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/s3iface","service/s3/s3manager","service/sts"]
|
||||
revision = "57572ec625c9aa8bf5c45453efa923bacabc0afe"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/billziss-gh/cgofuse"
|
||||
packages = ["fuse"]
|
||||
revision = "35bcf037030dcadcd247618c75c00c6cd17482d7"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/cpuguy83/go-md2man"
|
||||
packages = ["md2man"]
|
||||
revision = "a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa"
|
||||
version = "v1.0.6"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
revision = "e7fea39b01aea8d5671f6858f0532f56e8bff3a5"
|
||||
version = "v1.27.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto","ptypes/any"]
|
||||
revision = "b50ceb1fa9818fa4d78b016c2d4ae025593a7ce3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/google/go-querystring"
|
||||
packages = ["query"]
|
||||
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/googleapis/gax-go"
|
||||
packages = ["."]
|
||||
revision = "9af46dd5a1713e8b5cd71106287eba3cefdde50b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/inconshreveable/mousetrap"
|
||||
packages = ["."]
|
||||
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/jlaffaye/ftp"
|
||||
packages = ["."]
|
||||
revision = "5c7b901224c7880b293e0b5486cb6ebf97bfca37"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
packages = ["."]
|
||||
revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9"
|
||||
version = "0.2.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kr/fs"
|
||||
packages = ["."]
|
||||
revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/mattn/go-runewidth"
|
||||
packages = ["."]
|
||||
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
|
||||
version = "v0.0.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/dropbox-sdk-go-unofficial"
|
||||
packages = ["dropbox","dropbox/async","dropbox/files","dropbox/properties"]
|
||||
revision = "5d9f46f9862ae5f65e264e178de6ce2c41a32d40"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/go-acd"
|
||||
packages = ["."]
|
||||
revision = "96a49aad3fc3889629f2eceb004927386884bd92"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/swift"
|
||||
packages = ["."]
|
||||
revision = "8e9b10220613abdbc2896808ee6b43e411a4fa6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/nsf/termbox-go"
|
||||
packages = ["."]
|
||||
revision = "4163cd39dda1c0dda883a713640bc01e08951c24"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "c605e284fe17294bda444b34710735b29d1a9d90"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/pkg/sftp"
|
||||
packages = ["."]
|
||||
revision = "a5f8514e29e90a859e93871b1582e5c81f466f82"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/rfjakob/eme"
|
||||
packages = ["."]
|
||||
revision = "da627cc50b6fb2eb623eaffe91fb29d7eddfd06a"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/russross/blackfriday"
|
||||
packages = ["."]
|
||||
revision = "0b647d0506a698cca42caca173e55559b12a69f2"
|
||||
version = "v1.4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/shurcooL/sanitized_anchor_name"
|
||||
packages = ["."]
|
||||
revision = "79c90efaf01eddc01945af5bc1797859189b830b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/skratchdot/open-golang"
|
||||
packages = ["open"]
|
||||
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = [".","doc"]
|
||||
revision = "90687e7bfc7e1e5cd88eb1f513f32f01dc03dd7c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = ["assert","require"]
|
||||
revision = "4d4bfba8f1d1027c4fdbe371823030df51419987"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/xanzy/ssh-agent"
|
||||
packages = ["."]
|
||||
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","nacl/secretbox","pbkdf2","poly1305","salsa20/salsa","scrypt","ssh","ssh/agent","ssh/terminal"]
|
||||
revision = "122d919ec1efcfb58483215da23f815853e24b81"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = ["context","context/ctxhttp","html","html/atom","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
|
||||
revision = "c9b681d35165f1995d6f3034e61f8761d4b90c99"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [".","google","internal","jws","jwt"]
|
||||
revision = "ad516a297a9f2a74ecc244861b298c94bdd28b9d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix"]
|
||||
revision = "9ccfe848b9db8435a24c424abbc07a921adf1df5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/text"
|
||||
packages = ["internal/gen","internal/triegen","internal/ucd","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
|
||||
revision = "470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/time"
|
||||
packages = ["rate"]
|
||||
revision = "8be79e1e0910c292df4e79c241bb7e8f7e725959"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/api"
|
||||
packages = ["drive/v2","gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
|
||||
revision = "c858ef4400610cbfd097ffc5f5c6e4a1a51eac86"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
|
||||
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/rpc/status"]
|
||||
revision = "411e09b969b1170a9f0c467558eb4c4c110d9c77"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [".","codes","credentials","grpclb/grpc_lb_v1","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
|
||||
revision = "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "d1197786e4b7133a2e775df76d12dc57d690f2136871f8b0ad3f793c48b4ab08"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
140
Gopkg.toml
Normal file
140
Gopkg.toml
Normal file
@@ -0,0 +1,140 @@
|
||||
|
||||
## Gopkg.toml example (these lines may be deleted)
|
||||
|
||||
## "required" lists a set of packages (not projects) that must be included in
|
||||
## Gopkg.lock. This list is merged with the set of packages imported by the current
|
||||
## project. Use it when your project needs a package it doesn't explicitly import -
|
||||
## including "main" packages.
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
|
||||
## "ignored" lists a set of packages (not projects) that are ignored when
|
||||
## dep statically analyzes source code. Ignored packages can be in this project,
|
||||
## or in a dependency.
|
||||
# ignored = ["github.com/user/project/badpkg"]
|
||||
|
||||
## Dependencies define constraints on dependent projects. They are respected by
|
||||
## dep whether coming from the Gopkg.toml of the current project or a dependency.
|
||||
# [[constraint]]
|
||||
## Required: the root import path of the project being constrained.
|
||||
# name = "github.com/user/project"
|
||||
#
|
||||
## Recommended: the version constraint to enforce for the project.
|
||||
## Only one of "branch", "version" or "revision" can be specified.
|
||||
# version = "1.0.0"
|
||||
# branch = "master"
|
||||
# revision = "abc123"
|
||||
#
|
||||
## Optional: an alternate location (URL or import path) for the project's source.
|
||||
# source = "https://github.com/myfork/package.git"
|
||||
|
||||
## Overrides have the same structure as [[constraint]], but supersede all
|
||||
## [[constraint]] declarations from all projects. Only the current project's
|
||||
## [[override]] are applied.
|
||||
##
|
||||
## Overrides are a sledgehammer. Use them only as a last resort.
|
||||
# [[override]]
|
||||
## Required: the root import path of the project being constrained.
|
||||
# name = "github.com/user/project"
|
||||
#
|
||||
## Optional: specifying a version constraint override will cause all other
|
||||
## constraints on this project to be ignored; only the overridden constraint
|
||||
## need be satisfied.
|
||||
## Again, only one of "branch", "version" or "revision" can be specified.
|
||||
# version = "1.0.0"
|
||||
# branch = "master"
|
||||
# revision = "abc123"
|
||||
#
|
||||
## Optional: specifying an alternate source location as an override will
|
||||
## enforce that the alternate location is used for that project, regardless of
|
||||
## what source location any dependent projects specify.
|
||||
# source = "https://github.com/myfork/package.git"
|
||||
|
||||
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "bazil.org/fuse"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/Unknwon/goconfig"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/VividCortex/ewma"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/go-acd"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/swift"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/pkg/errors"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/pkg/sftp"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/rfjakob/eme"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/skratchdot/open-golang"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/cobra"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/pflag"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/stacktic/dropbox"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/stretchr/testify"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/tsenart/tb"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/text"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/api"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/dropbox-sdk-go-unofficial"
|
||||
17
ISSUE_TEMPLATE.md
Normal file
17
ISSUE_TEMPLATE.md
Normal file
@@ -0,0 +1,17 @@
|
||||
When filing an issue, please include the following information if possible as well as a description of the problem. Make sure you test with the latest beta of rclone.
|
||||
|
||||
https://beta.rclone.org/
|
||||
https://rclone.org/downloads/
|
||||
|
||||
If you've just got a question or aren't sure if you've found a bug then please use the [rclone forum](https://forum.rclone.org/) instead of filing an issue.
|
||||
|
||||
> What is your rclone version (eg output from `rclone -V`)
|
||||
|
||||
> Which OS you are using and how many bits (eg Windows 7, 64 bit)
|
||||
|
||||
> Which cloud storage system are you using? (eg Google Drive)
|
||||
|
||||
> The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
|
||||
|
||||
> A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
|
||||
|
||||
58
MAINTAINERS.md
Normal file
58
MAINTAINERS.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# Maintainers guide for rclone #
|
||||
|
||||
Current active maintainers of rclone are
|
||||
|
||||
* Nick Craig-Wood
|
||||
* Stefan Breunig
|
||||
|
||||
**This is a work in progress Draft**
|
||||
|
||||
This is a guide for how to be an rclone maintainer.
|
||||
|
||||
## Triaging Tickets ##
|
||||
|
||||
***FIXME*** this section needs some work!
|
||||
|
||||
When a ticket comes in it should be triaged. This means it should be classified into a bug or an enhancement or a request for support.
|
||||
|
||||
Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket.
|
||||
|
||||
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "quickie" tag to give new contributors something easy to do to get going.
|
||||
|
||||
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Unplanned. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).
|
||||
|
||||
***FIXME*** I don't think I've quite got the milestone thing sorted yet. I was wondering about classifying them into priority, or what?
|
||||
|
||||
Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
|
||||
|
||||
## Closing Tickets ##
|
||||
|
||||
Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta in the ticket with the fix in, asking for feedback.
|
||||
|
||||
## Pull requests ##
|
||||
|
||||
Try to process pull requests promptly!
|
||||
|
||||
Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits.
|
||||
|
||||
Sometimes pull requests need to be left open for a while - this is especially true of contributions of new backends, which take a long time to get right.
|
||||
|
||||
## Merges ##
|
||||
|
||||
If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
|
||||
|
||||
## Release cycle ##
|
||||
|
||||
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly or for personal reasons.
|
||||
|
||||
High impact regressions should be fixed before the next release.
|
||||
|
||||
Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.
|
||||
|
||||
Towards the end of the release cycle try not to merge anything too big, to let things settle down.
|
||||
|
||||
Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
|
||||
|
||||
## TODO ##
|
||||
|
||||
I should probably make a mailing list for maintainers or at least an rclone-dev list, and I should probably make a dev@rclone.org to register with cloud providers.
|
||||
4687
MANUAL.html
Normal file
4687
MANUAL.html
Normal file
File diff suppressed because it is too large
Load Diff
6990
MANUAL.txt
Normal file
6990
MANUAL.txt
Normal file
File diff suppressed because it is too large
Load Diff
131
Makefile
131
Makefile
@@ -1,22 +1,79 @@
|
||||
TAG := $(shell git describe --tags)
|
||||
SHELL = /bin/bash
|
||||
TAG := $(shell echo `git describe --abbrev=8 --tags`-`git rev-parse --abbrev-ref HEAD` | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//')
|
||||
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
||||
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
|
||||
GO_VERSION := $(shell go version)
|
||||
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
|
||||
GO_LATEST := $(findstring go1.8,$(GO_VERSION))
|
||||
BETA_URL := https://beta.rclone.org/$(TAG)/
|
||||
# Pass in GOTAGS=xyz on the make command line to set build tags
|
||||
ifdef GOTAGS
|
||||
BUILDTAGS=-tags "$(GOTAGS)"
|
||||
endif
|
||||
|
||||
.PHONY: rclone vars version
|
||||
|
||||
rclone:
|
||||
@go version
|
||||
go install -v ./...
|
||||
touch fs/version.go
|
||||
go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
|
||||
cp -av `go env GOPATH`/bin/rclone .
|
||||
|
||||
vars:
|
||||
@echo SHELL="'$(SHELL)'"
|
||||
@echo TAG="'$(TAG)'"
|
||||
@echo LAST_TAG="'$(LAST_TAG)'"
|
||||
@echo NEW_TAG="'$(NEW_TAG)'"
|
||||
@echo GO_VERSION="'$(GO_VERSION)'"
|
||||
@echo GO_LATEST="'$(GO_LATEST)'"
|
||||
@echo BETA_URL="'$(BETA_URL)'"
|
||||
|
||||
version:
|
||||
@echo '$(TAG)'
|
||||
|
||||
# Full suite of integration tests
|
||||
test: rclone
|
||||
go test ./...
|
||||
cd fs && ./test_all.sh
|
||||
go test $(BUILDTAGS) $(GO_FILES)
|
||||
cd fs && go run $(BUILDTAGS) test_all.go
|
||||
|
||||
# Quick test
|
||||
quicktest:
|
||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
|
||||
ifdef GO_LATEST
|
||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
|
||||
endif
|
||||
|
||||
# Do source code quality checks
|
||||
check: rclone
|
||||
ifdef GO_LATEST
|
||||
go tool vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf . 2>&1 | grep -E -v vendor/ ; test $$? -eq 1
|
||||
errcheck $(BUILDTAGS) $(GO_FILES)
|
||||
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
|
||||
go list ./... | grep -v /vendor/ | xargs -i golint {} | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
|
||||
else
|
||||
@echo Skipping tests as not on Go stable
|
||||
endif
|
||||
|
||||
# Get the build dependencies
|
||||
build_dep:
|
||||
ifdef GO_LATEST
|
||||
go get -u github.com/kisielk/errcheck
|
||||
go get -u golang.org/x/tools/cmd/goimports
|
||||
go get -u github.com/golang/lint/golint
|
||||
go get -u github.com/inconshreveable/mousetrap
|
||||
go get -u github.com/tools/godep
|
||||
endif
|
||||
|
||||
# Update dependencies
|
||||
update:
|
||||
dep ensure -update -v
|
||||
|
||||
doc: rclone.1 MANUAL.html MANUAL.txt
|
||||
|
||||
rclone.1: MANUAL.md
|
||||
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
|
||||
|
||||
MANUAL.md: make_manual.py docs/content/*.md
|
||||
./make_manual.py
|
||||
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
|
||||
./bin/make_manual.py
|
||||
|
||||
MANUAL.html: MANUAL.md
|
||||
pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
|
||||
@@ -24,6 +81,9 @@ MANUAL.html: MANUAL.md
|
||||
MANUAL.txt: MANUAL.md
|
||||
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
|
||||
|
||||
commanddocs: rclone
|
||||
rclone gendocs docs/content/commands/
|
||||
|
||||
install: rclone
|
||||
install -d ${DESTDIR}/usr/bin
|
||||
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
|
||||
@@ -32,7 +92,7 @@ clean:
|
||||
go clean ./...
|
||||
find . -name \*~ | xargs -r rm -f
|
||||
rm -rf build docs/public
|
||||
rm -f rclone rclonetest/rclonetest rclone.1 MANUAL.md MANUAL.html MANUAL.txt
|
||||
rm -f rclone rclonetest/rclonetest
|
||||
|
||||
website:
|
||||
cd docs && hugo
|
||||
@@ -43,21 +103,50 @@ upload_website: website
|
||||
upload:
|
||||
rclone -v copy build/ memstore:downloads-rclone-org
|
||||
|
||||
cross: doc
|
||||
./cross-compile $(TAG)
|
||||
upload_github:
|
||||
./bin/upload-github $(TAG)
|
||||
|
||||
serve:
|
||||
cross: doc
|
||||
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
|
||||
|
||||
beta:
|
||||
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
|
||||
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
|
||||
@echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/
|
||||
|
||||
log_since_last_release:
|
||||
git log $(LAST_TAG)..
|
||||
|
||||
upload_beta:
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' build/ memstore:beta-rclone-org
|
||||
@echo Beta release ready at $(BETA_URL)
|
||||
|
||||
travis_beta:
|
||||
git log $(LAST_TAG).. > /tmp/git-log.txt
|
||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt -exclude "^windows/" -parallel 8 $(BUILDTAGS) $(TAG)β
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' build/ memstore:beta-rclone-org
|
||||
@echo Beta release ready at $(BETA_URL)
|
||||
|
||||
# Fetch the windows builds from appveyor
|
||||
fetch_windows:
|
||||
rclone -v copy --include 'rclone-v*-windows-*.zip' memstore:beta-rclone-org build/$(TAG) build/
|
||||
cp -av rclone-v*-windows-*.zip rclone-current-windows-386.zip
|
||||
cp -av rclone-v*-windows-*.zip rclone-current-windows-amd64.zip
|
||||
|
||||
serve: website
|
||||
cd docs && hugo server -v -w
|
||||
|
||||
tag:
|
||||
tag: doc
|
||||
@echo "Old tag is $(LAST_TAG)"
|
||||
@echo "New tag is $(NEW_TAG)"
|
||||
echo -e "package fs\n const Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
|
||||
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
|
||||
perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
|
||||
git tag $(NEW_TAG)
|
||||
@echo "Add this to changelog in docs/content/changelog.md"
|
||||
@echo " * $(NEW_TAG) -" `date -I`
|
||||
@git log $(LAST_TAG)..$(NEW_TAG) --oneline
|
||||
@echo "Edit the new changelog in docs/content/changelog.md"
|
||||
@echo " * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
|
||||
@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
|
||||
@echo "Then commit the changes"
|
||||
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
|
||||
@echo "And finally run make retag before make cross etc"
|
||||
@@ -65,5 +154,13 @@ tag:
|
||||
retag:
|
||||
git tag -f $(LAST_TAG)
|
||||
|
||||
startdev:
|
||||
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
|
||||
git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
|
||||
|
||||
gen_tests:
|
||||
cd fstest/fstests && go run gen_tests.go
|
||||
cd fstest/fstests && go generate
|
||||
|
||||
winzip:
|
||||
zip -9 rclone-$(TAG).zip rclone.exe
|
||||
|
||||
|
||||
36
README.md
36
README.md
@@ -1,11 +1,17 @@
|
||||
[](http://rclone.org/)
|
||||
[](https://rclone.org/)
|
||||
|
||||
[Website](http://rclone.org) |
|
||||
[Documentation](http://rclone.org/docs/) |
|
||||
[Installation](http://rclone.org/install/) |
|
||||
[Website](https://rclone.org) |
|
||||
[Documentation](https://rclone.org/docs/) |
|
||||
[Contributing](CONTRIBUTING.md) |
|
||||
[Changelog](https://rclone.org/changelog/) |
|
||||
[Installation](https://rclone.org/install/) |
|
||||
[Forum](https://forum.rclone.org/)
|
||||
[G+](https://google.com/+RcloneOrg)
|
||||
|
||||
[](https://travis-ci.org/ncw/rclone) [](https://godoc.org/github.com/ncw/rclone)
|
||||
[](https://travis-ci.org/ncw/rclone)
|
||||
[](https://ci.appveyor.com/project/ncw/rclone)
|
||||
[](https://circleci.com/gh/ncw/rclone/tree/master)
|
||||
[](https://godoc.org/github.com/ncw/rclone)
|
||||
|
||||
Rclone is a command line program to sync files and directories to and from
|
||||
|
||||
@@ -14,22 +20,32 @@ Rclone is a command line program to sync files and directories to and from
|
||||
* Openstack Swift / Rackspace cloud files / Memset Memstore
|
||||
* Dropbox
|
||||
* Google Cloud Storage
|
||||
* Amazon Drive
|
||||
* Microsoft OneDrive
|
||||
* Hubic
|
||||
* Backblaze B2
|
||||
* Yandex Disk
|
||||
* SFTP
|
||||
* FTP
|
||||
* HTTP
|
||||
* The local filesystem
|
||||
|
||||
Features
|
||||
|
||||
* MD5SUMs checked at all times for file integrity
|
||||
* MD5/SHA1 hashes checked at all times for file integrity
|
||||
* Timestamps preserved on files
|
||||
* Partial syncs supported on a whole file basis
|
||||
* Copy mode to just copy new/changed files
|
||||
* Sync mode to make a directory identical
|
||||
* Check mode to check all MD5SUMs
|
||||
* Can sync to and from network, eg two different Drive accounts
|
||||
* Sync (one way) mode to make a directory identical
|
||||
* Check mode to check for file hash equality
|
||||
* Can sync to and from network, eg two different cloud accounts
|
||||
* Optional encryption (Crypt)
|
||||
* Optional FUSE mount
|
||||
|
||||
See the home page for installation, usage, documentation, changelog
|
||||
and configuration walkthroughs.
|
||||
|
||||
* http://rclone.org/
|
||||
* https://rclone.org/
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
35
RELEASE.md
35
RELEASE.md
@@ -1,19 +1,38 @@
|
||||
Required software for making a release
|
||||
Extra required software for making a release
|
||||
* [github-release](https://github.com/aktau/github-release) for uploading packages
|
||||
* [gox](https://github.com/mitchellh/gox) for cross compiling
|
||||
* Run `gox -build-toolchain`
|
||||
* This assumes you have your own source checkout
|
||||
* pandoc for making the html and man pages
|
||||
|
||||
Making a release
|
||||
* go get -u -f -v ./...
|
||||
* git status - make sure everything is checked in
|
||||
* Check travis & appveyor builds are green
|
||||
* make check
|
||||
* make test
|
||||
* make tag
|
||||
* edit docs/content/changelog.md
|
||||
* git commit -a -v
|
||||
* make doc
|
||||
* git status - to check for new man pages - git add them
|
||||
* # Update version number in snapcraft.yml
|
||||
* git commit -a -v -m "Version v1.XX"
|
||||
* make retag
|
||||
* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross
|
||||
* # Set the GOPATH for a current stable go compiler
|
||||
* make cross
|
||||
* git push --tags origin master
|
||||
* git push --tags origin master:stable # update the stable branch for packager.io
|
||||
* # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
|
||||
* make fetch_windows
|
||||
* make upload
|
||||
* make upload_website
|
||||
* git push --tags origin master
|
||||
* make upload_github
|
||||
* make startdev
|
||||
* # announce with forum post, twitter post, G+ post
|
||||
|
||||
Early in the next release cycle update the vendored dependencies
|
||||
* make update
|
||||
* git status
|
||||
* git add new files
|
||||
* carry forward any patches to vendor stuff
|
||||
* git commit -a -v
|
||||
|
||||
## Make version number go to -DEV and check in
|
||||
|
||||
Make the version number be just in a file?
|
||||
1186
amazonclouddrive/amazonclouddrive.go
Normal file
1186
amazonclouddrive/amazonclouddrive.go
Normal file
File diff suppressed because it is too large
Load Diff
72
amazonclouddrive/amazonclouddrive_test.go
Normal file
72
amazonclouddrive/amazonclouddrive_test.go
Normal file
@@ -0,0 +1,72 @@
|
||||
// Test AmazonCloudDrive filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package amazonclouddrive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/amazonclouddrive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
|
||||
fstests.RemoteName = "TestAmazonCloudDrive:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
301
b2/api/types.go
Normal file
301
b2/api/types.go
Normal file
@@ -0,0 +1,301 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// Error describes a B2 error response
|
||||
type Error struct {
|
||||
Status int `json:"status"` // The numeric HTTP status code. Always matches the status in the HTTP response.
|
||||
Code string `json:"code"` // A single-identifier code that identifies the error.
|
||||
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
|
||||
}
|
||||
|
||||
// Error statisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
|
||||
}
|
||||
|
||||
// Fatal statisfies the Fatal interface
|
||||
//
|
||||
// It indicates which errors should be treated as fatal
|
||||
func (e *Error) Fatal() bool {
|
||||
return e.Status == 403 // 403 errors shouldn't be retried
|
||||
}
|
||||
|
||||
var _ fs.Fataler = (*Error)(nil)
|
||||
|
||||
// Account describes a B2 account
|
||||
type Account struct {
|
||||
ID string `json:"accountId"` // The identifier for the account.
|
||||
}
|
||||
|
||||
// Bucket describes a B2 bucket
|
||||
type Bucket struct {
|
||||
ID string `json:"bucketId"`
|
||||
AccountID string `json:"accountId"`
|
||||
Name string `json:"bucketName"`
|
||||
Type string `json:"bucketType"`
|
||||
}
|
||||
|
||||
// Timestamp is a UTC time when this file was uploaded. It is a base
|
||||
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
|
||||
// fits in a 64 bit integer such as the type "long" in the programming
|
||||
// language Java. It is intended to be compatible with Java's time
|
||||
// long. For example, it can be passed directly into the java call
|
||||
// Date.setTime(long time).
|
||||
type Timestamp time.Time
|
||||
|
||||
// MarshalJSON turns a Timestamp into JSON (in UTC)
|
||||
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
|
||||
timestamp := (*time.Time)(t).UTC().UnixNano()
|
||||
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into a Timestamp
|
||||
func (t *Timestamp) UnmarshalJSON(data []byte) error {
|
||||
timestamp, err := strconv.ParseInt(string(data), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
|
||||
return nil
|
||||
}
|
||||
|
||||
const versionFormat = "-v2006-01-02-150405.000"
|
||||
|
||||
// AddVersion adds the timestamp as a version string into the filename passed in.
|
||||
func (t Timestamp) AddVersion(remote string) string {
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
s := (time.Time)(t).Format(versionFormat)
|
||||
// Replace the '.' with a '-'
|
||||
s = strings.Replace(s, ".", "-", -1)
|
||||
return base + s + ext
|
||||
}
|
||||
|
||||
// RemoveVersion removes the timestamp from a filename as a version string.
|
||||
//
|
||||
// It returns the new file name and a timestamp, or the old filename
|
||||
// and a zero timestamp.
|
||||
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||
newRemote = remote
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
if len(base) < len(versionFormat) {
|
||||
return
|
||||
}
|
||||
versionStart := len(base) - len(versionFormat)
|
||||
// Check it ends in -xxx
|
||||
if base[len(base)-4] != '-' {
|
||||
return
|
||||
}
|
||||
// Replace with .xxx for parsing
|
||||
base = base[:len(base)-4] + "." + base[len(base)-3:]
|
||||
newT, err := time.Parse(versionFormat, base[versionStart:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return Timestamp(newT), base[:versionStart] + ext
|
||||
}
|
||||
|
||||
// IsZero returns true if the timestamp is unitialised
|
||||
func (t Timestamp) IsZero() bool {
|
||||
return (time.Time)(t).IsZero()
|
||||
}
|
||||
|
||||
// Equal compares two timestamps
|
||||
//
|
||||
// If either are !IsZero then it returns false
|
||||
func (t Timestamp) Equal(s Timestamp) bool {
|
||||
if (time.Time)(t).IsZero() {
|
||||
return false
|
||||
}
|
||||
if (time.Time)(s).IsZero() {
|
||||
return false
|
||||
}
|
||||
return (time.Time)(t).Equal((time.Time)(s))
|
||||
}
|
||||
|
||||
// File is info about a file
|
||||
type File struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
|
||||
Size int64 `json:"size"` // The number of bytes in the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
}
|
||||
|
||||
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
|
||||
type AuthorizeAccountResponse struct {
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
|
||||
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
|
||||
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
|
||||
}
|
||||
|
||||
// ListBucketsResponse is as returned from the b2_list_buckets call
|
||||
type ListBucketsResponse struct {
|
||||
Buckets []Bucket `json:"buckets"`
|
||||
}
|
||||
|
||||
// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
|
||||
type ListFileNamesRequest struct {
|
||||
BucketID string `json:"bucketId"` // required - The bucket to look for file names in.
|
||||
StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this the first one after this name.
|
||||
MaxFileCount int `json:"maxFileCount,omitempty"` // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
|
||||
StartFileID string `json:"startFileId,omitempty"` // optional - What to pass in to startFileId for the next search to continue where this one left off.
|
||||
Prefix string `json:"prefix,omitempty"` // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files.
|
||||
Delimiter string `json:"delimiter,omitempty"` // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
|
||||
}
|
||||
|
||||
// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
|
||||
type ListFileNamesResponse struct {
|
||||
Files []File `json:"files"` // An array of objects, each one describing one file.
|
||||
NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
|
||||
NextFileID *string `json:"nextFileId"` // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
|
||||
}
|
||||
|
||||
// GetUploadURLRequest is passed to b2_get_upload_url
|
||||
type GetUploadURLRequest struct {
|
||||
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
|
||||
}
|
||||
|
||||
// GetUploadURLResponse is received from b2_get_upload_url
|
||||
type GetUploadURLResponse struct {
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_file.
|
||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
|
||||
}
|
||||
|
||||
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
|
||||
type FileInfo struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
|
||||
AccountID string `json:"accountId"` // Your account ID.
|
||||
BucketID string `json:"bucketId"` // The bucket that the file is in.
|
||||
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
}
|
||||
|
||||
// CreateBucketRequest is used to create a bucket
|
||||
type CreateBucketRequest struct {
|
||||
AccountID string `json:"accountId"`
|
||||
Name string `json:"bucketName"`
|
||||
Type string `json:"bucketType"`
|
||||
}
|
||||
|
||||
// DeleteBucketRequest is used to create a bucket
|
||||
type DeleteBucketRequest struct {
|
||||
ID string `json:"bucketId"`
|
||||
AccountID string `json:"accountId"`
|
||||
}
|
||||
|
||||
// DeleteFileRequest is used to delete a file version
|
||||
type DeleteFileRequest struct {
|
||||
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
|
||||
Name string `json:"fileName"` // The name of this file.
|
||||
}
|
||||
|
||||
// HideFileRequest is used to delete a file
|
||||
type HideFileRequest struct {
|
||||
BucketID string `json:"bucketId"` // The bucket containing the file to hide.
|
||||
Name string `json:"fileName"` // The name of the file to hide.
|
||||
}
|
||||
|
||||
// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
|
||||
type GetFileInfoRequest struct {
|
||||
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
|
||||
}
|
||||
|
||||
// StartLargeFileRequest (b2_start_large_file) Prepares for uploading the parts of a large file.
|
||||
//
|
||||
// If the original source of the file being uploaded has a last
|
||||
// modified time concept, Backblaze recommends using
|
||||
// src_last_modified_millis as the name, and a string holding the base
|
||||
// 10 number number of milliseconds since midnight, January 1, 1970
|
||||
// UTC. This fits in a 64 bit integer such as the type "long" in the
|
||||
// programming language Java. It is intended to be compatible with
|
||||
// Java's time long. For example, it can be passed directly into the
|
||||
// Java call Date.setTime(long time).
|
||||
//
|
||||
// If the caller knows the SHA1 of the entire large file being
|
||||
// uploaded, Backblaze recommends using large_file_sha1 as the name,
|
||||
// and a 40 byte hex string representing the SHA1.
|
||||
//
|
||||
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
|
||||
type StartLargeFileRequest struct {
|
||||
BucketID string `json:"bucketId"` //The ID of the bucket that the file will go in.
|
||||
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
|
||||
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
|
||||
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
|
||||
}
|
||||
|
||||
// StartLargeFileResponse is the response to StartLargeFileRequest
|
||||
type StartLargeFileResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
}
|
||||
|
||||
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
|
||||
type GetUploadPartURLRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
}
|
||||
|
||||
// GetUploadPartURLResponse is received from b2_get_upload_url
|
||||
type GetUploadPartURLResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_part.
|
||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part.
|
||||
}
|
||||
|
||||
// UploadPartResponse is the response to b2_upload_part
|
||||
type UploadPartResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
|
||||
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
}
|
||||
|
||||
// FinishLargeFileRequest is passed to b2_finish_large_file
|
||||
//
|
||||
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
|
||||
//
|
||||
// Large files do not have a SHA1 checksum. The value will always be "none".
|
||||
type FinishLargeFileRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of the part 1 is the first string in the array, at index 0.
|
||||
}
|
||||
|
||||
// CancelLargeFileRequest is passed to b2_finish_large_file
|
||||
//
|
||||
// The response is a CancelLargeFileResponse
|
||||
type CancelLargeFileRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
}
|
||||
|
||||
// CancelLargeFileResponse is the response to CancelLargeFileRequest
|
||||
type CancelLargeFileResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
Name string `json:"fileName"` // The name of this file.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
}
|
||||
87
b2/api/types_test.go
Normal file
87
b2/api/types_test.go
Normal file
@@ -0,0 +1,87 @@
|
||||
package api_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/b2/api"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
emptyT api.Timestamp
|
||||
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
|
||||
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
|
||||
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
|
||||
)
|
||||
|
||||
func TestTimestampMarshalJSON(t *testing.T) {
|
||||
resB, err := t0.MarshalJSON()
|
||||
res := string(resB)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "3661123", res)
|
||||
|
||||
resB, err = t1.MarshalJSON()
|
||||
res = string(resB)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "981173106123", res)
|
||||
}
|
||||
|
||||
func TestTimestampUnmarshalJSON(t *testing.T) {
|
||||
var tActual api.Timestamp
|
||||
err := tActual.UnmarshalJSON([]byte("981173106123"))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
|
||||
}
|
||||
|
||||
func TestTimestampAddVersion(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
t api.Timestamp
|
||||
in string
|
||||
expected string
|
||||
}{
|
||||
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
|
||||
{t1, "potato", "potato-v2001-02-03-040506-123"},
|
||||
{t1, "", "-v2001-02-03-040506-123"},
|
||||
} {
|
||||
actual := test.t.AddVersion(test.in)
|
||||
assert.Equal(t, test.expected, actual, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampRemoveVersion(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedT api.Timestamp
|
||||
expectedRemote string
|
||||
}{
|
||||
{"potato.txt", emptyT, "potato.txt"},
|
||||
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
|
||||
{"potato-v2001-02-03-040506-123", t1, "potato"},
|
||||
{"-v2001-02-03-040506-123", t1, ""},
|
||||
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
|
||||
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
|
||||
} {
|
||||
actualT, actualRemote := api.RemoveVersion(test.in)
|
||||
assert.Equal(t, test.expectedT, actualT, test.in)
|
||||
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampIsZero(t *testing.T) {
|
||||
assert.True(t, emptyT.IsZero())
|
||||
assert.False(t, t0.IsZero())
|
||||
assert.False(t, t1.IsZero())
|
||||
}
|
||||
|
||||
func TestTimestampEqual(t *testing.T) {
|
||||
assert.False(t, emptyT.Equal(emptyT))
|
||||
assert.False(t, t0.Equal(emptyT))
|
||||
assert.False(t, emptyT.Equal(t0))
|
||||
assert.False(t, t0.Equal(t1))
|
||||
assert.False(t, t1.Equal(t0))
|
||||
assert.True(t, t0.Equal(t0))
|
||||
assert.True(t, t1.Equal(t1))
|
||||
}
|
||||
170
b2/b2_internal_test.go
Normal file
170
b2/b2_internal_test.go
Normal file
@@ -0,0 +1,170 @@
|
||||
package b2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fstest"
|
||||
)
|
||||
|
||||
// Test b2 string encoding
|
||||
// https://www.backblaze.com/b2/docs/string_encoding.html
|
||||
|
||||
var encodeTest = []struct {
|
||||
fullyEncoded string
|
||||
minimallyEncoded string
|
||||
plainText string
|
||||
}{
|
||||
{fullyEncoded: "%20", minimallyEncoded: "+", plainText: " "},
|
||||
{fullyEncoded: "%21", minimallyEncoded: "!", plainText: "!"},
|
||||
{fullyEncoded: "%22", minimallyEncoded: "%22", plainText: "\""},
|
||||
{fullyEncoded: "%23", minimallyEncoded: "%23", plainText: "#"},
|
||||
{fullyEncoded: "%24", minimallyEncoded: "$", plainText: "$"},
|
||||
{fullyEncoded: "%25", minimallyEncoded: "%25", plainText: "%"},
|
||||
{fullyEncoded: "%26", minimallyEncoded: "%26", plainText: "&"},
|
||||
{fullyEncoded: "%27", minimallyEncoded: "'", plainText: "'"},
|
||||
{fullyEncoded: "%28", minimallyEncoded: "(", plainText: "("},
|
||||
{fullyEncoded: "%29", minimallyEncoded: ")", plainText: ")"},
|
||||
{fullyEncoded: "%2A", minimallyEncoded: "*", plainText: "*"},
|
||||
{fullyEncoded: "%2B", minimallyEncoded: "%2B", plainText: "+"},
|
||||
{fullyEncoded: "%2C", minimallyEncoded: "%2C", plainText: ","},
|
||||
{fullyEncoded: "%2D", minimallyEncoded: "-", plainText: "-"},
|
||||
{fullyEncoded: "%2E", minimallyEncoded: ".", plainText: "."},
|
||||
{fullyEncoded: "%2F", minimallyEncoded: "/", plainText: "/"},
|
||||
{fullyEncoded: "%30", minimallyEncoded: "0", plainText: "0"},
|
||||
{fullyEncoded: "%31", minimallyEncoded: "1", plainText: "1"},
|
||||
{fullyEncoded: "%32", minimallyEncoded: "2", plainText: "2"},
|
||||
{fullyEncoded: "%33", minimallyEncoded: "3", plainText: "3"},
|
||||
{fullyEncoded: "%34", minimallyEncoded: "4", plainText: "4"},
|
||||
{fullyEncoded: "%35", minimallyEncoded: "5", plainText: "5"},
|
||||
{fullyEncoded: "%36", minimallyEncoded: "6", plainText: "6"},
|
||||
{fullyEncoded: "%37", minimallyEncoded: "7", plainText: "7"},
|
||||
{fullyEncoded: "%38", minimallyEncoded: "8", plainText: "8"},
|
||||
{fullyEncoded: "%39", minimallyEncoded: "9", plainText: "9"},
|
||||
{fullyEncoded: "%3A", minimallyEncoded: ":", plainText: ":"},
|
||||
{fullyEncoded: "%3B", minimallyEncoded: ";", plainText: ";"},
|
||||
{fullyEncoded: "%3C", minimallyEncoded: "%3C", plainText: "<"},
|
||||
{fullyEncoded: "%3D", minimallyEncoded: "=", plainText: "="},
|
||||
{fullyEncoded: "%3E", minimallyEncoded: "%3E", plainText: ">"},
|
||||
{fullyEncoded: "%3F", minimallyEncoded: "%3F", plainText: "?"},
|
||||
{fullyEncoded: "%40", minimallyEncoded: "@", plainText: "@"},
|
||||
{fullyEncoded: "%41", minimallyEncoded: "A", plainText: "A"},
|
||||
{fullyEncoded: "%42", minimallyEncoded: "B", plainText: "B"},
|
||||
{fullyEncoded: "%43", minimallyEncoded: "C", plainText: "C"},
|
||||
{fullyEncoded: "%44", minimallyEncoded: "D", plainText: "D"},
|
||||
{fullyEncoded: "%45", minimallyEncoded: "E", plainText: "E"},
|
||||
{fullyEncoded: "%46", minimallyEncoded: "F", plainText: "F"},
|
||||
{fullyEncoded: "%47", minimallyEncoded: "G", plainText: "G"},
|
||||
{fullyEncoded: "%48", minimallyEncoded: "H", plainText: "H"},
|
||||
{fullyEncoded: "%49", minimallyEncoded: "I", plainText: "I"},
|
||||
{fullyEncoded: "%4A", minimallyEncoded: "J", plainText: "J"},
|
||||
{fullyEncoded: "%4B", minimallyEncoded: "K", plainText: "K"},
|
||||
{fullyEncoded: "%4C", minimallyEncoded: "L", plainText: "L"},
|
||||
{fullyEncoded: "%4D", minimallyEncoded: "M", plainText: "M"},
|
||||
{fullyEncoded: "%4E", minimallyEncoded: "N", plainText: "N"},
|
||||
{fullyEncoded: "%4F", minimallyEncoded: "O", plainText: "O"},
|
||||
{fullyEncoded: "%50", minimallyEncoded: "P", plainText: "P"},
|
||||
{fullyEncoded: "%51", minimallyEncoded: "Q", plainText: "Q"},
|
||||
{fullyEncoded: "%52", minimallyEncoded: "R", plainText: "R"},
|
||||
{fullyEncoded: "%53", minimallyEncoded: "S", plainText: "S"},
|
||||
{fullyEncoded: "%54", minimallyEncoded: "T", plainText: "T"},
|
||||
{fullyEncoded: "%55", minimallyEncoded: "U", plainText: "U"},
|
||||
{fullyEncoded: "%56", minimallyEncoded: "V", plainText: "V"},
|
||||
{fullyEncoded: "%57", minimallyEncoded: "W", plainText: "W"},
|
||||
{fullyEncoded: "%58", minimallyEncoded: "X", plainText: "X"},
|
||||
{fullyEncoded: "%59", minimallyEncoded: "Y", plainText: "Y"},
|
||||
{fullyEncoded: "%5A", minimallyEncoded: "Z", plainText: "Z"},
|
||||
{fullyEncoded: "%5B", minimallyEncoded: "%5B", plainText: "["},
|
||||
{fullyEncoded: "%5C", minimallyEncoded: "%5C", plainText: "\\"},
|
||||
{fullyEncoded: "%5D", minimallyEncoded: "%5D", plainText: "]"},
|
||||
{fullyEncoded: "%5E", minimallyEncoded: "%5E", plainText: "^"},
|
||||
{fullyEncoded: "%5F", minimallyEncoded: "_", plainText: "_"},
|
||||
{fullyEncoded: "%60", minimallyEncoded: "%60", plainText: "`"},
|
||||
{fullyEncoded: "%61", minimallyEncoded: "a", plainText: "a"},
|
||||
{fullyEncoded: "%62", minimallyEncoded: "b", plainText: "b"},
|
||||
{fullyEncoded: "%63", minimallyEncoded: "c", plainText: "c"},
|
||||
{fullyEncoded: "%64", minimallyEncoded: "d", plainText: "d"},
|
||||
{fullyEncoded: "%65", minimallyEncoded: "e", plainText: "e"},
|
||||
{fullyEncoded: "%66", minimallyEncoded: "f", plainText: "f"},
|
||||
{fullyEncoded: "%67", minimallyEncoded: "g", plainText: "g"},
|
||||
{fullyEncoded: "%68", minimallyEncoded: "h", plainText: "h"},
|
||||
{fullyEncoded: "%69", minimallyEncoded: "i", plainText: "i"},
|
||||
{fullyEncoded: "%6A", minimallyEncoded: "j", plainText: "j"},
|
||||
{fullyEncoded: "%6B", minimallyEncoded: "k", plainText: "k"},
|
||||
{fullyEncoded: "%6C", minimallyEncoded: "l", plainText: "l"},
|
||||
{fullyEncoded: "%6D", minimallyEncoded: "m", plainText: "m"},
|
||||
{fullyEncoded: "%6E", minimallyEncoded: "n", plainText: "n"},
|
||||
{fullyEncoded: "%6F", minimallyEncoded: "o", plainText: "o"},
|
||||
{fullyEncoded: "%70", minimallyEncoded: "p", plainText: "p"},
|
||||
{fullyEncoded: "%71", minimallyEncoded: "q", plainText: "q"},
|
||||
{fullyEncoded: "%72", minimallyEncoded: "r", plainText: "r"},
|
||||
{fullyEncoded: "%73", minimallyEncoded: "s", plainText: "s"},
|
||||
{fullyEncoded: "%74", minimallyEncoded: "t", plainText: "t"},
|
||||
{fullyEncoded: "%75", minimallyEncoded: "u", plainText: "u"},
|
||||
{fullyEncoded: "%76", minimallyEncoded: "v", plainText: "v"},
|
||||
{fullyEncoded: "%77", minimallyEncoded: "w", plainText: "w"},
|
||||
{fullyEncoded: "%78", minimallyEncoded: "x", plainText: "x"},
|
||||
{fullyEncoded: "%79", minimallyEncoded: "y", plainText: "y"},
|
||||
{fullyEncoded: "%7A", minimallyEncoded: "z", plainText: "z"},
|
||||
{fullyEncoded: "%7B", minimallyEncoded: "%7B", plainText: "{"},
|
||||
{fullyEncoded: "%7C", minimallyEncoded: "%7C", plainText: "|"},
|
||||
{fullyEncoded: "%7D", minimallyEncoded: "%7D", plainText: "}"},
|
||||
{fullyEncoded: "%7E", minimallyEncoded: "~", plainText: "~"},
|
||||
{fullyEncoded: "%7F", minimallyEncoded: "%7F", plainText: "\u007f"},
|
||||
{fullyEncoded: "%E8%87%AA%E7%94%B1", minimallyEncoded: "%E8%87%AA%E7%94%B1", plainText: "自由"},
|
||||
{fullyEncoded: "%F0%90%90%80", minimallyEncoded: "%F0%90%90%80", plainText: "𐐀"},
|
||||
}
|
||||
|
||||
func TestUrlEncode(t *testing.T) {
|
||||
for _, test := range encodeTest {
|
||||
got := urlEncode(test.plainText)
|
||||
if got != test.minimallyEncoded && got != test.fullyEncoded {
|
||||
t.Errorf("urlEncode(%q) got %q wanted %q or %q", test.plainText, got, test.minimallyEncoded, test.fullyEncoded)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimeString(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in time.Time
|
||||
want string
|
||||
}{
|
||||
{fstest.Time("1970-01-01T00:00:00.000000000Z"), "0"},
|
||||
{fstest.Time("2001-02-03T04:05:10.123123123Z"), "981173110123"},
|
||||
{fstest.Time("2001-02-03T05:05:10.123123123+01:00"), "981173110123"},
|
||||
} {
|
||||
got := timeString(test.in)
|
||||
if test.want != got {
|
||||
t.Logf("%v: want %v got %v", test.in, test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestParseTimeString(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want time.Time
|
||||
wantError string
|
||||
}{
|
||||
{"0", fstest.Time("1970-01-01T00:00:00.000000000Z"), ""},
|
||||
{"981173110123", fstest.Time("2001-02-03T04:05:10.123000000Z"), ""},
|
||||
{"", time.Time{}, ""},
|
||||
{"potato", time.Time{}, `strconv.ParseInt: parsing "potato": invalid syntax`},
|
||||
} {
|
||||
o := Object{}
|
||||
err := o.parseTimeString(test.in)
|
||||
got := o.modTime
|
||||
var gotError string
|
||||
if err != nil {
|
||||
gotError = err.Error()
|
||||
}
|
||||
if test.want != got {
|
||||
t.Logf("%v: want %v got %v", test.in, test.want, got)
|
||||
}
|
||||
if test.wantError != gotError {
|
||||
t.Logf("%v: want error %v got error %v", test.in, test.wantError, gotError)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
72
b2/b2_test.go
Normal file
72
b2/b2_test.go
Normal file
@@ -0,0 +1,72 @@
|
||||
// Test B2 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package b2_test

import (
	"testing"

	"github.com/ncw/rclone/b2"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestSetup configures the generic fstests package for the B2 backend.
// It must run before the tests below; Go runs tests in source order, so
// it is placed first in this file.
func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*b2.Object)(nil))
	fstests.RemoteName = "TestB2:"
}

// Generic tests for the Fs
func TestInit(t *testing.T)                { fstests.TestInit(t) }
func TestFsString(t *testing.T)            { fstests.TestFsString(t) }
func TestFsName(t *testing.T)              { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T)              { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T)        { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T)     { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T)             { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T)  { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T)         { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T)      { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T)     { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T)          { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T)          { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T)          { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T)       { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T)      { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T)     { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T)       { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T)      { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T)        { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T)       { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T)        { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T)       { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T)         { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T)         { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T)     { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T)      { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T)              { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T)              { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T)           { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T)         { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T)         { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T)   { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T)        { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T)            { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T)        { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T)        { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T)       { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T)      { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T)    { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T)          { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T)          { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T)      { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T)   { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T)        { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T)      { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T)            { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T)    { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T)        { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T)         { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T)            { fstests.TestFinalise(t) }
|
||||
302
b2/upload.go
Normal file
302
b2/upload.go
Normal file
@@ -0,0 +1,302 @@
|
||||
// Upload large files for b2
|
||||
//
|
||||
// Docs - https://www.backblaze.com/b2/docs/large_files.html
|
||||
|
||||
package b2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/b2/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/rest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
	f        *Fs                             // parent Fs
	o        *Object                         // object being uploaded
	in       io.Reader                       // read the data from here
	id       string                          // ID of the file being uploaded
	size     int64                           // total size
	parts    int64                           // calculated number of parts
	sha1s    []string                        // slice of SHA1s for each part, indexed by part-1
	uploadMu sync.Mutex                      // lock for upload variable
	uploads  []*api.GetUploadPartURLResponse // result of get upload URL calls - cache of reusable URLs
}
|
||||
|
||||
// newLargeUpload starts an upload of object o from in with metadata in src
//
// It calls b2_start_large_file to allocate a file ID and returns a
// largeUpload ready to have Upload called on it. It returns an error if
// the file would need more than maxParts chunks at the current chunk size.
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
	remote := o.remote
	size := src.Size()
	// Number of chunkSize parts needed, rounding the last partial chunk up
	parts := size / int64(chunkSize)
	if size%int64(chunkSize) != 0 {
		parts++
	}
	if parts > maxParts {
		return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
	}
	modTime := src.ModTime()
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_start_large_file",
	}
	bucketID, err := f.getBucketID()
	if err != nil {
		return nil, err
	}
	var request = api.StartLargeFileRequest{
		BucketID:    bucketID,
		Name:        o.fs.root + remote,
		ContentType: fs.MimeType(src),
		Info: map[string]string{
			timeKey: timeString(modTime),
		},
	}
	// Set the SHA1 if known - stored in the file info so the whole-file
	// hash survives the chunked upload
	if calculatedSha1, err := src.Hash(fs.HashSHA1); err == nil && calculatedSha1 != "" {
		request.Info[sha1Key] = calculatedSha1
	}
	var response api.StartLargeFileResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(&opts, &request, &response)
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	up = &largeUpload{
		f:     f,
		o:     o,
		in:    in,
		id:    response.ID,
		size:  size,
		parts: parts,
		sha1s: make([]string, parts),
	}
	return up, nil
}
|
||||
|
||||
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// A cached URL is popped from the pool when one is available, otherwise
// a fresh one is fetched with b2_get_upload_part_url.
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
	up.uploadMu.Lock()
	defer up.uploadMu.Unlock()
	if len(up.uploads) == 0 {
		opts := rest.Opts{
			Method: "POST",
			Path:   "/b2_get_upload_part_url",
		}
		var request = api.GetUploadPartURLRequest{
			ID: up.id,
		}
		// NOTE: this API call runs with uploadMu held, so concurrent
		// fetches of fresh upload URLs are serialised
		err := up.f.pacer.Call(func() (bool, error) {
			resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
			return up.f.shouldRetry(resp, err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to get upload URL")
		}
	} else {
		// Pop the oldest cached URL off the pool
		upload, up.uploads = up.uploads[0], up.uploads[1:]
	}
	return upload, nil
}
|
||||
|
||||
// returnUploadURL returns the UploadURL to the cache
|
||||
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
|
||||
if upload == nil {
|
||||
return
|
||||
}
|
||||
up.uploadMu.Lock()
|
||||
up.uploads = append(up.uploads, upload)
|
||||
up.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// clearUploadURL clears the current UploadURL and the AuthorizationToken
|
||||
func (up *largeUpload) clearUploadURL() {
|
||||
up.uploadMu.Lock()
|
||||
up.uploads = nil
|
||||
up.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// Transfer a chunk
//
// part is 1 based. The chunk's SHA1 is recorded in up.sha1s[part-1] so
// it can be sent to b2_finish_large_file later. The whole upload
// (URL fetch + POST) is retried via the pacer; on a retryable error the
// upload URL is discarded so a fresh one is fetched on the next attempt.
func (up *largeUpload) transferChunk(part int64, body []byte) error {
	calculatedSHA1 := fmt.Sprintf("%x", sha1.Sum(body))
	up.sha1s[part-1] = calculatedSHA1
	size := int64(len(body))
	err := up.f.pacer.Call(func() (bool, error) {
		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))

		// Get upload URL
		upload, err := up.getUploadURL()
		if err != nil {
			return false, err
		}

		// Authorization
		//
		// An upload authorization token, from b2_get_upload_part_url.
		//
		// X-Bz-Part-Number
		//
		// A number from 1 to 10000. The parts uploaded for one file
		// must have contiguous numbers, starting with 1.
		//
		// Content-Length
		//
		// The number of bytes in the file being uploaded. Note that
		// this header is required; you cannot leave it out and just
		// use chunked encoding. The minimum size of every part but
		// the last one is 100MB.
		//
		// X-Bz-Content-Sha1
		//
		// The SHA1 checksum of the this part of the file. B2 will
		// check this when the part is uploaded, to make sure that the
		// data arrived correctly. The same SHA1 checksum must be
		// passed to b2_finish_large_file.
		opts := rest.Opts{
			Method:   "POST",
			Absolute: true,
			Path:     upload.UploadURL,
			Body:     fs.AccountPart(up.o, bytes.NewBuffer(body)),
			ExtraHeaders: map[string]string{
				"Authorization":    upload.AuthorizationToken,
				"X-Bz-Part-Number": fmt.Sprintf("%d", part),
				sha1Header:         calculatedSHA1,
			},
			ContentLength: &size,
		}

		var response api.UploadPartResponse

		resp, err := up.f.srv.CallJSON(&opts, nil, &response)
		retry, err := up.f.shouldRetry(resp, err)
		// On retryable error clear PartUploadURL
		if retry {
			fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
			upload = nil
		}
		// Return the URL to the pool (no-op when upload was nil-ed above)
		up.returnUploadURL(upload)
		return retry, err
	})
	if err != nil {
		fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
	} else {
		fs.Debugf(up.o, "Done sending chunk %d", part)
	}
	return err
}
|
||||
|
||||
// finish closes off the large upload
|
||||
func (up *largeUpload) finish() error {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_finish_large_file",
|
||||
}
|
||||
var request = api.FinishLargeFileRequest{
|
||||
ID: up.id,
|
||||
SHA1s: up.sha1s,
|
||||
}
|
||||
var response api.FileInfo
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
|
||||
return up.f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.o.decodeMetaDataFileInfo(&response)
|
||||
}
|
||||
|
||||
// cancel aborts the large upload
|
||||
func (up *largeUpload) cancel() error {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_cancel_large_file",
|
||||
}
|
||||
var request = api.CancelLargeFileRequest{
|
||||
ID: up.id,
|
||||
}
|
||||
var response api.CancelLargeFileResponse
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
|
||||
return up.f.shouldRetry(resp, err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Upload uploads the chunks from the input
//
// Chunks are read sequentially from up.in but transferred concurrently,
// one goroutine per chunk. The first transfer error is kept in the
// 1-buffered errs channel (later errors are dropped by the non-blocking
// send) and stops the read loop. On any error the whole large file
// upload is cancelled; otherwise it is finished with
// b2_finish_large_file.
func (up *largeUpload) Upload() error {
	fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
	remaining := up.size
	errs := make(chan error, 1)
	var wg sync.WaitGroup
	var err error
	fs.AccountByPart(up.o) // Cancel whole file accounting before reading
outer:
	for part := int64(1); part <= up.parts; part++ {
		// Check any errors
		select {
		case err = <-errs:
			break outer
		default:
		}

		// Last chunk may be shorter than chunkSize
		reqSize := remaining
		if reqSize >= int64(chunkSize) {
			reqSize = int64(chunkSize)
		}

		// Get a block of memory from the reusable pool
		buf := up.f.getUploadBlock()[:reqSize]

		// Read the chunk
		_, err = io.ReadFull(up.in, buf)
		if err != nil {
			up.f.putUploadBlock(buf)
			break outer
		}

		// Transfer the chunk concurrently; buf ownership passes to the
		// goroutine which returns it to the pool when done
		wg.Add(1)
		go func(part int64, buf []byte) {
			defer wg.Done()
			defer up.f.putUploadBlock(buf)
			err := up.transferChunk(part, buf)
			if err != nil {
				select {
				case errs <- err:
				default:
				}
			}
		}(part, buf)

		remaining -= reqSize
	}
	wg.Wait()
	// Pick up an error from a transfer goroutine if the read loop
	// finished cleanly
	if err == nil {
		select {
		case err = <-errs:
		default:
		}
	}
	if err != nil {
		fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
		cancelErr := up.cancel()
		if cancelErr != nil {
			fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
		}
		return err
	}
	// Check any errors
	fs.Debugf(up.o, "Finishing large file upload")
	return up.finish()
}
|
||||
199
bin/cross-compile.go
Normal file
199
bin/cross-compile.go
Normal file
@@ -0,0 +1,199 @@
|
||||
// +build ignore
|
||||
|
||||
// Cross compile rclone - in go because I hate bash ;-)
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
	// Flags controlling the cross compilation
	debug    = flag.Bool("d", false, "Print commands instead of running them.")
	parallel = flag.Int("parallel", runtime.NumCPU(), "Number of commands to run in parallel.")
	copyAs   = flag.String("release", "", "Make copies of the releases with this name")
	gitLog   = flag.String("git-log", "", "git log to include as well")
	include  = flag.String("include", "^.*$", "os/arch regexp to include")
	exclude  = flag.String("exclude", "^$", "os/arch regexp to exclude")
	cgo      = flag.Bool("cgo", false, "Use cgo for the build")
	noClean  = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
	tags     = flag.String("tags", "", "Space separated list of build tags")
)

// GOOS/GOARCH pairs we build for
var osarches = []string{
	"windows/386",
	"windows/amd64",
	"darwin/386",
	"darwin/amd64",
	"linux/386",
	"linux/amd64",
	"linux/arm",
	"linux/arm64",
	"linux/mips",
	"linux/mipsle",
	"freebsd/386",
	"freebsd/amd64",
	"freebsd/arm",
	"netbsd/386",
	"netbsd/amd64",
	"netbsd/arm",
	"openbsd/386",
	"openbsd/amd64",
	"plan9/386",
	"plan9/amd64",
	"solaris/amd64",
}

// Special environment flags for a given arch
var archFlags = map[string][]string{
	// Use 387 floating point instructions on 386 for maximum compatibility
	"386": {"GO386=387"},
}
|
||||
|
||||
// runEnv - run a shell command with env
|
||||
func runEnv(args, env []string) {
|
||||
if *debug {
|
||||
args = append([]string{"echo"}, args...)
|
||||
}
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if env != nil {
|
||||
cmd.Env = append(os.Environ(), env...)
|
||||
}
|
||||
if *debug {
|
||||
log.Printf("args = %v, env = %v\n", args, cmd.Env)
|
||||
}
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to run %v: %v", args, err)
|
||||
}
|
||||
}
|
||||
|
||||
// run a shell command with the inherited environment, aborting the
// build on failure (see runEnv).
func run(args ...string) {
	runEnv(args, nil)
}
|
||||
|
||||
// compileArch builds the rclone binary for goos/goarch into dir, then
// packages dir into dir.zip together with the docs and deletes dir.
//
// Requires the cp, zip, ln and rm commands on PATH and expects
// MANUAL.txt, MANUAL.html and rclone.1 in the parent directory
// (this runs from inside the build directory - see main).
func compileArch(version, goos, goarch, dir string) {
	log.Printf("Compiling %s/%s", goos, goarch)
	output := filepath.Join(dir, "rclone")
	if goos == "windows" {
		output += ".exe"
	}
	err := os.MkdirAll(dir, 0777)
	if err != nil {
		log.Fatalf("Failed to mkdir: %v", err)
	}
	args := []string{
		"go", "build",
		// Strip symbols and bake the version into fs.Version
		"--ldflags", "-s -X github.com/ncw/rclone/fs.Version=" + version,
		"-i",
		"-o", output,
		"-tags", *tags,
		"..",
	}
	env := []string{
		"GOOS=" + goos,
		"GOARCH=" + goarch,
	}
	if !*cgo {
		env = append(env, "CGO_ENABLED=0")
	} else {
		env = append(env, "CGO_ENABLED=1")
	}
	if flags, ok := archFlags[goarch]; ok {
		env = append(env, flags...)
	}
	runEnv(args, env)
	// Now build the zip
	run("cp", "-a", "../MANUAL.txt", filepath.Join(dir, "README.txt"))
	run("cp", "-a", "../MANUAL.html", filepath.Join(dir, "README.html"))
	run("cp", "-a", "../rclone.1", dir)
	if *gitLog != "" {
		run("cp", "-a", *gitLog, dir)
	}
	zip := dir + ".zip"
	run("zip", "-r9", zip, dir)
	if *copyAs != "" {
		// Hard link the zip under the release name as well
		copyAsZip := strings.Replace(zip, "-"+version, "-"+*copyAs, 1)
		run("ln", zip, copyAsZip)
	}
	run("rm", "-rf", dir)
	log.Printf("Done compiling %s/%s", goos, goarch)
}
|
||||
|
||||
// compile cross compiles rclone for every osarch matched by the
// -include regexp and not matched by -exclude, running up to *parallel
// builds at once via a pool of worker goroutines fed from a channel of
// closures.
func compile(version string) {
	start := time.Now()
	wg := new(sync.WaitGroup)
	run := make(chan func(), *parallel)
	for i := 0; i < *parallel; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for f := range run {
				f()
			}
		}()
	}
	includeRe, err := regexp.Compile(*include)
	if err != nil {
		log.Fatalf("Bad -include regexp: %v", err)
	}
	excludeRe, err := regexp.Compile(*exclude)
	if err != nil {
		log.Fatalf("Bad -exclude regexp: %v", err)
	}
	compiled := 0
	for _, osarch := range osarches {
		if excludeRe.MatchString(osarch) || !includeRe.MatchString(osarch) {
			continue
		}
		parts := strings.Split(osarch, "/")
		if len(parts) != 2 {
			log.Fatalf("Bad osarch %q", osarch)
		}
		goos, goarch := parts[0], parts[1]
		userGoos := goos
		if goos == "darwin" {
			userGoos = "osx"
		}
		// goos, goarch and dir are declared afresh each iteration, so
		// capturing them in the closure below is safe
		dir := filepath.Join("rclone-" + version + "-" + userGoos + "-" + goarch)
		run <- func() {
			compileArch(version, goos, goarch, dir)
		}
		compiled++
	}
	// Closing the channel lets the workers drain and exit
	close(run)
	wg.Wait()
	log.Printf("Compiled %d arches in %v", compiled, time.Since(start))
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
if len(args) != 1 {
|
||||
log.Fatalf("Syntax: %s <version>", os.Args[0])
|
||||
}
|
||||
version := args[0]
|
||||
if !*noClean {
|
||||
run("rm", "-rf", "build")
|
||||
run("mkdir", "build")
|
||||
}
|
||||
err := os.Chdir("build")
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't cd into build dir: %v", err)
|
||||
}
|
||||
compile(version)
|
||||
}
|
||||
59
bin/decrypt_names.py
Executable file
59
bin/decrypt_names.py
Executable file
@@ -0,0 +1,59 @@
|
||||
#!/usr/bin/python
|
||||
"""
|
||||
This is a tool to decrypt file names in rclone logs.
|
||||
|
||||
Pass two files in, the first should be a crypt mapping generated by
|
||||
|
||||
rclone ls --crypt-show-mapping remote:path
|
||||
|
||||
The second should be a log file that you want the paths decrypted in.
|
||||
|
||||
Note that if the crypt mappings file is large it can take some time to
|
||||
run.
|
||||
"""
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
# Crypt line
|
||||
match_crypt = re.compile(r'NOTICE: (.*?): Encrypts to "(.*?)"$')
|
||||
|
||||
def read_crypt_map(mapping_file):
    """
    Read the crypt mapping file in, creating a dictionary of substitutions.

    Each "Encrypts to" line is split on "/" and every encrypted path
    segment is mapped to its plaintext counterpart.
    """
    mapping = {}
    with open(mapping_file) as fd:
        for line in fd:
            match = match_crypt.search(line)
            if not match:
                continue
            plaintext, ciphertext = match.groups()
            segments = zip(plaintext.split("/"), ciphertext.split("/"))
            for plain, cipher in segments:
                mapping[cipher] = plain
    return mapping
|
||||
|
||||
def map_log_file(crypt_map, log_file):
    """
    Substitute the crypt_map in the log file, writing the decrypted
    lines to stdout.

    This uses a straight forward O(N**2) algorithm. I tried using
    regexps to speed it up but it made it slower!
    """
    with open(log_file) as fd:
        for line in fd:
            # Python 2 only: dict.iteritems() was removed in Python 3
            for cipher, plain in crypt_map.iteritems():
                line = line.replace(cipher, plain)
            sys.stdout.write(line)
|
||||
|
||||
def main():
|
||||
if len(sys.argv) < 3:
|
||||
print "Syntax: %s <crypt-mapping-file> <log-file>" % sys.argv[0]
|
||||
raise SystemExit(1)
|
||||
mapping_file, log_file = sys.argv[1:]
|
||||
crypt_map = read_crypt_map(mapping_file)
|
||||
map_log_file(crypt_map, log_file)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
137
bin/make_manual.py
Executable file
137
bin/make_manual.py
Executable file
@@ -0,0 +1,137 @@
|
||||
#!/usr/bin/python
|
||||
"""
|
||||
Make single page versions of the documentation for release and
|
||||
conversion into man pages etc.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
from datetime import datetime
|
||||
|
||||
docpath = "docs/content"
|
||||
outfile = "MANUAL.md"
|
||||
|
||||
# Order to add docs segments to make outfile
|
||||
docs = [
|
||||
"about.md",
|
||||
"install.md",
|
||||
"docs.md",
|
||||
"remote_setup.md",
|
||||
"filtering.md",
|
||||
"overview.md",
|
||||
"drive.md",
|
||||
"s3.md",
|
||||
"swift.md",
|
||||
"dropbox.md",
|
||||
"googlecloudstorage.md",
|
||||
"amazonclouddrive.md",
|
||||
"onedrive.md",
|
||||
"hubic.md",
|
||||
"b2.md",
|
||||
"yandex.md",
|
||||
"sftp.md",
|
||||
"ftp.md",
|
||||
"http.md",
|
||||
"crypt.md",
|
||||
"local.md",
|
||||
"changelog.md",
|
||||
"bugs.md",
|
||||
"faq.md",
|
||||
"licence.md",
|
||||
"authors.md",
|
||||
"contact.md",
|
||||
]
|
||||
|
||||
# Order to put the commands in - any not on here will be in sorted order
|
||||
commands_order = [
|
||||
"rclone_config.md",
|
||||
"rclone_copy.md",
|
||||
"rclone_sync.md",
|
||||
"rclone_move.md",
|
||||
"rclone_delete.md",
|
||||
"rclone_purge.md",
|
||||
"rclone_mkdir.md",
|
||||
"rclone_rmdir.md",
|
||||
"rclone_check.md",
|
||||
"rclone_ls.md",
|
||||
"rclone_lsd.md",
|
||||
"rclone_lsl.md",
|
||||
"rclone_md5sum.md",
|
||||
"rclone_sha1sum.md",
|
||||
"rclone_size.md",
|
||||
"rclone_version.md",
|
||||
"rclone_cleanup.md",
|
||||
"rclone_dedupe.md",
|
||||
]
|
||||
|
||||
# Docs which aren't made into outfile
|
||||
ignore_docs = [
|
||||
"downloads.md",
|
||||
"privacy.md",
|
||||
"donate.md",
|
||||
]
|
||||
|
||||
def read_doc(doc):
    """Read file as a string.

    Strips the YAML front matter (delimited by --- markers), removes
    fontawesome icon markup and rewrites root-relative links to
    absolute https://rclone.org ones.
    """
    with open(os.path.join(docpath, doc)) as fd:
        raw = fd.read()
    parts = raw.split("---\n", 2)
    if len(parts) != 3:
        raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
    body = parts[2].strip() + "\n\n"
    # Remove icons
    body = re.sub(r'<i class="fa.*?</i>\s*', "", body)
    # Make [...](/links/) absolute
    body = re.sub(r'\((\/.*?\/)\)', r"(https://rclone.org\1)", body)
    return body
|
||||
|
||||
def check_docs(docpath):
    """Check all the docs are in docpath"""
    # Every .md file on disk (except the ignored ones) must appear in
    # the module-level docs list, and vice versa
    files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
    files -= set(ignore_docs)
    docs_set = set(docs)
    if files == docs_set:
        return
    # Python 2 print statements - this script is Python 2 only
    print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
    print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
    raise ValueError("Missing files")
|
||||
|
||||
def read_command(command):
    """Read a single command doc page, dropping the boilerplate
    "Options inherited from parent commands" section."""
    page = read_doc("commands/" + command)
    page = re.sub(r"### Options inherited from parent commands.*$", "", page, 0, re.S)
    return page.strip() + "\n"
|
||||
|
||||
def read_commands(docpath):
    """Reads the commands and makes them into a single page"""
    files = set(f for f in os.listdir(docpath + "/commands") if f.endswith(".md"))
    # Named pages (not docs) to avoid shadowing the module-level docs list
    pages = []
    for command in commands_order:
        pages.append(read_command(command))
        files.remove(command)
    # Any commands not in commands_order are appended in sorted order,
    # except the top-level rclone.md page
    for command in sorted(files):
        if command != "rclone.md":
            pages.append(read_command(command))
    return "\n".join(pages)
|
||||
|
||||
def main():
    """Assemble the docs and command pages into the single page outfile
    (MANUAL.md) with a pandoc title block for man page conversion."""
    check_docs(docpath)
    command_docs = read_commands(docpath)
    with open(outfile, "w") as out:
        # %% is a literal % in the format string; the three % lines are
        # the pandoc title/author/date block
        out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s

""" % datetime.now().strftime("%b %d, %Y"))
        for doc in docs:
            contents = read_doc(doc)
            # Substitute the commands into doc.md
            if doc == "docs.md":
                contents = re.sub(r"The main rclone commands.*?for the full list.", command_docs, contents, 0, re.S)
            out.write(contents)
    # Python 2 print statement - this script is Python 2 only
    print "Written '%s'" % outfile

if __name__ == "__main__":
    main()
|
||||
146
bin/make_test_files.go
Normal file
146
bin/make_test_files.go
Normal file
@@ -0,0 +1,146 @@
|
||||
// +build ignore
|
||||
|
||||
// Build a directory structure with the required number of files in
|
||||
//
|
||||
// Run with go run make_test_files.go [flag] <directory>
|
||||
package main
|
||||
|
||||
import (
|
||||
cryptrand "crypto/rand"
|
||||
"flag"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
var (
|
||||
// Flags
|
||||
numberOfFiles = flag.Int("n", 1000, "Number of files to create")
|
||||
averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
|
||||
maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory heirachy")
|
||||
minFileSize = flag.Int64("min-size", 0, "Minimum size of file to create")
|
||||
maxFileSize = flag.Int64("max-size", 100, "Maximum size of files to create")
|
||||
minFileNameLength = flag.Int("min-name-length", 4, "Minimum size of file to create")
|
||||
maxFileNameLength = flag.Int("max-name-length", 12, "Maximum size of files to create")
|
||||
|
||||
directoriesToCreate int
|
||||
totalDirectories int
|
||||
fileNames = map[string]struct{}{} // keep a note of which file name we've used already
|
||||
)
|
||||
|
||||
// randomString creates a pronounceable pseudo random string of length n
// for test purposes, cycling consonant/vowel positions with a digit in
// every eighth slot.
func randomString(n int) string {
	const (
		vowel     = "aeiou"
		consonant = "bcdfghjklmnpqrstvwxyz"
		digit     = "0123456789"
	)
	pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
	out := make([]byte, n)
	for i, p := 0, 0; i < n; i++ {
		source := pattern[p]
		out[i] = source[rand.Intn(len(source))]
		p = (p + 1) % len(pattern)
	}
	return string(out)
}
|
||||
|
||||
// fileName creates a unique random file or directory name
|
||||
func fileName() (name string) {
|
||||
for {
|
||||
length := rand.Intn(*maxFileNameLength-*minFileNameLength) + *minFileNameLength
|
||||
name = randomString(length)
|
||||
if _, found := fileNames[name]; !found {
|
||||
break
|
||||
}
|
||||
}
|
||||
fileNames[name] = struct{}{}
|
||||
return name
|
||||
}
|
||||
|
||||
// dir is a directory in the directory hierarchy being built up
type dir struct {
	name     string // base name of this directory
	depth    int    // depth in the tree - the root is depth 1
	children []*dir // subdirectories created under this one
	parent   *dir   // parent directory, nil for the root
}
|
||||
|
||||
// Create a random directory heirachy under d
|
||||
func (d *dir) createDirectories() {
|
||||
for totalDirectories < directoriesToCreate {
|
||||
newDir := &dir{
|
||||
name: fileName(),
|
||||
depth: d.depth + 1,
|
||||
parent: d,
|
||||
}
|
||||
d.children = append(d.children, newDir)
|
||||
totalDirectories++
|
||||
switch rand.Intn(4) {
|
||||
case 0:
|
||||
if d.depth < *maxDepth {
|
||||
newDir.createDirectories()
|
||||
}
|
||||
case 1:
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// list the directory heirachy
|
||||
func (d *dir) list(path string, output []string) []string {
|
||||
dirPath := path + "/" + d.name
|
||||
output = append(output, dirPath)
|
||||
for _, subDir := range d.children {
|
||||
output = subDir.list(dirPath, output)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// writeFile writes a random file at dir/name
|
||||
func writeFile(dir, name string) {
|
||||
err := os.MkdirAll(dir, 0777)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to make directory %q: %v", dir, err)
|
||||
}
|
||||
path := filepath.Join(dir, name)
|
||||
fd, err := os.Create(path)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to open file %q: %v", path, err)
|
||||
}
|
||||
size := rand.Int63n(*maxFileSize-*minFileSize) + *minFileSize
|
||||
_, err = io.CopyN(fd, cryptrand.Reader, size)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
|
||||
}
|
||||
err = fd.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to close file %q: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
// main builds the random directory tree under the single directory
// argument and fills it with -n random files.
func main() {
	flag.Parse()
	args := flag.Args()
	if len(args) != 1 {
		log.Fatalf("Require 1 directory argument")
	}
	outputDirectory := args[0]
	log.Printf("Output dir %q", outputDirectory)

	// NOTE(review): integer division - divides by zero if
	// -files-per-directory is 0; no guard here, confirm callers never
	// pass 0
	directoriesToCreate = *numberOfFiles / *averageFilesPerDirectory
	log.Printf("directoriesToCreate %v", directoriesToCreate)
	root := &dir{name: outputDirectory, depth: 1}
	// createDirectories can return early (random walk), so loop until
	// enough directories exist
	for totalDirectories < directoriesToCreate {
		root.createDirectories()
	}
	dirs := root.list("", []string{})
	// Scatter the files over random directories in the tree
	for i := 0; i < *numberOfFiles; i++ {
		dir := dirs[rand.Intn(len(dirs))]
		writeFile(dir, fileName())
	}
}
|
||||
4
bin/travis.rclone.conf
Normal file
4
bin/travis.rclone.conf
Normal file
@@ -0,0 +1,4 @@
|
||||
# Encrypted rclone configuration File
|
||||
|
||||
RCLONE_ENCRYPT_V0:
|
||||
XIkAr3p+y+zai82cHFH8UoW1y1XTe6dpTzo/g4uSwqI2pfsnSSJ4JbAsRZ9nGVpx3NzROKEewlusVHNokiA4/nD4NbT+2DJrpMLg/OtLREICfuRk3tVWPKLGsmA+TLKU+IfQMO4LfrrCe2DF/lW0qA5Xu16E0Vn++jNhbwW2oB+JTkaGka8Ae3CyisM/3NUGnCOG/yb5wLH7ybUstNYPHsNFCiU1brFXQ4DNIbUFMmca+5S44vrOWvhp9QijQXlG7/JjwrkqbB/LK2gMJPTuhY2OW+4tRw1IoCXbWmwJXv5xmhPqanW92A==
|
||||
46
bin/update-authors.py
Executable file
46
bin/update-authors.py
Executable file
@@ -0,0 +1,46 @@
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
Update the authors.md file with the authors from the git log
|
||||
"""
|
||||
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
AUTHORS = "docs/content/authors.md"
|
||||
IGNORE = [ "nick@raig-wood.com" ]
|
||||
|
||||
def load():
|
||||
"""
|
||||
returns a set of emails already in authors.md
|
||||
"""
|
||||
with open(AUTHORS) as fd:
|
||||
authors = fd.read()
|
||||
emails = set(re.findall(r"<(.*?)>", authors))
|
||||
emails.update(IGNORE)
|
||||
return emails
|
||||
|
||||
def add_email(name, email):
|
||||
"""
|
||||
adds the email passed in to the end of authors.md
|
||||
"""
|
||||
print "Adding %s <%s>" % (name, email)
|
||||
with open(AUTHORS, "a+") as fd:
|
||||
print >>fd, " * %s <%s>" % (name, email)
|
||||
subprocess.check_call(["git", "commit", "-m", "Add %s to contributors" % name, AUTHORS])
|
||||
|
||||
def main():
|
||||
out = subprocess.check_output(["git", "log", '--reverse', '--format=%an|%ae', "master"])
|
||||
|
||||
previous = load()
|
||||
for line in out.split("\n"):
|
||||
line = line.strip()
|
||||
if line == "":
|
||||
continue
|
||||
name, email = line.split("|")
|
||||
if email in previous:
|
||||
continue
|
||||
previous.add(email)
|
||||
add_email(name, email)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
50
bin/upload-github
Executable file
50
bin/upload-github
Executable file
@@ -0,0 +1,50 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Upload a release
|
||||
#
|
||||
# Needs github-release from https://github.com/aktau/github-release
|
||||
|
||||
set -e
|
||||
|
||||
REPO="rclone"
|
||||
|
||||
if [ "$1" == "" ]; then
|
||||
echo "Syntax: $0 Version"
|
||||
exit 1
|
||||
fi
|
||||
VERSION="$1"
|
||||
if [ "$GITHUB_USER" == "" ]; then
|
||||
echo 1>&2 "Need GITHUB_USER environment variable"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$GITHUB_TOKEN" == "" ]; then
|
||||
echo 1>&2 "Need GITHUB_TOKEN environment variable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Making release ${VERSION}"
|
||||
github-release release \
|
||||
--repo ${REPO} \
|
||||
--tag ${VERSION} \
|
||||
--name "rclone" \
|
||||
--description "Rclone - rsync for cloud storage. Sync files to and from many cloud storage providers."
|
||||
|
||||
for build in `ls build | grep -v current`; do
|
||||
echo "Uploading ${build}"
|
||||
base="${build%.*}"
|
||||
parts=(${base//-/ })
|
||||
os=${parts[3]}
|
||||
arch=${parts[4]}
|
||||
|
||||
github-release upload \
|
||||
--repo ${REPO} \
|
||||
--tag ${VERSION} \
|
||||
--name "${build}" \
|
||||
--file build/${build}
|
||||
done
|
||||
|
||||
github-release info \
|
||||
--repo ${REPO} \
|
||||
--tag ${VERSION}
|
||||
|
||||
echo "Done"
|
||||
5
bin/win-build.bat
Normal file
5
bin/win-build.bat
Normal file
@@ -0,0 +1,5 @@
|
||||
@echo off
|
||||
echo Setting environment variables for mingw+WinFsp compile
|
||||
set GOPATH=X:\go
|
||||
set PATH=C:\Program Files\mingw-w64\i686-7.1.0-win32-dwarf-rt_v5-rev0\mingw32\bin;%PATH%
|
||||
set CPATH=C:\Program Files\WinFsp\inc\fuse
|
||||
43
cmd/all/all.go
Normal file
43
cmd/all/all.go
Normal file
@@ -0,0 +1,43 @@
|
||||
// Package all imports all the commands
|
||||
package all
|
||||
|
||||
import (
|
||||
// Active commands
|
||||
_ "github.com/ncw/rclone/cmd"
|
||||
_ "github.com/ncw/rclone/cmd/authorize"
|
||||
_ "github.com/ncw/rclone/cmd/cat"
|
||||
_ "github.com/ncw/rclone/cmd/check"
|
||||
_ "github.com/ncw/rclone/cmd/cleanup"
|
||||
_ "github.com/ncw/rclone/cmd/cmount"
|
||||
_ "github.com/ncw/rclone/cmd/config"
|
||||
_ "github.com/ncw/rclone/cmd/copy"
|
||||
_ "github.com/ncw/rclone/cmd/copyto"
|
||||
_ "github.com/ncw/rclone/cmd/cryptcheck"
|
||||
_ "github.com/ncw/rclone/cmd/dbhashsum"
|
||||
_ "github.com/ncw/rclone/cmd/dedupe"
|
||||
_ "github.com/ncw/rclone/cmd/delete"
|
||||
_ "github.com/ncw/rclone/cmd/genautocomplete"
|
||||
_ "github.com/ncw/rclone/cmd/gendocs"
|
||||
_ "github.com/ncw/rclone/cmd/info"
|
||||
_ "github.com/ncw/rclone/cmd/listremotes"
|
||||
_ "github.com/ncw/rclone/cmd/ls"
|
||||
_ "github.com/ncw/rclone/cmd/ls2"
|
||||
_ "github.com/ncw/rclone/cmd/lsd"
|
||||
_ "github.com/ncw/rclone/cmd/lsjson"
|
||||
_ "github.com/ncw/rclone/cmd/lsl"
|
||||
_ "github.com/ncw/rclone/cmd/md5sum"
|
||||
_ "github.com/ncw/rclone/cmd/memtest"
|
||||
_ "github.com/ncw/rclone/cmd/mkdir"
|
||||
_ "github.com/ncw/rclone/cmd/mount"
|
||||
_ "github.com/ncw/rclone/cmd/move"
|
||||
_ "github.com/ncw/rclone/cmd/moveto"
|
||||
_ "github.com/ncw/rclone/cmd/ncdu"
|
||||
_ "github.com/ncw/rclone/cmd/obscure"
|
||||
_ "github.com/ncw/rclone/cmd/purge"
|
||||
_ "github.com/ncw/rclone/cmd/rmdir"
|
||||
_ "github.com/ncw/rclone/cmd/rmdirs"
|
||||
_ "github.com/ncw/rclone/cmd/sha1sum"
|
||||
_ "github.com/ncw/rclone/cmd/size"
|
||||
_ "github.com/ncw/rclone/cmd/sync"
|
||||
_ "github.com/ncw/rclone/cmd/version"
|
||||
)
|
||||
45
cmd/atexit.go
Normal file
45
cmd/atexit.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package cmd
|
||||
|
||||
// Atexit handling
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
var (
|
||||
atExitFns []func()
|
||||
atExitOnce sync.Once
|
||||
atExitRegisterOnce sync.Once
|
||||
)
|
||||
|
||||
// AtExit registers a function to be added on exit
|
||||
func AtExit(fn func()) {
|
||||
atExitFns = append(atExitFns, fn)
|
||||
// Run AtExit handlers on SIGINT or SIGTERM so everything gets
|
||||
// tidied up properly
|
||||
atExitRegisterOnce.Do(func() {
|
||||
go func() {
|
||||
ch := make(chan os.Signal, 1)
|
||||
signal.Notify(ch, os.Interrupt) // syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT
|
||||
sig := <-ch
|
||||
fs.Infof(nil, "Signal received: %s", sig)
|
||||
runAtExitFunctions()
|
||||
fs.Infof(nil, "Exiting...")
|
||||
os.Exit(0)
|
||||
}()
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// Runs all the AtExit functions if they haven't been run already
|
||||
func runAtExitFunctions() {
|
||||
atExitOnce.Do(func() {
|
||||
for _, fn := range atExitFns {
|
||||
fn()
|
||||
}
|
||||
})
|
||||
}
|
||||
24
cmd/authorize/authorize.go
Normal file
24
cmd/authorize/authorize.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package authorize
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "authorize",
|
||||
Short: `Remote authorization.`,
|
||||
Long: `
|
||||
Remote authorization. Used to authorize a remote or headless
|
||||
rclone from a machine with a browser - use as instructed by
|
||||
rclone config.`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 3, command, args)
|
||||
fs.Authorize(args)
|
||||
},
|
||||
}
|
||||
80
cmd/cat/cat.go
Normal file
80
cmd/cat/cat.go
Normal file
@@ -0,0 +1,80 @@
|
||||
package cat
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
head = int64(0)
|
||||
tail = int64(0)
|
||||
offset = int64(0)
|
||||
count = int64(-1)
|
||||
discard = false
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
commandDefintion.Flags().Int64VarP(&head, "head", "", head, "Only print the first N characters.")
|
||||
commandDefintion.Flags().Int64VarP(&tail, "tail", "", tail, "Only print the last N characters.")
|
||||
commandDefintion.Flags().Int64VarP(&offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).")
|
||||
commandDefintion.Flags().Int64VarP(&count, "count", "", count, "Only print N characters.")
|
||||
commandDefintion.Flags().BoolVarP(&discard, "discard", "", discard, "Discard the output instead of printing.")
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "cat remote:path",
|
||||
Short: `Concatenates any files and sends them to stdout.`,
|
||||
Long: `
|
||||
rclone cat sends any files to standard output.
|
||||
|
||||
You can use it like this to output a single file
|
||||
|
||||
rclone cat remote:path/to/file
|
||||
|
||||
Or like this to output any file in dir or subdirectories.
|
||||
|
||||
rclone cat remote:path/to/dir
|
||||
|
||||
Or like this to output any .txt files in dir or subdirectories.
|
||||
|
||||
rclone --include "*.txt" cat remote:path/to/dir
|
||||
|
||||
Use the --head flag to print characters only at the start, --tail for
|
||||
the end and --offset and --count to print a section in the middle.
|
||||
Note that if offset is negative it will count from the end, so
|
||||
--offset -1 --count 1 is equivalent to --tail 1.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
usedOffset := offset != 0 || count >= 0
|
||||
usedHead := head > 0
|
||||
usedTail := tail > 0
|
||||
if usedHead && usedTail || usedHead && usedOffset || usedTail && usedOffset {
|
||||
log.Fatalf("Can only use one of --head, --tail or --offset with --count")
|
||||
}
|
||||
if head > 0 {
|
||||
offset = 0
|
||||
count = head
|
||||
}
|
||||
if tail > 0 {
|
||||
offset = -tail
|
||||
count = -1
|
||||
}
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
var w io.Writer = os.Stdout
|
||||
if discard {
|
||||
w = ioutil.Discard
|
||||
}
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return fs.Cat(fsrc, w, offset, count)
|
||||
})
|
||||
},
|
||||
}
|
||||
45
cmd/check/check.go
Normal file
45
cmd/check/check.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package check
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
download = false
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
commandDefintion.Flags().BoolVarP(&download, "download", "", download, "Check by downloading rather than with hash.")
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "check source:path dest:path",
|
||||
Short: `Checks the files in the source and destination match.`,
|
||||
Long: `
|
||||
Checks the files in the source and destination match. It compares
|
||||
sizes and hashes (MD5 or SHA1) and logs a report of files which don't
|
||||
match. It doesn't alter the source or destination.
|
||||
|
||||
If you supply the --size-only flag, it will only compare the sizes not
|
||||
the hashes as well. Use this for a quick check.
|
||||
|
||||
If you supply the --download flag, it will download the data from
|
||||
both remotes and check them against each other on the fly. This can
|
||||
be useful for remotes that don't support hashes or if you really want
|
||||
to check all the data.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, fdst := cmd.NewFsSrcDst(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
if download {
|
||||
return fs.CheckDownload(fdst, fsrc)
|
||||
}
|
||||
return fs.Check(fdst, fsrc)
|
||||
})
|
||||
},
|
||||
}
|
||||
27
cmd/cleanup/cleanup.go
Normal file
27
cmd/cleanup/cleanup.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package cleanup
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "cleanup remote:path",
|
||||
Short: `Clean up the remote if possible`,
|
||||
Long: `
|
||||
Clean up the remote if possible. Empty the trash or delete old file
|
||||
versions. Not supported by all remotes.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(true, false, command, func() error {
|
||||
return fs.CleanUp(fsrc)
|
||||
})
|
||||
},
|
||||
}
|
||||
356
cmd/cmd.go
Normal file
356
cmd/cmd.go
Normal file
@@ -0,0 +1,356 @@
|
||||
// Package cmd implemnts the rclone command
|
||||
//
|
||||
// It is in a sub package so it's internals can be re-used elsewhere
|
||||
package cmd
|
||||
|
||||
// FIXME only attach the remote flags when using a remote???
|
||||
// would probably mean bringing all the flags in to here? Or define some flagsets in fs...
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
cpuProfile = fs.StringP("cpuprofile", "", "", "Write cpu profile to file")
|
||||
memProfile = fs.StringP("memprofile", "", "", "Write memory profile to file")
|
||||
statsInterval = fs.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)")
|
||||
dataRateUnit = fs.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
|
||||
version bool
|
||||
retries = fs.IntP("retries", "", 3, "Retry operations this many times if they fail")
|
||||
)
|
||||
|
||||
// Root is the main rclone command
|
||||
var Root = &cobra.Command{
|
||||
Use: "rclone",
|
||||
Short: "Sync files and directories to and from local and remote object stores - " + fs.Version,
|
||||
Long: `
|
||||
Rclone is a command line program to sync files and directories to and
|
||||
from various cloud storage systems and using file transfer services, such as:
|
||||
|
||||
* Google Drive
|
||||
* Amazon S3
|
||||
* Openstack Swift / Rackspace cloud files / Memset Memstore
|
||||
* Dropbox
|
||||
* Google Cloud Storage
|
||||
* Amazon Drive
|
||||
* Microsoft OneDrive
|
||||
* Hubic
|
||||
* Backblaze B2
|
||||
* Yandex Disk
|
||||
* SFTP
|
||||
* FTP
|
||||
* HTTP
|
||||
* The local filesystem
|
||||
|
||||
Features
|
||||
|
||||
* MD5/SHA1 hashes checked at all times for file integrity
|
||||
* Timestamps preserved on files
|
||||
* Partial syncs supported on a whole file basis
|
||||
* Copy mode to just copy new/changed files
|
||||
* Sync (one way) mode to make a directory identical
|
||||
* Check mode to check for file hash equality
|
||||
* Can sync to and from network, eg two different cloud accounts
|
||||
|
||||
See the home page for installation, usage, documentation, changelog
|
||||
and configuration walkthroughs.
|
||||
|
||||
* https://rclone.org/
|
||||
`,
|
||||
PersistentPostRun: func(cmd *cobra.Command, args []string) {
|
||||
fs.Debugf("rclone", "Version %q finishing with parameters %q", fs.Version, os.Args)
|
||||
runAtExitFunctions()
|
||||
},
|
||||
}
|
||||
|
||||
// runRoot implements the main rclone command with no subcommands
|
||||
func runRoot(cmd *cobra.Command, args []string) {
|
||||
if version {
|
||||
ShowVersion()
|
||||
os.Exit(0)
|
||||
} else {
|
||||
_ = Root.Usage()
|
||||
fmt.Fprintf(os.Stderr, "Command not found.\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
Root.Run = runRoot
|
||||
Root.Flags().BoolVarP(&version, "version", "V", false, "Print the version number")
|
||||
cobra.OnInitialize(initConfig)
|
||||
}
|
||||
|
||||
// ShowVersion prints the version to stdout
|
||||
func ShowVersion() {
|
||||
fmt.Printf("rclone %s\n", fs.Version)
|
||||
}
|
||||
|
||||
// newFsFile creates a dst Fs from a name but may point to a file.
|
||||
//
|
||||
// It returns a string with the file name if points to a file
|
||||
func newFsFile(remote string) (fs.Fs, string) {
|
||||
fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
log.Fatalf("Failed to create file system for %q: %v", remote, err)
|
||||
}
|
||||
f, err := fsInfo.NewFs(configName, fsPath)
|
||||
switch err {
|
||||
case fs.ErrorIsFile:
|
||||
return f, path.Base(fsPath)
|
||||
case nil:
|
||||
return f, ""
|
||||
default:
|
||||
fs.Stats.Error()
|
||||
log.Fatalf("Failed to create file system for %q: %v", remote, err)
|
||||
}
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
// newFsSrc creates a src Fs from a name
|
||||
//
|
||||
// It returns a string with the file name if limiting to one file
|
||||
//
|
||||
// This can point to a file
|
||||
func newFsSrc(remote string) (fs.Fs, string) {
|
||||
f, fileName := newFsFile(remote)
|
||||
if fileName != "" {
|
||||
if !fs.Config.Filter.InActive() {
|
||||
fs.Stats.Error()
|
||||
log.Fatalf("Can't limit to single files when using filters: %v", remote)
|
||||
}
|
||||
// Limit transfers to this file
|
||||
err := fs.Config.Filter.AddFile(fileName)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
log.Fatalf("Failed to limit to single file %q: %v", remote, err)
|
||||
}
|
||||
// Set --no-traverse as only one file
|
||||
fs.Config.NoTraverse = true
|
||||
}
|
||||
return f, fileName
|
||||
}
|
||||
|
||||
// newFsDst creates a dst Fs from a name
|
||||
//
|
||||
// This must point to a directory
|
||||
func newFsDst(remote string) fs.Fs {
|
||||
f, err := fs.NewFs(remote)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
log.Fatalf("Failed to create file system for %q: %v", remote, err)
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// NewFsSrcDst creates a new src and dst fs from the arguments
|
||||
func NewFsSrcDst(args []string) (fs.Fs, fs.Fs) {
|
||||
fsrc, _ := newFsSrc(args[0])
|
||||
fdst := newFsDst(args[1])
|
||||
fs.CalculateModifyWindow(fdst, fsrc)
|
||||
return fsrc, fdst
|
||||
}
|
||||
|
||||
// NewFsSrcDstFiles creates a new src and dst fs from the arguments
|
||||
// If src is a file then srcFileName and dstFileName will be non-empty
|
||||
func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs, dstFileName string) {
|
||||
fsrc, srcFileName = newFsSrc(args[0])
|
||||
// If copying a file...
|
||||
dstRemote := args[1]
|
||||
if srcFileName != "" {
|
||||
dstRemote, dstFileName = fs.RemoteSplit(dstRemote)
|
||||
if dstRemote == "" {
|
||||
dstRemote = "."
|
||||
}
|
||||
if dstFileName == "" {
|
||||
log.Fatalf("%q is a directory", args[1])
|
||||
}
|
||||
}
|
||||
fdst = newFsDst(dstRemote)
|
||||
fs.CalculateModifyWindow(fdst, fsrc)
|
||||
return
|
||||
}
|
||||
|
||||
// NewFsSrc creates a new src fs from the arguments
|
||||
func NewFsSrc(args []string) fs.Fs {
|
||||
fsrc, _ := newFsSrc(args[0])
|
||||
fs.CalculateModifyWindow(fsrc)
|
||||
return fsrc
|
||||
}
|
||||
|
||||
// NewFsDst creates a new dst fs from the arguments
|
||||
//
|
||||
// Dst fs-es can't point to single files
|
||||
func NewFsDst(args []string) fs.Fs {
|
||||
fdst := newFsDst(args[0])
|
||||
fs.CalculateModifyWindow(fdst)
|
||||
return fdst
|
||||
}
|
||||
|
||||
// ShowStats returns true if the user added a `--stats` flag to the command line.
|
||||
//
|
||||
// This is called by Run to override the default value of the
|
||||
// showStats passed in.
|
||||
func ShowStats() bool {
|
||||
statsIntervalFlag := pflag.Lookup("stats")
|
||||
return statsIntervalFlag != nil && statsIntervalFlag.Changed
|
||||
}
|
||||
|
||||
// Run the function with stats and retries if required
|
||||
func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
|
||||
var err error
|
||||
var stopStats chan struct{}
|
||||
if !showStats && ShowStats() {
|
||||
showStats = true
|
||||
}
|
||||
if showStats {
|
||||
stopStats = StartStats()
|
||||
}
|
||||
for try := 1; try <= *retries; try++ {
|
||||
err = f()
|
||||
if !Retry || (err == nil && !fs.Stats.Errored()) {
|
||||
if try > 1 {
|
||||
fs.Errorf(nil, "Attempt %d/%d succeeded", try, *retries)
|
||||
}
|
||||
break
|
||||
}
|
||||
if fs.IsFatalError(err) {
|
||||
fs.Errorf(nil, "Fatal error received - not attempting retries")
|
||||
break
|
||||
}
|
||||
if fs.IsNoRetryError(err) {
|
||||
fs.Errorf(nil, "Can't retry this error - not attempting retries")
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
|
||||
} else {
|
||||
fs.Errorf(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
|
||||
}
|
||||
if try < *retries {
|
||||
fs.Stats.ResetErrors()
|
||||
}
|
||||
}
|
||||
if showStats {
|
||||
close(stopStats)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to %s: %v", cmd.Name(), err)
|
||||
}
|
||||
if showStats && (fs.Stats.Errored() || *statsInterval > 0) {
|
||||
fs.Stats.Log()
|
||||
}
|
||||
fs.Debugf(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
|
||||
if fs.Stats.Errored() {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// CheckArgs checks there are enough arguments and prints a message if not
|
||||
func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
|
||||
if len(args) < MinArgs {
|
||||
_ = cmd.Usage()
|
||||
fmt.Fprintf(os.Stderr, "Command %s needs %d arguments mininum\n", cmd.Name(), MinArgs)
|
||||
os.Exit(1)
|
||||
} else if len(args) > MaxArgs {
|
||||
_ = cmd.Usage()
|
||||
fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// StartStats prints the stats every statsInterval
|
||||
//
|
||||
// It returns a channel which should be closed to stop the stats.
|
||||
func StartStats() chan struct{} {
|
||||
stopStats := make(chan struct{})
|
||||
if *statsInterval > 0 {
|
||||
go func() {
|
||||
ticker := time.NewTicker(*statsInterval)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
fs.Stats.Log()
|
||||
case <-stopStats:
|
||||
ticker.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
return stopStats
|
||||
}
|
||||
|
||||
// initConfig is run by cobra after initialising the flags
|
||||
func initConfig() {
|
||||
// Start the logger
|
||||
fs.InitLogging()
|
||||
|
||||
// Load the rest of the config now we have started the logger
|
||||
fs.LoadConfig()
|
||||
|
||||
// Write the args for debug purposes
|
||||
fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)
|
||||
|
||||
// Setup CPU profiling if desired
|
||||
if *cpuProfile != "" {
|
||||
fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
|
||||
f, err := os.Create(*cpuProfile)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = pprof.StartCPUProfile(f)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
log.Fatal(err)
|
||||
}
|
||||
AtExit(func() {
|
||||
pprof.StopCPUProfile()
|
||||
})
|
||||
}
|
||||
|
||||
// Setup memory profiling if desired
|
||||
if *memProfile != "" {
|
||||
AtExit(func() {
|
||||
fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
|
||||
f, err := os.Create(*memProfile)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = pprof.WriteHeapProfile(f)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
log.Fatal(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if m, _ := regexp.MatchString("^(bits|bytes)$", *dataRateUnit); m == false {
|
||||
fs.Errorf(nil, "Invalid unit passed to --stats-unit. Defaulting to bytes.")
|
||||
fs.Config.DataRateUnit = "bytes"
|
||||
} else {
|
||||
fs.Config.DataRateUnit = *dataRateUnit
|
||||
}
|
||||
}
|
||||
663
cmd/cmount/fs.go
Normal file
663
cmd/cmount/fs.go
Normal file
@@ -0,0 +1,663 @@
|
||||
// +build cmount
|
||||
// +build cgo
|
||||
// +build linux darwin freebsd windows
|
||||
|
||||
package cmount
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/billziss-gh/cgofuse/fuse"
|
||||
"github.com/ncw/rclone/cmd/mountlib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const fhUnset = ^uint64(0)
|
||||
|
||||
// FS represents the top level filing system
|
||||
type FS struct {
|
||||
FS *mountlib.FS
|
||||
f fs.Fs
|
||||
openDirs *openFiles
|
||||
openFilesWr *openFiles
|
||||
openFilesRd *openFiles
|
||||
ready chan (struct{})
|
||||
}
|
||||
|
||||
// NewFS makes a new FS
|
||||
func NewFS(f fs.Fs) *FS {
|
||||
fsys := &FS{
|
||||
FS: mountlib.NewFS(f),
|
||||
f: f,
|
||||
openDirs: newOpenFiles(0x01),
|
||||
openFilesWr: newOpenFiles(0x02),
|
||||
openFilesRd: newOpenFiles(0x03),
|
||||
ready: make(chan (struct{})),
|
||||
}
|
||||
return fsys
|
||||
}
|
||||
|
||||
type openFiles struct {
|
||||
mu sync.Mutex
|
||||
mark uint8
|
||||
nodes []mountlib.Noder
|
||||
}
|
||||
|
||||
func newOpenFiles(mark uint8) *openFiles {
|
||||
return &openFiles{
|
||||
mark: mark,
|
||||
}
|
||||
}
|
||||
|
||||
// Open a node returning a file handle
|
||||
func (of *openFiles) Open(node mountlib.Noder) (fh uint64) {
|
||||
of.mu.Lock()
|
||||
defer of.mu.Unlock()
|
||||
var i int
|
||||
var oldNode mountlib.Noder
|
||||
for i, oldNode = range of.nodes {
|
||||
if oldNode == nil {
|
||||
of.nodes[i] = node
|
||||
goto found
|
||||
}
|
||||
}
|
||||
of.nodes = append(of.nodes, node)
|
||||
i = len(of.nodes) - 1
|
||||
found:
|
||||
return uint64((i << 8) | int(of.mark))
|
||||
}
|
||||
|
||||
// InRange to see if this fh could be one of ours
|
||||
func (of *openFiles) InRange(fh uint64) bool {
|
||||
return uint8(fh) == of.mark
|
||||
}
|
||||
|
||||
// get the node for fh, call with the lock held
|
||||
func (of *openFiles) get(fh uint64) (i int, node mountlib.Noder, errc int) {
|
||||
receivedMark := uint8(fh)
|
||||
if receivedMark != of.mark {
|
||||
fs.Debugf(nil, "Bad file handle: bad mark 0x%X != 0x%X: 0x%X", receivedMark, of.mark, fh)
|
||||
return i, nil, -fuse.EBADF
|
||||
}
|
||||
i64 := fh >> 8
|
||||
if i64 > uint64(len(of.nodes)) {
|
||||
fs.Debugf(nil, "Bad file handle: too big: 0x%X", fh)
|
||||
return i, nil, -fuse.EBADF
|
||||
}
|
||||
i = int(i64)
|
||||
node = of.nodes[i]
|
||||
if node == nil {
|
||||
fs.Debugf(nil, "Bad file handle: nil node: 0x%X", fh)
|
||||
return i, nil, -fuse.EBADF
|
||||
}
|
||||
return i, node, 0
|
||||
}
|
||||
|
||||
// Get the node for the file handle
|
||||
func (of *openFiles) Get(fh uint64) (node mountlib.Noder, errc int) {
|
||||
of.mu.Lock()
|
||||
_, node, errc = of.get(fh)
|
||||
of.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Close the node
|
||||
func (of *openFiles) Close(fh uint64) (errc int) {
|
||||
of.mu.Lock()
|
||||
i, _, errc := of.get(fh)
|
||||
if errc == 0 {
|
||||
of.nodes[i] = nil
|
||||
}
|
||||
of.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// lookup a Node given a path
|
||||
func (fsys *FS) lookupNode(path string) (node mountlib.Node, errc int) {
|
||||
node, err := fsys.FS.Lookup(path)
|
||||
return node, translateError(err)
|
||||
}
|
||||
|
||||
// lookup a Dir given a path
|
||||
func (fsys *FS) lookupDir(path string) (dir *mountlib.Dir, errc int) {
|
||||
node, errc := fsys.lookupNode(path)
|
||||
if errc != 0 {
|
||||
return nil, errc
|
||||
}
|
||||
dir, ok := node.(*mountlib.Dir)
|
||||
if !ok {
|
||||
return nil, -fuse.ENOTDIR
|
||||
}
|
||||
return dir, 0
|
||||
}
|
||||
|
||||
// lookup a parent Dir given a path returning the dir and the leaf
|
||||
func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *mountlib.Dir, errc int) {
|
||||
parentDir, leaf := path.Split(filePath)
|
||||
dir, errc = fsys.lookupDir(parentDir)
|
||||
return leaf, dir, errc
|
||||
}
|
||||
|
||||
// lookup a File given a path
|
||||
func (fsys *FS) lookupFile(path string) (file *mountlib.File, errc int) {
|
||||
node, errc := fsys.lookupNode(path)
|
||||
if errc != 0 {
|
||||
return nil, errc
|
||||
}
|
||||
file, ok := node.(*mountlib.File)
|
||||
if !ok {
|
||||
return nil, -fuse.EISDIR
|
||||
}
|
||||
return file, 0
|
||||
}
|
||||
|
||||
// Get the underlying openFile handle from the file handle
|
||||
func (fsys *FS) getOpenFilesFromFh(fh uint64) (of *openFiles, errc int) {
|
||||
switch {
|
||||
case fsys.openFilesRd.InRange(fh):
|
||||
return fsys.openFilesRd, 0
|
||||
case fsys.openFilesWr.InRange(fh):
|
||||
return fsys.openFilesWr, 0
|
||||
case fsys.openDirs.InRange(fh):
|
||||
return fsys.openDirs, 0
|
||||
}
|
||||
return nil, -fuse.EBADF
|
||||
}
|
||||
|
||||
// Get the underlying handle from the file handle
|
||||
func (fsys *FS) getHandleFromFh(fh uint64) (handle mountlib.Noder, errc int) {
|
||||
of, errc := fsys.getOpenFilesFromFh(fh)
|
||||
if errc != 0 {
|
||||
return nil, errc
|
||||
}
|
||||
return of.Get(fh)
|
||||
}
|
||||
|
||||
// get a node from the path or from the fh if not fhUnset
|
||||
func (fsys *FS) getNode(path string, fh uint64) (node mountlib.Node, errc int) {
|
||||
if fh == fhUnset {
|
||||
node, errc = fsys.lookupNode(path)
|
||||
} else {
|
||||
var n mountlib.Noder
|
||||
n, errc = fsys.getHandleFromFh(fh)
|
||||
if errc == 0 {
|
||||
node = n.Node()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// stat fills up the stat block for Node
|
||||
func (fsys *FS) stat(node mountlib.Node, stat *fuse.Stat_t) (errc int) {
|
||||
var Size uint64
|
||||
var Blocks uint64
|
||||
var modTime time.Time
|
||||
var Mode os.FileMode
|
||||
switch x := node.(type) {
|
||||
case *mountlib.Dir:
|
||||
modTime = x.ModTime()
|
||||
Mode = mountlib.DirPerms | fuse.S_IFDIR
|
||||
case *mountlib.File:
|
||||
var err error
|
||||
modTime, Size, Blocks, err = x.Attr(mountlib.NoModTime)
|
||||
if err != nil {
|
||||
return translateError(err)
|
||||
}
|
||||
Mode = mountlib.FilePerms | fuse.S_IFREG
|
||||
}
|
||||
//stat.Dev = 1
|
||||
stat.Ino = node.Inode() // FIXME do we need to set the inode number?
|
||||
stat.Mode = uint32(Mode)
|
||||
stat.Nlink = 1
|
||||
stat.Uid = mountlib.UID
|
||||
stat.Gid = mountlib.GID
|
||||
//stat.Rdev
|
||||
stat.Size = int64(Size)
|
||||
t := fuse.NewTimespec(modTime)
|
||||
stat.Atim = t
|
||||
stat.Mtim = t
|
||||
stat.Ctim = t
|
||||
stat.Blksize = 512
|
||||
stat.Blocks = int64(Blocks)
|
||||
stat.Birthtim = t
|
||||
// fs.Debugf(nil, "stat = %+v", *stat)
|
||||
return 0
|
||||
}
|
||||
|
||||
// Init is called after the filesystem is ready
|
||||
func (fsys *FS) Init() {
|
||||
defer fs.Trace(fsys.f, "")("")
|
||||
close(fsys.ready)
|
||||
}
|
||||
|
||||
// Destroy is called when it is unmounted (note that depending on how
|
||||
// the file system is terminated the file system may not receive the
|
||||
// Destroy call).
|
||||
func (fsys *FS) Destroy() {
|
||||
defer fs.Trace(fsys.f, "")("")
|
||||
}
|
||||
|
||||
// Getattr reads the attributes for path
|
||||
func (fsys *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
|
||||
defer fs.Trace(path, "fh=0x%X", fh)("errc=%v", &errc)
|
||||
node, errc := fsys.getNode(path, fh)
|
||||
if errc == 0 {
|
||||
errc = fsys.stat(node, stat)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Opendir opens path as a directory
|
||||
func (fsys *FS) Opendir(path string) (errc int, fh uint64) {
|
||||
defer fs.Trace(path, "")("errc=%d, fh=0x%X", &errc, &fh)
|
||||
dir, errc := fsys.lookupDir(path)
|
||||
if errc == 0 {
|
||||
fh = fsys.openDirs.Open(dir)
|
||||
} else {
|
||||
fh = fhUnset
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Readdir reads the directory at dirPath.
//
// fill is called once per directory entry; ofst and fh follow the
// cgofuse readdir contract. Returns 0 on success or a negative fuse
// errno on failure.
func (fsys *FS) Readdir(dirPath string,
	fill func(name string, stat *fuse.Stat_t, ofst int64) bool,
	ofst int64,
	fh uint64) (errc int) {
	// -1 in the trace output means we failed before reading any entries
	itemsRead := -1
	defer fs.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc)

	node, errc := fsys.openDirs.Get(fh)
	if errc != 0 {
		return errc
	}

	dir, ok := node.(*mountlib.Dir)
	if !ok {
		return -fuse.ENOTDIR
	}

	items, err := dir.ReadDirAll()
	if err != nil {
		return translateError(err)
	}

	// Optionally, create a struct stat that describes the file as
	// for getattr (but FUSE only looks at st_ino and the
	// file-type bits of st_mode).
	//
	// FIXME If you call host.SetCapReaddirPlus() then WinFsp will
	// use the full stat information - a Useful optimization on
	// Windows.
	//
	// NB we are using the first mode for readdir: The readdir
	// implementation ignores the offset parameter, and passes
	// zero to the filler function's offset. The filler function
	// will not return '1' (unless an error happens), so the whole
	// directory is read in a single readdir operation.
	fill(".", nil, 0)
	fill("..", nil, 0)
	for _, item := range items {
		name := path.Base(item.Obj.Remote())
		fill(name, nil, 0)
	}
	itemsRead = len(items)
	return 0
}
|
||||
|
||||
// Releasedir finished reading the directory
|
||||
func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
|
||||
defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
|
||||
return fsys.openDirs.Close(fh)
|
||||
}
|
||||
|
||||
// Statfs reads overall stats on the filessystem
|
||||
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
|
||||
defer fs.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
|
||||
const blockSize = 4096
|
||||
fsBlocks := uint64(1 << 50)
|
||||
if runtime.GOOS == "windows" {
|
||||
fsBlocks = (1 << 43) - 1
|
||||
}
|
||||
stat.Blocks = fsBlocks // Total data blocks in file system.
|
||||
stat.Bfree = fsBlocks // Free blocks in file system.
|
||||
stat.Bavail = fsBlocks // Free blocks in file system if you're not root.
|
||||
stat.Files = 1E9 // Total files in file system.
|
||||
stat.Ffree = 1E9 // Free files in file system.
|
||||
stat.Bsize = blockSize // Block size
|
||||
stat.Namemax = 255 // Maximum file name length?
|
||||
stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
|
||||
return 0
|
||||
}
|
||||
|
||||
// Open opens a file
|
||||
func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
|
||||
defer fs.Trace(path, "flags=0x%X", flags)("errc=%d, fh=0x%X", &errc, &fh)
|
||||
file, errc := fsys.lookupFile(path)
|
||||
if errc != 0 {
|
||||
return errc, fhUnset
|
||||
}
|
||||
rdwrMode := flags & fuse.O_ACCMODE
|
||||
var err error
|
||||
var handle mountlib.Noder
|
||||
switch {
|
||||
case rdwrMode == fuse.O_RDONLY:
|
||||
handle, err = file.OpenRead()
|
||||
if err != nil {
|
||||
return translateError(err), fhUnset
|
||||
}
|
||||
return 0, fsys.openFilesRd.Open(handle)
|
||||
case rdwrMode == fuse.O_WRONLY || (rdwrMode == fuse.O_RDWR && (flags&fuse.O_TRUNC) != 0):
|
||||
handle, err = file.OpenWrite()
|
||||
if err != nil {
|
||||
return translateError(err), fhUnset
|
||||
}
|
||||
return 0, fsys.openFilesWr.Open(handle)
|
||||
case rdwrMode == fuse.O_RDWR:
|
||||
fs.Errorf(path, "Can't open for Read and Write")
|
||||
return -fuse.EPERM, fhUnset
|
||||
}
|
||||
fs.Errorf(path, "Can't figure out how to open with flags: 0x%X", flags)
|
||||
return -fuse.EPERM, fhUnset
|
||||
}
|
||||
|
||||
// Create creates and opens a file.
|
||||
func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh uint64) {
|
||||
defer fs.Trace(filePath, "flags=0x%X, mode=0%o", flags, mode)("errc=%d, fh=0x%X", &errc, &fh)
|
||||
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
|
||||
if errc != 0 {
|
||||
return errc, fhUnset
|
||||
}
|
||||
_, handle, err := parentDir.Create(leaf)
|
||||
if err != nil {
|
||||
return translateError(err), fhUnset
|
||||
}
|
||||
return 0, fsys.openFilesWr.Open(handle)
|
||||
}
|
||||
|
||||
// Truncate truncates a file to size
|
||||
func (fsys *FS) Truncate(path string, size int64, fh uint64) (errc int) {
|
||||
defer fs.Trace(path, "size=%d, fh=0x%X", size, fh)("errc=%d", &errc)
|
||||
node, errc := fsys.getNode(path, fh)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
file, ok := node.(*mountlib.File)
|
||||
if !ok {
|
||||
return -fuse.EIO
|
||||
}
|
||||
// Read the size so far
|
||||
_, currentSize, _, err := file.Attr(true)
|
||||
if err != nil {
|
||||
return translateError(err)
|
||||
}
|
||||
fs.Debugf(path, "truncate to %d, currentSize %d", size, currentSize)
|
||||
if int64(currentSize) != size {
|
||||
fs.Errorf(path, "Can't truncate files")
|
||||
return -fuse.EPERM
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (fsys *FS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) {
|
||||
defer fs.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
|
||||
// FIXME detect seek
|
||||
handle, errc := fsys.openFilesRd.Get(fh)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
rfh, ok := handle.(*mountlib.ReadFileHandle)
|
||||
if !ok {
|
||||
// Can only read from read file handle
|
||||
return -fuse.EIO
|
||||
}
|
||||
data, err := rfh.Read(int64(len(buff)), ofst)
|
||||
if err != nil {
|
||||
return translateError(err)
|
||||
}
|
||||
n = copy(buff, data)
|
||||
return n
|
||||
}
|
||||
|
||||
func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
|
||||
defer fs.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
|
||||
// FIXME detect seek
|
||||
handle, errc := fsys.openFilesWr.Get(fh)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
wfh, ok := handle.(*mountlib.WriteFileHandle)
|
||||
if !ok {
|
||||
// Can only write to write file handle
|
||||
return -fuse.EIO
|
||||
}
|
||||
// FIXME made Write return int and Read take int since must fit in RAM
|
||||
n64, err := wfh.Write(buff, ofst)
|
||||
if err != nil {
|
||||
return translateError(err)
|
||||
}
|
||||
return int(n64)
|
||||
}
|
||||
|
||||
// Flush flushes an open file descriptor or path
|
||||
func (fsys *FS) Flush(path string, fh uint64) (errc int) {
|
||||
defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
|
||||
handle, errc := fsys.getHandleFromFh(fh)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
var err error
|
||||
switch x := handle.(type) {
|
||||
case *mountlib.ReadFileHandle:
|
||||
err = x.Flush()
|
||||
case *mountlib.WriteFileHandle:
|
||||
err = x.Flush()
|
||||
default:
|
||||
return -fuse.EIO
|
||||
}
|
||||
return translateError(err)
|
||||
}
|
||||
|
||||
// Release closes the file if still open
|
||||
func (fsys *FS) Release(path string, fh uint64) (errc int) {
|
||||
defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
|
||||
of, errc := fsys.getOpenFilesFromFh(fh)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
handle, errc := of.Get(fh)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
_ = of.Close(fh)
|
||||
var err error
|
||||
switch x := handle.(type) {
|
||||
case *mountlib.ReadFileHandle:
|
||||
err = x.Release()
|
||||
case *mountlib.WriteFileHandle:
|
||||
err = x.Release()
|
||||
default:
|
||||
return -fuse.EIO
|
||||
}
|
||||
return translateError(err)
|
||||
}
|
||||
|
||||
// Unlink removes a file.
|
||||
func (fsys *FS) Unlink(filePath string) (errc int) {
|
||||
defer fs.Trace(filePath, "")("errc=%d", &errc)
|
||||
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
return translateError(parentDir.Remove(leaf))
|
||||
}
|
||||
|
||||
// Mkdir creates a directory.
|
||||
func (fsys *FS) Mkdir(dirPath string, mode uint32) (errc int) {
|
||||
defer fs.Trace(dirPath, "mode=0%o", mode)("errc=%d", &errc)
|
||||
leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
_, err := parentDir.Mkdir(leaf)
|
||||
return translateError(err)
|
||||
}
|
||||
|
||||
// Rmdir removes a directory
|
||||
func (fsys *FS) Rmdir(dirPath string) (errc int) {
|
||||
defer fs.Trace(dirPath, "")("errc=%d", &errc)
|
||||
leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
return translateError(parentDir.Remove(leaf))
|
||||
}
|
||||
|
||||
// Rename renames a file.
|
||||
func (fsys *FS) Rename(oldPath string, newPath string) (errc int) {
|
||||
defer fs.Trace(oldPath, "newPath=%q", newPath)("errc=%d", &errc)
|
||||
oldLeaf, oldParentDir, errc := fsys.lookupParentDir(oldPath)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
newLeaf, newParentDir, errc := fsys.lookupParentDir(newPath)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
return translateError(oldParentDir.Rename(oldLeaf, newLeaf, newParentDir))
|
||||
}
|
||||
|
||||
// Utimens changes the access and modification times of a file.
|
||||
func (fsys *FS) Utimens(path string, tmsp []fuse.Timespec) (errc int) {
|
||||
defer fs.Trace(path, "tmsp=%+v", tmsp)("errc=%d", &errc)
|
||||
node, errc := fsys.lookupNode(path)
|
||||
if errc != 0 {
|
||||
return errc
|
||||
}
|
||||
var t time.Time
|
||||
if tmsp == nil || len(tmsp) < 2 {
|
||||
t = time.Now()
|
||||
} else {
|
||||
t = tmsp[1].Time()
|
||||
}
|
||||
var err error
|
||||
switch x := node.(type) {
|
||||
case *mountlib.Dir:
|
||||
err = x.SetModTime(t)
|
||||
case *mountlib.File:
|
||||
err = x.SetModTime(t)
|
||||
}
|
||||
return translateError(err)
|
||||
}
|
||||
|
||||
// Mknod creates a file node.
|
||||
func (fsys *FS) Mknod(path string, mode uint32, dev uint64) (errc int) {
|
||||
defer fs.Trace(path, "mode=0x%X, dev=0x%X", mode, dev)("errc=%d", &errc)
|
||||
return -fuse.ENOSYS
|
||||
}
|
||||
|
||||
// Fsync synchronizes file contents.
|
||||
func (fsys *FS) Fsync(path string, datasync bool, fh uint64) (errc int) {
|
||||
defer fs.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
|
||||
// This is a no-op for rclone
|
||||
return 0
|
||||
}
|
||||
|
||||
// Link creates a hard link to a file.
|
||||
func (fsys *FS) Link(oldpath string, newpath string) (errc int) {
|
||||
defer fs.Trace(oldpath, "newpath=%q", newpath)("errc=%d", &errc)
|
||||
return -fuse.ENOSYS
|
||||
}
|
||||
|
||||
// Symlink creates a symbolic link.
|
||||
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
|
||||
defer fs.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
|
||||
return -fuse.ENOSYS
|
||||
}
|
||||
|
||||
// Readlink reads the target of a symbolic link.
|
||||
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
|
||||
defer fs.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
|
||||
return -fuse.ENOSYS, ""
|
||||
}
|
||||
|
||||
// Chmod changes the permission bits of a file.
|
||||
func (fsys *FS) Chmod(path string, mode uint32) (errc int) {
|
||||
defer fs.Trace(path, "mode=0%o", mode)("errc=%d", &errc)
|
||||
// This is a no-op for rclone
|
||||
return 0
|
||||
}
|
||||
|
||||
// Chown changes the owner and group of a file.
|
||||
func (fsys *FS) Chown(path string, uid uint32, gid uint32) (errc int) {
|
||||
defer fs.Trace(path, "uid=%d, gid=%d", uid, gid)("errc=%d", &errc)
|
||||
// This is a no-op for rclone
|
||||
return 0
|
||||
}
|
||||
|
||||
// Access checks file access permissions.
|
||||
func (fsys *FS) Access(path string, mask uint32) (errc int) {
|
||||
defer fs.Trace(path, "mask=0%o", mask)("errc=%d", &errc)
|
||||
// This is a no-op for rclone
|
||||
return 0
|
||||
}
|
||||
|
||||
// Fsyncdir synchronizes directory contents.
|
||||
func (fsys *FS) Fsyncdir(path string, datasync bool, fh uint64) (errc int) {
|
||||
defer fs.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
|
||||
// This is a no-op for rclone
|
||||
return 0
|
||||
}
|
||||
|
||||
// Setxattr sets extended attributes - not supported.
func (fsys *FS) Setxattr(path string, name string, value []byte, flags int) (errc int) {
	return -fuse.ENOSYS
}

// Getxattr gets extended attributes - not supported.
func (fsys *FS) Getxattr(path string, name string) (errc int, value []byte) {
	return -fuse.ENOSYS, nil
}

// Removexattr removes extended attributes - not supported.
func (fsys *FS) Removexattr(path string, name string) (errc int) {
	return -fuse.ENOSYS
}

// Listxattr lists extended attributes - not supported.
func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) {
	return -fuse.ENOSYS
}
|
||||
|
||||
// translateError converts an error from mountlib into the negative
// errno convention that cgofuse expects, returning 0 for nil errors
// and -fuse.EIO for anything unrecognised.
func translateError(err error) (errc int) {
	if err == nil {
		return 0
	}
	// Unwrap any github.com/pkg/errors wrapping to find the root cause
	cause := errors.Cause(err)
	if mErr, ok := cause.(mountlib.Error); ok {
		switch mErr {
		case mountlib.OK:
			return 0
		case mountlib.ENOENT:
			return -fuse.ENOENT
		case mountlib.ENOTEMPTY:
			return -fuse.ENOTEMPTY
		case mountlib.EEXIST:
			return -fuse.EEXIST
		case mountlib.ESPIPE:
			return -fuse.ESPIPE
		case mountlib.EBADF:
			return -fuse.EBADF
		case mountlib.EROFS:
			return -fuse.EROFS
		}
	}
	// Unknown error - log it and report a generic I/O error
	fs.Errorf(nil, "IO error: %v", err)
	return -fuse.EIO
}
|
||||
203
cmd/cmount/mount.go
Normal file
203
cmd/cmount/mount.go
Normal file
@@ -0,0 +1,203 @@
|
||||
// Package cmount implents a FUSE mounting system for rclone remotes.
|
||||
//
|
||||
// This uses the cgo based cgofuse library
|
||||
|
||||
// +build cmount
|
||||
// +build cgo
|
||||
// +build linux darwin freebsd windows
|
||||
|
||||
package cmount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/billziss-gh/cgofuse/fuse"
|
||||
"github.com/ncw/rclone/cmd/mountlib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// init registers the mount command: as "mount" on Windows (where this
// cgofuse implementation is the only one available) and as "cmount"
// elsewhere.
func init() {
	name := "cmount"
	if runtime.GOOS == "windows" {
		name = "mount"
	}
	mountlib.NewMountCommand(name, Mount)
}
|
||||
|
||||
// mountOptions configures the options from the command line flags
|
||||
func mountOptions(device string, mountpoint string) (options []string) {
|
||||
// Options
|
||||
options = []string{
|
||||
"-o", "fsname=" + device,
|
||||
"-o", "subtype=rclone",
|
||||
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
|
||||
}
|
||||
if mountlib.DebugFUSE {
|
||||
options = append(options, "-o", "debug")
|
||||
}
|
||||
|
||||
// OSX options
|
||||
if runtime.GOOS == "darwin" {
|
||||
options = append(options, "-o", "volname="+device)
|
||||
options = append(options, "-o", "noappledouble")
|
||||
options = append(options, "-o", "noapplexattr")
|
||||
}
|
||||
|
||||
// Windows options
|
||||
if runtime.GOOS == "windows" {
|
||||
// These cause WinFsp to mean the current user
|
||||
options = append(options, "-o", "uid=-1")
|
||||
options = append(options, "-o", "gid=-1")
|
||||
options = append(options, "--FileSystemName=rclone")
|
||||
}
|
||||
|
||||
if mountlib.AllowNonEmpty {
|
||||
options = append(options, "-o", "nonempty")
|
||||
}
|
||||
if mountlib.AllowOther {
|
||||
options = append(options, "-o", "allow_other")
|
||||
}
|
||||
if mountlib.AllowRoot {
|
||||
options = append(options, "-o", "allow_root")
|
||||
}
|
||||
if mountlib.DefaultPermissions {
|
||||
options = append(options, "-o", "default_permissions")
|
||||
}
|
||||
if mountlib.ReadOnly {
|
||||
options = append(options, "-o", "ro")
|
||||
}
|
||||
if mountlib.WritebackCache {
|
||||
// FIXME? options = append(options, "-o", WritebackCache())
|
||||
}
|
||||
for _, option := range *mountlib.ExtraOptions {
|
||||
options = append(options, "-o", option)
|
||||
}
|
||||
for _, option := range *mountlib.ExtraFlags {
|
||||
options = append(options, option)
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
// mount the file system
|
||||
//
|
||||
// The mount point will be ready when this returns.
|
||||
//
|
||||
// returns an error, and an error channel for the serve process to
|
||||
// report an error when fusermount is called.
|
||||
func mount(f fs.Fs, mountpoint string) (*mountlib.FS, <-chan error, func() error, error) {
|
||||
fs.Debugf(f, "Mounting on %q", mountpoint)
|
||||
|
||||
// Check the mountpoint - in Windows the mountpoint musn't exist before the mount
|
||||
if runtime.GOOS != "windows" {
|
||||
fi, err := os.Stat(mountpoint)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "mountpoint")
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
return nil, nil, nil, errors.New("mountpoint is not a directory")
|
||||
}
|
||||
}
|
||||
|
||||
// Create underlying FS
|
||||
fsys := NewFS(f)
|
||||
host := fuse.NewFileSystemHost(fsys)
|
||||
|
||||
// Create options
|
||||
options := mountOptions(f.Name()+":"+f.Root(), mountpoint)
|
||||
fs.Debugf(f, "Mounting with options: %q", options)
|
||||
|
||||
// Serve the mount point in the background returning error to errChan
|
||||
errChan := make(chan error, 1)
|
||||
go func() {
|
||||
var err error
|
||||
ok := host.Mount(mountpoint, options)
|
||||
if !ok {
|
||||
err = errors.New("mount failed")
|
||||
fs.Errorf(f, "Mount failed")
|
||||
}
|
||||
errChan <- err
|
||||
}()
|
||||
|
||||
// unmount
|
||||
unmount := func() error {
|
||||
fs.Debugf(nil, "Calling host.Unmount")
|
||||
if host.Unmount() {
|
||||
fs.Debugf(nil, "host.Unmount succeeded")
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(nil, "host.Unmount failed")
|
||||
return errors.New("host unmount failed")
|
||||
}
|
||||
|
||||
// Wait for the filesystem to become ready, checking the file
|
||||
// system didn't blow up before starting
|
||||
select {
|
||||
case err := <-errChan:
|
||||
err = errors.Wrap(err, "mount stopped before calling Init")
|
||||
return nil, nil, nil, err
|
||||
case <-fsys.ready:
|
||||
}
|
||||
|
||||
// Wait for the mount point to be available on Windows
|
||||
// On Windows the Init signal comes slightly before the mount is ready
|
||||
if runtime.GOOS == "windows" {
|
||||
const totalWait = 10 * time.Second
|
||||
const individualWait = 10 * time.Millisecond
|
||||
for i := 0; i < int(totalWait/individualWait); i++ {
|
||||
_, err := os.Stat(mountpoint)
|
||||
if err == nil {
|
||||
goto found
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
fs.Errorf(nil, "mountpoint %q didn't became available after %v - continuing anyway", mountpoint, totalWait)
|
||||
found:
|
||||
}
|
||||
|
||||
return fsys.FS, errChan, unmount, nil
|
||||
}
|
||||
|
||||
// Mount mounts the remote at mountpoint.
|
||||
//
|
||||
// If noModTime is set then it
|
||||
func Mount(f fs.Fs, mountpoint string) error {
|
||||
// Mount it
|
||||
FS, errChan, _, err := mount(f, mountpoint)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to mount FUSE fs")
|
||||
}
|
||||
|
||||
// Note cgofuse unmounts the fs on SIGINT etc
|
||||
|
||||
sigHup := make(chan os.Signal, 1)
|
||||
signal.Notify(sigHup, syscall.SIGHUP)
|
||||
|
||||
waitloop:
|
||||
for {
|
||||
select {
|
||||
// umount triggered outside the app
|
||||
case err = <-errChan:
|
||||
break waitloop
|
||||
// user sent SIGHUP to clear the cache
|
||||
case <-sigHup:
|
||||
root, err := FS.Root()
|
||||
if err != nil {
|
||||
fs.Errorf(f, "Error reading root: %v", err)
|
||||
} else {
|
||||
root.ForgetAll()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to umount FUSE fs")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
42
cmd/cmount/mount_test.go
Normal file
42
cmd/cmount/mount_test.go
Normal file
@@ -0,0 +1,42 @@
|
||||
// +build cmount
|
||||
// +build cgo
|
||||
// +build linux darwin freebsd windows
|
||||
|
||||
package cmount
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/cmd/mountlib/mounttest"
|
||||
)
|
||||
|
||||
// notWin skips the calling test when running on Windows.
func notWin(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("not running on windows")
	}
}
|
||||
|
||||
// Tests registered against the shared mounttest framework. Most are
// skipped on Windows via notWin.
func TestMain(m *testing.M) { mounttest.TestMain(m, mount) }
func TestDirLs(t *testing.T) { mounttest.TestDirLs(t) }
func TestDirCreateAndRemoveDir(t *testing.T) { notWin(t); mounttest.TestDirCreateAndRemoveDir(t) }
func TestDirCreateAndRemoveFile(t *testing.T) { notWin(t); mounttest.TestDirCreateAndRemoveFile(t) }
func TestDirRenameFile(t *testing.T) { notWin(t); mounttest.TestDirRenameFile(t) }
func TestDirRenameEmptyDir(t *testing.T) { notWin(t); mounttest.TestDirRenameEmptyDir(t) }
func TestDirRenameFullDir(t *testing.T) { notWin(t); mounttest.TestDirRenameFullDir(t) }
func TestDirModTime(t *testing.T) { notWin(t); mounttest.TestDirModTime(t) }
func TestDirCacheFlush(t *testing.T) { notWin(t); mounttest.TestDirCacheFlush(t) }
func TestDirCacheFlushOnDirRename(t *testing.T) { notWin(t); mounttest.TestDirCacheFlushOnDirRename(t) }
func TestFileModTime(t *testing.T) { notWin(t); mounttest.TestFileModTime(t) }
func TestFileModTimeWithOpenWriters(t *testing.T) {} // FIXME mounttest.TestFileModTimeWithOpenWriters(t)
func TestMount(t *testing.T) { notWin(t); mounttest.TestMount(t) }
func TestRoot(t *testing.T) { notWin(t); mounttest.TestRoot(t) }
func TestReadByByte(t *testing.T) { notWin(t); mounttest.TestReadByByte(t) }
func TestReadChecksum(t *testing.T) { notWin(t); mounttest.TestReadChecksum(t) }
func TestReadFileDoubleClose(t *testing.T) { notWin(t); mounttest.TestReadFileDoubleClose(t) }
func TestReadSeek(t *testing.T) { notWin(t); mounttest.TestReadSeek(t) }
func TestWriteFileNoWrite(t *testing.T) { notWin(t); mounttest.TestWriteFileNoWrite(t) }
func TestWriteFileWrite(t *testing.T) { notWin(t); mounttest.TestWriteFileWrite(t) }
func TestWriteFileOverwrite(t *testing.T) { notWin(t); mounttest.TestWriteFileOverwrite(t) }
func TestWriteFileDoubleClose(t *testing.T) { notWin(t); mounttest.TestWriteFileDoubleClose(t) }
func TestWriteFileFsync(t *testing.T) { notWin(t); mounttest.TestWriteFileFsync(t) }
|
||||
6
cmd/cmount/mount_unsupported.go
Normal file
6
cmd/cmount/mount_unsupported.go
Normal file
@@ -0,0 +1,6 @@
|
||||
// Build for cmount for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build !linux,!darwin,!freebsd,!windows !cgo !cmount
|
||||
|
||||
package cmount
|
||||
20
cmd/config/config.go
Normal file
20
cmd/config/config.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "config",
|
||||
Short: `Enter an interactive configuration session.`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
fs.EditConfig()
|
||||
},
|
||||
}
|
||||
63
cmd/copy/copy.go
Normal file
63
cmd/copy/copy.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package copy
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
	cmd.Root.AddCommand(commandDefintion)
}

// commandDefintion defines the "rclone copy" command, registered with
// the root command in init above.
var commandDefintion = &cobra.Command{
	Use:   "copy source:path dest:path",
	Short: `Copy files from source to dest, skipping already copied`,
	Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.

Note that it is always the contents of the directory that is synced,
not the directory so when source:path is a directory, it's the
contents of source:path that are copied, not the directory name and
contents.

If dest:path doesn't exist, it is created and the source:path contents
go there.

For example

    rclone copy source:sourcepath dest:destpath

Let's say there are two files in sourcepath

    sourcepath/one.txt
    sourcepath/two.txt

This copies them to

    destpath/one.txt
    destpath/two.txt

Not to

    destpath/sourcepath/one.txt
    destpath/sourcepath/two.txt

If you are familiar with ` + "`rsync`" + `, rclone always works as if you had
written a trailing / - meaning "copy the contents of this directory".
This applies to all commands and whether you are talking about the
source or destination.

See the ` + "`--no-traverse`" + ` option for controlling whether rclone lists
the destination directory or not.
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(2, 2, command, args)
		fsrc, fdst := cmd.NewFsSrcDst(args)
		cmd.Run(true, true, command, func() error {
			return fs.CopyDir(fdst, fsrc)
		})
	},
}
|
||||
53
cmd/copyto/copyto.go
Normal file
53
cmd/copyto/copyto.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package copyto
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
	cmd.Root.AddCommand(commandDefintion)
}

// commandDefintion defines the "rclone copyto" command, registered
// with the root command in init above.
var commandDefintion = &cobra.Command{
	Use:   "copyto source:path dest:path",
	Short: `Copy files from source to dest, skipping already copied`,
	Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.

This can be used to upload single files to other than their current
name. If the source is a directory then it acts exactly like the copy
command.

So

    rclone copyto src dst

where src and dst are rclone paths, either remote:path or
/path/to/local or C:\windows\path\if\on\windows.

This will:

    if src is file
        copy it to dst, overwriting an existing file if it exists
    if src is directory
        copy it to dst, overwriting existing files if they exist
        see copy command for full details

This doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. It doesn't delete files from the
destination.
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(2, 2, command, args)
		fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
		cmd.Run(true, true, command, func() error {
			if srcFileName == "" {
				return fs.CopyDir(fdst, fsrc)
			}
			return fs.CopyFile(fdst, fsrc, dstFileName, srcFileName)
		})
	},
}
|
||||
101
cmd/cryptcheck/cryptcheck.go
Normal file
101
cmd/cryptcheck/cryptcheck.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package cryptcheck
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/crypt"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
	cmd.Root.AddCommand(commandDefintion)
}

// commandDefintion defines the "rclone cryptcheck" command, registered
// with the root command in init above.
var commandDefintion = &cobra.Command{
	Use:   "cryptcheck remote:path cryptedremote:path",
	Short: `Cryptcheck checks the integrity of a crypted remote.`,
	Long: `
rclone cryptcheck checks a remote against a crypted remote. This is
the equivalent of running rclone check, but able to check the
checksums of the crypted remote.

For it to work the underlying remote of the cryptedremote must support
some kind of checksum.

It works by reading the nonce from each file on the cryptedremote: and
using that to encrypt each file on the remote:. It then checks the
checksum of the underlying file on the cryptedremote: against the
checksum of the file it has just encrypted.

Use it like this

    rclone cryptcheck /path/to/files encryptedremote:path

You can use it like this also, but that will involve downloading all
the files in remote:path.

    rclone cryptcheck remote:path encryptedremote:path

After it has run it will log the status of the encryptedremote:.
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(2, 2, command, args)
		fsrc, fdst := cmd.NewFsSrcDst(args)
		cmd.Run(false, true, command, func() error {
			return cryptCheck(fdst, fsrc)
		})
	},
}
|
||||
|
||||
// cryptCheck checks the integrity of a crypted remote.
//
// fdst must be a *crypt.Fs (checked below); fsrc holds the plaintext
// originals. For each pair of objects it reads the hash of the
// underlying encrypted object, re-encrypts the source with
// fcrypt.ComputeHash and compares the two checksums.
func cryptCheck(fdst, fsrc fs.Fs) error {
	// Check to see fcrypt is a crypt
	fcrypt, ok := fdst.(*crypt.Fs)
	if !ok {
		return errors.Errorf("%s:%s is not a crypt remote", fdst.Name(), fdst.Root())
	}
	// Find a hash to use
	funderlying := fcrypt.UnWrap()
	hashType := funderlying.Hashes().GetOne()
	if hashType == fs.HashNone {
		return errors.Errorf("%s:%s does not support any hashes", funderlying.Name(), funderlying.Root())
	}
	fs.Infof(nil, "Using %v for hash comparisons", hashType)

	// checkIdentical checks to see if dst and src are identical
	//
	// it returns true if differences were found
	// it also returns whether it couldn't be hashed
	checkIdentical := func(dst, src fs.Object) (differ bool, noHash bool) {
		cryptDst := dst.(*crypt.Object)
		underlyingDst := cryptDst.UnWrap()
		underlyingHash, err := underlyingDst.Hash(hashType)
		if err != nil {
			fs.Stats.Error()
			fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
			return true, false
		}
		if underlyingHash == "" {
			// Underlying remote couldn't produce a hash for this object
			return false, true
		}
		cryptHash, err := fcrypt.ComputeHash(cryptDst, src, hashType)
		if err != nil {
			fs.Stats.Error()
			fs.Errorf(dst, "Error computing hash: %v", err)
			return true, false
		}
		if cryptHash == "" {
			return false, true
		}
		if cryptHash != underlyingHash {
			fs.Stats.Error()
			fs.Errorf(src, "hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
			return true, false
		}
		fs.Debugf(src, "OK")
		return false, false
	}

	return fs.CheckFn(fcrypt, fsrc, checkIdentical)
}
|
||||
31
cmd/dbhashsum/dbhashsum.go
Normal file
31
cmd/dbhashsum/dbhashsum.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package dbhashsum
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "dbhashsum remote:path",
|
||||
Short: `Produces a Dropbbox hash file for all the objects in the path.`,
|
||||
Long: `
|
||||
Produces a Dropbox hash file for all the objects in the path. The
|
||||
hashes are calculated according to [Dropbox content hash
|
||||
rules](https://www.dropbox.com/developers/reference/content-hash).
|
||||
The output is in the same format as md5sum and sha1sum.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return fs.DropboxHashSum(fsrc, os.Stdout)
|
||||
})
|
||||
},
|
||||
}
|
||||
113
cmd/dedupe/dedupe.go
Normal file
113
cmd/dedupe/dedupe.go
Normal file
@@ -0,0 +1,113 @@
|
||||
package dedupe
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
dedupeMode = fs.DeduplicateInteractive
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
commandDefintion.Flags().VarP(&dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "dedupe [mode] remote:path",
|
||||
Short: `Interactively find duplicate files delete/rename them.`,
|
||||
Long: `
|
||||
By default ` + "`" + `dedup` + "`" + ` interactively finds duplicate files and offers to
|
||||
delete all but one or rename them to be different. Only useful with
|
||||
Google Drive which can have duplicate file names.
|
||||
|
||||
The ` + "`" + `dedupe` + "`" + ` command will delete all but one of any identical (same
|
||||
md5sum) files it finds without confirmation. This means that for most
|
||||
duplicated files the ` + "`" + `dedupe` + "`" + ` command will not be interactive. You
|
||||
can use ` + "`" + `--dry-run` + "`" + ` to see what would happen without doing anything.
|
||||
|
||||
Here is an example run.
|
||||
|
||||
Before - with duplicates
|
||||
|
||||
$ rclone lsl drive:dupes
|
||||
6048320 2016-03-05 16:23:16.798000000 one.txt
|
||||
6048320 2016-03-05 16:23:11.775000000 one.txt
|
||||
564374 2016-03-05 16:23:06.731000000 one.txt
|
||||
6048320 2016-03-05 16:18:26.092000000 one.txt
|
||||
6048320 2016-03-05 16:22:46.185000000 two.txt
|
||||
1744073 2016-03-05 16:22:38.104000000 two.txt
|
||||
564374 2016-03-05 16:22:52.118000000 two.txt
|
||||
|
||||
Now the ` + "`" + `dedupe` + "`" + ` session
|
||||
|
||||
$ rclone dedupe drive:dupes
|
||||
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
|
||||
one.txt: Found 4 duplicates - deleting identical copies
|
||||
one.txt: Deleting 2/3 identical duplicates (md5sum "1eedaa9fe86fd4b8632e2ac549403b36")
|
||||
one.txt: 2 duplicates remain
|
||||
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
|
||||
2: 564374 bytes, 2016-03-05 16:23:06.731000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
|
||||
s) Skip and do nothing
|
||||
k) Keep just one (choose which in next step)
|
||||
r) Rename all to be different (by changing file.jpg to file-1.jpg)
|
||||
s/k/r> k
|
||||
Enter the number of the file to keep> 1
|
||||
one.txt: Deleted 1 extra copies
|
||||
two.txt: Found 3 duplicates - deleting identical copies
|
||||
two.txt: 3 duplicates remain
|
||||
1: 564374 bytes, 2016-03-05 16:22:52.118000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
|
||||
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
|
||||
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, md5sum 851957f7fb6f0bc4ce76be966d336802
|
||||
s) Skip and do nothing
|
||||
k) Keep just one (choose which in next step)
|
||||
r) Rename all to be different (by changing file.jpg to file-1.jpg)
|
||||
s/k/r> r
|
||||
two-1.txt: renamed from: two.txt
|
||||
two-2.txt: renamed from: two.txt
|
||||
two-3.txt: renamed from: two.txt
|
||||
|
||||
The result being
|
||||
|
||||
$ rclone lsl drive:dupes
|
||||
6048320 2016-03-05 16:23:16.798000000 one.txt
|
||||
564374 2016-03-05 16:22:52.118000000 two-1.txt
|
||||
6048320 2016-03-05 16:22:46.185000000 two-2.txt
|
||||
1744073 2016-03-05 16:22:38.104000000 two-3.txt
|
||||
|
||||
Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag or by using an extra parameter with the same value
|
||||
|
||||
* ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
|
||||
* ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
|
||||
* ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
|
||||
* ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
|
||||
* ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
|
||||
* ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
|
||||
|
||||
For example to rename all the identically named photos in your Google Photos directory, do
|
||||
|
||||
rclone dedupe --dedupe-mode rename "drive:Google Photos"
|
||||
|
||||
Or
|
||||
|
||||
rclone dedupe rename "drive:Google Photos"
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 2, command, args)
|
||||
if len(args) > 1 {
|
||||
err := dedupeMode.Set(args[0])
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
args = args[1:]
|
||||
}
|
||||
fdst := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return fs.Deduplicate(fdst, dedupeMode)
|
||||
})
|
||||
},
|
||||
}
|
||||
41
cmd/delete/delete.go
Normal file
41
cmd/delete/delete.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package delete
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "delete remote:path",
|
||||
Short: `Remove the contents of path.`,
|
||||
Long: `
|
||||
Remove the contents of path. Unlike ` + "`" + `purge` + "`" + ` it obeys include/exclude
|
||||
filters so can be used to selectively delete files.
|
||||
|
||||
Eg delete all files bigger than 100MBytes
|
||||
|
||||
Check what would be deleted first (use either)
|
||||
|
||||
rclone --min-size 100M lsl remote:path
|
||||
rclone --dry-run --min-size 100M delete remote:path
|
||||
|
||||
Then delete
|
||||
|
||||
rclone --min-size 100M delete remote:path
|
||||
|
||||
That reads "delete everything with a minimum size of 100 MB", hence
|
||||
delete all files bigger than 100MBytes.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(true, false, command, func() error {
|
||||
return fs.Delete(fsrc)
|
||||
})
|
||||
},
|
||||
}
|
||||
44
cmd/genautocomplete/genautocomplete.go
Normal file
44
cmd/genautocomplete/genautocomplete.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package genautocomplete
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "genautocomplete [output_file]",
|
||||
Short: `Output bash completion script for rclone.`,
|
||||
Long: `
|
||||
Generates a bash shell autocompletion script for rclone.
|
||||
|
||||
This writes to /etc/bash_completion.d/rclone by default so will
|
||||
probably need to be run with sudo or as root, eg
|
||||
|
||||
sudo rclone genautocomplete
|
||||
|
||||
Logout and login again to use the autocompletion scripts, or source
|
||||
them directly
|
||||
|
||||
. /etc/bash_completion
|
||||
|
||||
If you supply a command line argument the script will be written
|
||||
there.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
out := "/etc/bash_completion.d/rclone"
|
||||
if len(args) > 0 {
|
||||
out = args[0]
|
||||
}
|
||||
err := cmd.Root.GenBashCompletionFile(out)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
55
cmd/gendocs/gendocs.go
Normal file
55
cmd/gendocs/gendocs.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package gendocs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/cobra/doc"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
const gendocFrontmatterTemplate = `---
|
||||
date: %s
|
||||
title: "%s"
|
||||
slug: %s
|
||||
url: %s
|
||||
---
|
||||
`
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "gendocs output_directory",
|
||||
Short: `Output markdown docs for rclone to the directory supplied.`,
|
||||
Long: `
|
||||
This produces markdown docs for the rclone commands to the directory
|
||||
supplied. These are in a format suitable for hugo to render into the
|
||||
rclone.org website.`,
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
out := args[0]
|
||||
err := os.MkdirAll(out, 0777)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
now := time.Now().Format(time.RFC3339)
|
||||
prepender := func(filename string) string {
|
||||
name := filepath.Base(filename)
|
||||
base := strings.TrimSuffix(name, path.Ext(name))
|
||||
url := "/commands/" + strings.ToLower(base) + "/"
|
||||
return fmt.Sprintf(gendocFrontmatterTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
|
||||
}
|
||||
linkHandler := func(name string) string {
|
||||
base := strings.TrimSuffix(name, path.Ext(name))
|
||||
return "/commands/" + strings.ToLower(base) + "/"
|
||||
}
|
||||
return doc.GenMarkdownTreeCustom(cmd.Root, out, prepender, linkHandler)
|
||||
},
|
||||
}
|
||||
18
cmd/info/all.sh
Executable file
18
cmd/info/all.sh
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/bin/bash
|
||||
exec rclone --check-normalization=true --check-control=true --check-length=true info \
|
||||
/tmp/testInfo \
|
||||
TestAmazonCloudDrive:testInfo \
|
||||
TestB2:testInfo \
|
||||
TestCryptDrive:testInfo \
|
||||
TestCryptSwift:testInfo \
|
||||
TestDrive:testInfo \
|
||||
TestDropbox:testInfo \
|
||||
TestGoogleCloudStorage:rclone-testinfo \
|
||||
TestOneDrive:testInfo \
|
||||
TestS3:rclone-testinfo \
|
||||
TestSftp:testInfo \
|
||||
TestSwift:testInfo \
|
||||
TestYandex:testInfo \
|
||||
TestFTP:testInfo
|
||||
|
||||
# TestHubic:testInfo \
|
||||
214
cmd/info/info.go
Normal file
214
cmd/info/info.go
Normal file
@@ -0,0 +1,214 @@
|
||||
package info
|
||||
|
||||
// FIXME once translations are implemented will need a no-escape
|
||||
// option for Put so we can make these tests work agaig
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
checkNormalization bool
|
||||
checkControl bool
|
||||
checkLength bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
commandDefintion.Flags().BoolVarP(&checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
|
||||
commandDefintion.Flags().BoolVarP(&checkControl, "check-control", "", true, "Check control characters.")
|
||||
commandDefintion.Flags().BoolVarP(&checkLength, "check-length", "", true, "Check max filename length.")
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "info [remote:path]+",
|
||||
Short: `Discovers file name limitations for paths.`,
|
||||
Long: `rclone info discovers what filenames are possible to write to the
|
||||
paths passed in and how long they can be. It can take some time. It
|
||||
will write test files into the remote:path passed in. It outputs a bit
|
||||
of go code for each one.
|
||||
`,
|
||||
Hidden: true,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1E6, command, args)
|
||||
for i := range args {
|
||||
f := cmd.NewFsDst(args[i : i+1])
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return readInfo(f)
|
||||
})
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
type results struct {
|
||||
f fs.Fs
|
||||
mu sync.Mutex
|
||||
charNeedsEscaping map[rune]bool
|
||||
maxFileLength int
|
||||
canWriteUnnormalized bool
|
||||
canReadUnnormalized bool
|
||||
canReadRenormalized bool
|
||||
}
|
||||
|
||||
func newResults(f fs.Fs) *results {
|
||||
return &results{
|
||||
f: f,
|
||||
charNeedsEscaping: make(map[rune]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// Print the results to stdout
|
||||
func (r *results) Print() {
|
||||
fmt.Printf("// %s\n", r.f.Name())
|
||||
if checkControl {
|
||||
escape := []string{}
|
||||
for c, needsEscape := range r.charNeedsEscaping {
|
||||
if needsEscape {
|
||||
escape = append(escape, fmt.Sprintf("0x%02X", c))
|
||||
}
|
||||
}
|
||||
sort.Strings(escape)
|
||||
fmt.Printf("charNeedsEscaping = []byte{\n")
|
||||
fmt.Printf("\t%s\n", strings.Join(escape, ", "))
|
||||
fmt.Printf("}\n")
|
||||
}
|
||||
if checkLength {
|
||||
fmt.Printf("maxFileLength = %d\n", r.maxFileLength)
|
||||
}
|
||||
if checkNormalization {
|
||||
fmt.Printf("canWriteUnnormalized = %v\n", r.canWriteUnnormalized)
|
||||
fmt.Printf("canReadUnnormalized = %v\n", r.canReadUnnormalized)
|
||||
fmt.Printf("canReadRenormalized = %v\n", r.canReadRenormalized)
|
||||
}
|
||||
}
|
||||
|
||||
// writeFile writes a file with some random contents
|
||||
func (r *results) writeFile(path string) (fs.Object, error) {
|
||||
contents := fstest.RandomString(50)
|
||||
src := fs.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
|
||||
return r.f.Put(bytes.NewBufferString(contents), src)
|
||||
}
|
||||
|
||||
// check whether normalization is enforced and check whether it is
|
||||
// done on the files anyway
|
||||
func (r *results) checkUTF8Normalization() {
|
||||
unnormalized := "Héroique"
|
||||
normalized := "Héroique"
|
||||
_, err := r.writeFile(unnormalized)
|
||||
if err != nil {
|
||||
r.canWriteUnnormalized = false
|
||||
return
|
||||
}
|
||||
r.canWriteUnnormalized = true
|
||||
_, err = r.f.NewObject(unnormalized)
|
||||
if err == nil {
|
||||
r.canReadUnnormalized = true
|
||||
}
|
||||
_, err = r.f.NewObject(normalized)
|
||||
if err == nil {
|
||||
r.canReadRenormalized = true
|
||||
}
|
||||
}
|
||||
|
||||
// check we can write file with the rune passed in
|
||||
func (r *results) checkChar(c rune) {
|
||||
fs.Infof(r.f, "Writing file 0x%02X", c)
|
||||
path := fmt.Sprintf("0x%02X-%c-", c, c)
|
||||
_, err := r.writeFile(path)
|
||||
escape := false
|
||||
if err != nil {
|
||||
fs.Infof(r.f, "Couldn't write file 0x%02X", c)
|
||||
} else {
|
||||
fs.Infof(r.f, "OK writing file 0x%02X", c)
|
||||
}
|
||||
r.mu.Lock()
|
||||
r.charNeedsEscaping[c] = escape
|
||||
r.mu.Unlock()
|
||||
}
|
||||
|
||||
// check we can write a file with the control chars
|
||||
func (r *results) checkControls() {
|
||||
fs.Infof(r.f, "Trying to create control character file names")
|
||||
// Concurrency control
|
||||
tokens := make(chan struct{}, fs.Config.Checkers)
|
||||
for i := 0; i < fs.Config.Checkers; i++ {
|
||||
tokens <- struct{}{}
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
for i := rune(0); i < 128; i++ {
|
||||
if i == 0 || i == '/' {
|
||||
// We're not even going to check NULL or /
|
||||
r.charNeedsEscaping[i] = true
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
c := i
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
token := <-tokens
|
||||
r.checkChar(c)
|
||||
tokens <- token
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
fs.Infof(r.f, "Done trying to create control character file names")
|
||||
}
|
||||
|
||||
// find the max file name size we can use
|
||||
func (r *results) findMaxLength() {
|
||||
const maxLen = 16 * 1024
|
||||
name := make([]byte, maxLen)
|
||||
for i := range name {
|
||||
name[i] = 'a'
|
||||
}
|
||||
// Find the first size of filename we can't write
|
||||
i := sort.Search(len(name), func(i int) (fail bool) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
|
||||
fail = true
|
||||
}
|
||||
}()
|
||||
|
||||
path := string(name[:i])
|
||||
_, err := r.writeFile(path)
|
||||
if err != nil {
|
||||
fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
|
||||
return true
|
||||
}
|
||||
fs.Infof(r.f, "Wrote file with name length %d", i)
|
||||
return false
|
||||
})
|
||||
r.maxFileLength = i - 1
|
||||
fs.Infof(r.f, "Max file length is %d", r.maxFileLength)
|
||||
}
|
||||
|
||||
func readInfo(f fs.Fs) error {
|
||||
err := f.Mkdir("")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't mkdir")
|
||||
}
|
||||
r := newResults(f)
|
||||
if checkControl {
|
||||
r.checkControls()
|
||||
}
|
||||
if checkLength {
|
||||
r.findMaxLength()
|
||||
}
|
||||
if checkNormalization {
|
||||
r.checkUTF8Normalization()
|
||||
}
|
||||
r.Print()
|
||||
return nil
|
||||
}
|
||||
49
cmd/listremotes/listremotes.go
Normal file
49
cmd/listremotes/listremotes.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package ls
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
listLong bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
commandDefintion.Flags().BoolVarP(&listLong, "long", "l", listLong, "Show the type as well as names.")
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "listremotes",
|
||||
Short: `List all the remotes in the config file.`,
|
||||
Long: `
|
||||
rclone listremotes lists all the available remotes from the config file.
|
||||
|
||||
When uses with the -l flag it lists the types too.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
remotes := fs.ConfigFileSections()
|
||||
sort.Strings(remotes)
|
||||
maxlen := 1
|
||||
for _, remote := range remotes {
|
||||
if len(remote) > maxlen {
|
||||
maxlen = len(remote)
|
||||
}
|
||||
}
|
||||
for _, remote := range remotes {
|
||||
if listLong {
|
||||
remoteType := fs.ConfigFileGet(remote, "type", "UNKNOWN")
|
||||
fmt.Printf("%-*s %s\n", maxlen+1, remote+":", remoteType)
|
||||
} else {
|
||||
fmt.Printf("%s:\n", remote)
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
25
cmd/ls/ls.go
Normal file
25
cmd/ls/ls.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package ls
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "ls remote:path",
|
||||
Short: `List all the objects in the path with size and path.`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return fs.List(fsrc, os.Stdout)
|
||||
})
|
||||
},
|
||||
}
|
||||
46
cmd/ls2/ls2.go
Normal file
46
cmd/ls2/ls2.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package ls2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
recurse bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "ls2 remote:path",
|
||||
Short: `List directories and objects in the path.`,
|
||||
Hidden: true,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return fs.Walk(fsrc, "", false, fs.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Errorf(path, "error listing: %v", err)
|
||||
return nil
|
||||
}
|
||||
for _, entry := range entries {
|
||||
_, isDir := entry.(fs.Directory)
|
||||
if isDir {
|
||||
fmt.Println(entry.Remote() + "/")
|
||||
} else {
|
||||
fmt.Println(entry.Remote())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
},
|
||||
}
|
||||
25
cmd/lsd/lsd.go
Normal file
25
cmd/lsd/lsd.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package lsd
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "lsd remote:path",
|
||||
Short: `List all directories/containers/buckets in the path.`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return fs.ListDir(fsrc, os.Stdout)
|
||||
})
|
||||
},
|
||||
}
|
||||
147
cmd/lsjson/lsjson.go
Normal file
147
cmd/lsjson/lsjson.go
Normal file
@@ -0,0 +1,147 @@
|
||||
package lsjson
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
recurse bool
|
||||
showHash bool
|
||||
noModTime bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
|
||||
commandDefintion.Flags().BoolVarP(&showHash, "hash", "", false, "Include hashes in the output (may take longer).")
|
||||
commandDefintion.Flags().BoolVarP(&noModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
|
||||
}
|
||||
|
||||
// lsJSON in the struct which gets marshalled for each line
|
||||
type lsJSON struct {
|
||||
Path string
|
||||
Name string
|
||||
Size int64
|
||||
ModTime Timestamp //`json:",omitempty"`
|
||||
IsDir bool
|
||||
Hashes map[string]string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Timestamp a time in RFC3339 format with Nanosecond precision secongs
|
||||
type Timestamp time.Time
|
||||
|
||||
// MarshalJSON turns a Timestamp into JSON
|
||||
func (t Timestamp) MarshalJSON() (out []byte, err error) {
|
||||
tt := time.Time(t)
|
||||
if tt.IsZero() {
|
||||
return []byte(`""`), nil
|
||||
}
|
||||
return []byte(`"` + tt.Format(time.RFC3339Nano) + `"`), nil
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "lsjson remote:path",
|
||||
Short: `List directories and objects in the path in JSON format.`,
|
||||
Long: `List directories and objects in the path in JSON format.
|
||||
|
||||
The output is an array of Items, where each Item looks like this
|
||||
|
||||
{
|
||||
"Hashes" : {
|
||||
"SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f",
|
||||
"MD5" : "b1946ac92492d2347c6235b4d2611184",
|
||||
"DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc"
|
||||
},
|
||||
"IsDir" : false,
|
||||
"ModTime" : "2017-05-31T16:15:57.034468261+01:00",
|
||||
"Name" : "file.txt",
|
||||
"Path" : "full/path/goes/here/file.txt",
|
||||
"Size" : 6
|
||||
}
|
||||
|
||||
If --hash is not specified the the Hashes property won't be emitted.
|
||||
|
||||
If --no-modtime is specified then ModTime will be blank.
|
||||
|
||||
The time is in RFC3339 format with nanosecond precision.
|
||||
|
||||
The whole output can be processed as a JSON blob, or alternatively it
|
||||
can be processed line by line as each item is written one to a line.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
fmt.Println("[")
|
||||
first := true
|
||||
err := fs.Walk(fsrc, "", false, fs.ConfigMaxDepth(recurse), func(dirPath string, entries fs.DirEntries, err error) error {
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Errorf(dirPath, "error listing: %v", err)
|
||||
return nil
|
||||
}
|
||||
for _, entry := range entries {
|
||||
item := lsJSON{
|
||||
Path: entry.Remote(),
|
||||
Name: path.Base(entry.Remote()),
|
||||
Size: entry.Size(),
|
||||
}
|
||||
if !noModTime {
|
||||
item.ModTime = Timestamp(entry.ModTime())
|
||||
}
|
||||
switch x := entry.(type) {
|
||||
case fs.Directory:
|
||||
item.IsDir = true
|
||||
case fs.Object:
|
||||
item.IsDir = false
|
||||
if showHash {
|
||||
item.Hashes = make(map[string]string)
|
||||
for _, hashType := range x.Fs().Hashes().Array() {
|
||||
hash, err := x.Hash(hashType)
|
||||
if err != nil {
|
||||
fs.Errorf(x, "Failed to read hash: %v", err)
|
||||
} else if hash != "" {
|
||||
item.Hashes[hashType.String()] = hash
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
fs.Errorf(nil, "Unknown type %T in listing", entry)
|
||||
}
|
||||
out, err := json.Marshal(item)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to marshal list object")
|
||||
}
|
||||
if first {
|
||||
first = false
|
||||
} else {
|
||||
fmt.Print(",\n")
|
||||
}
|
||||
_, err = os.Stdout.Write(out)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to write to output")
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error listing JSON")
|
||||
}
|
||||
if !first {
|
||||
fmt.Println()
|
||||
}
|
||||
fmt.Println("]")
|
||||
return nil
|
||||
})
|
||||
},
|
||||
}
|
||||
25
cmd/lsl/lsl.go
Normal file
25
cmd/lsl/lsl.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package lsl
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "lsl remote:path",
|
||||
Short: `List all the objects path with modification time, size and path.`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return fs.ListLong(fsrc, os.Stdout)
|
||||
})
|
||||
},
|
||||
}
|
||||
29
cmd/md5sum/md5sum.go
Normal file
29
cmd/md5sum/md5sum.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package md5sum
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "md5sum remote:path",
|
||||
Short: `Produces an md5sum file for all the objects in the path.`,
|
||||
Long: `
|
||||
Produces an md5sum file for all the objects in the path. This
|
||||
is in the same format as the standard md5sum tool produces.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return fs.Md5sum(fsrc, os.Stdout)
|
||||
})
|
||||
},
|
||||
}
|
||||
49
cmd/memtest/memtest.go
Normal file
49
cmd/memtest/memtest.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package memtest
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "memtest remote:path",
|
||||
Short: `Load all the objects at remote:path and report memory stats.`,
|
||||
Hidden: true,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
objects, _, err := fs.Count(fsrc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
objs := make([]fs.Object, 0, objects)
|
||||
var before, after runtime.MemStats
|
||||
runtime.GC()
|
||||
runtime.ReadMemStats(&before)
|
||||
var mu sync.Mutex
|
||||
err = fs.ListFn(fsrc, func(o fs.Object) {
|
||||
mu.Lock()
|
||||
objs = append(objs, o)
|
||||
mu.Unlock()
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runtime.GC()
|
||||
runtime.ReadMemStats(&after)
|
||||
usedMemory := after.Alloc - before.Alloc
|
||||
fs.Logf(nil, "%d objects took %d bytes, %.1f bytes/object", len(objs), usedMemory, float64(usedMemory)/float64(len(objs)))
|
||||
fs.Logf(nil, "System memory changed from %d to %d bytes a change of %d bytes", before.Sys, after.Sys, after.Sys-before.Sys)
|
||||
return nil
|
||||
})
|
||||
},
|
||||
}
|
||||
23
cmd/mkdir/mkdir.go
Normal file
23
cmd/mkdir/mkdir.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package mkdir
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "mkdir remote:path",
|
||||
Short: `Make the path if it doesn't already exist.`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fdst := cmd.NewFsDst(args)
|
||||
cmd.Run(true, false, command, func() error {
|
||||
return fs.Mkdir(fdst, "")
|
||||
})
|
||||
},
|
||||
}
|
||||
204
cmd/mount/dir.go
Normal file
204
cmd/mount/dir.go
Normal file
@@ -0,0 +1,204 @@
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"bazil.org/fuse"
|
||||
fusefs "bazil.org/fuse/fs"
|
||||
"github.com/ncw/rclone/cmd/mountlib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// DirEntry describes the contents of a directory entry
|
||||
//
|
||||
// It can be a file or a directory
|
||||
//
|
||||
// node may be nil, but o may not
|
||||
type DirEntry struct {
|
||||
o fs.DirEntry
|
||||
node fusefs.Node
|
||||
}
|
||||
|
||||
// Dir represents a directory entry
|
||||
type Dir struct {
|
||||
*mountlib.Dir
|
||||
// f fs.Fs
|
||||
// path string
|
||||
// modTime time.Time
|
||||
// mu sync.RWMutex // protects the following
|
||||
// read time.Time // time directory entry last read
|
||||
// items map[string]*DirEntry
|
||||
}
|
||||
|
||||
// Check interface satsified
|
||||
var _ fusefs.Node = (*Dir)(nil)
|
||||
|
||||
// Attr updates the attributes of a directory
|
||||
func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
|
||||
defer fs.Trace(d, "")("attr=%+v, err=%v", a, &err)
|
||||
a.Gid = mountlib.GID
|
||||
a.Uid = mountlib.UID
|
||||
a.Mode = os.ModeDir | mountlib.DirPerms
|
||||
modTime := d.ModTime()
|
||||
a.Atime = modTime
|
||||
a.Mtime = modTime
|
||||
a.Ctime = modTime
|
||||
a.Crtime = modTime
|
||||
// FIXME include Valid so get some caching?
|
||||
// FIXME fs.Debugf(d.path, "Dir.Attr %+v", a)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.NodeSetattrer = (*Dir)(nil)
|
||||
|
||||
// Setattr handles attribute changes from FUSE. Currently supports ModTime only.
|
||||
func (d *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
|
||||
defer fs.Trace(d, "stat=%+v", req)("err=%v", &err)
|
||||
if mountlib.NoModTime {
|
||||
return nil
|
||||
}
|
||||
|
||||
if req.Valid.MtimeNow() {
|
||||
err = d.SetModTime(time.Now())
|
||||
} else if req.Valid.Mtime() {
|
||||
err = d.SetModTime(req.Mtime)
|
||||
}
|
||||
|
||||
return translateError(err)
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.NodeRequestLookuper = (*Dir)(nil)
|
||||
|
||||
// Lookup looks up a specific entry in the receiver.
|
||||
//
|
||||
// Lookup should return a Node corresponding to the entry. If the
|
||||
// name does not exist in the directory, Lookup should return ENOENT.
|
||||
//
|
||||
// Lookup need not to handle the names "." and "..".
|
||||
func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fusefs.Node, err error) {
|
||||
defer fs.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
|
||||
mnode, err := d.Dir.Lookup(req.Name)
|
||||
if err != nil {
|
||||
return nil, translateError(err)
|
||||
}
|
||||
switch x := mnode.(type) {
|
||||
case *mountlib.File:
|
||||
return &File{x}, nil
|
||||
case *mountlib.Dir:
|
||||
return &Dir{x}, nil
|
||||
}
|
||||
panic("bad type")
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.HandleReadDirAller = (*Dir)(nil)
|
||||
|
||||
// ReadDirAll reads the contents of the directory
|
||||
func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) {
|
||||
itemsRead := -1
|
||||
defer fs.Trace(d, "")("item=%d, err=%v", &itemsRead, &err)
|
||||
items, err := d.Dir.ReadDirAll()
|
||||
if err != nil {
|
||||
return nil, translateError(err)
|
||||
}
|
||||
for _, item := range items {
|
||||
var dirent fuse.Dirent
|
||||
switch x := item.Obj.(type) {
|
||||
case fs.Object:
|
||||
dirent = fuse.Dirent{
|
||||
// Inode FIXME ???
|
||||
Type: fuse.DT_File,
|
||||
Name: path.Base(x.Remote()),
|
||||
}
|
||||
case fs.Directory:
|
||||
dirent = fuse.Dirent{
|
||||
// Inode FIXME ???
|
||||
Type: fuse.DT_Dir,
|
||||
Name: path.Base(x.Remote()),
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("unknown type %T", item)
|
||||
}
|
||||
dirents = append(dirents, dirent)
|
||||
}
|
||||
itemsRead = len(dirents)
|
||||
return dirents, nil
|
||||
}
|
||||
|
||||
var _ fusefs.NodeCreater = (*Dir)(nil)
|
||||
|
||||
// Create makes a new file
|
||||
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
|
||||
defer fs.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
|
||||
file, fh, err := d.Dir.Create(req.Name)
|
||||
if err != nil {
|
||||
return nil, nil, translateError(err)
|
||||
}
|
||||
return &File{file}, &WriteFileHandle{fh}, err
|
||||
}
|
||||
|
||||
var _ fusefs.NodeMkdirer = (*Dir)(nil)
|
||||
|
||||
// Mkdir creates a new directory
|
||||
func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.Node, err error) {
|
||||
defer fs.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
|
||||
dir, err := d.Dir.Mkdir(req.Name)
|
||||
if err != nil {
|
||||
return nil, translateError(err)
|
||||
}
|
||||
return &Dir{dir}, nil
|
||||
}
|
||||
|
||||
var _ fusefs.NodeRemover = (*Dir)(nil)
|
||||
|
||||
// Remove removes the entry with the given name from
|
||||
// the receiver, which must be a directory. The entry to be removed
|
||||
// may correspond to a file (unlink) or to a directory (rmdir).
|
||||
func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
|
||||
defer fs.Trace(d, "name=%q", req.Name)("err=%v", &err)
|
||||
err = d.Dir.Remove(req.Name)
|
||||
if err != nil {
|
||||
return translateError(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.NodeRenamer = (*Dir)(nil)
|
||||
|
||||
// Rename the file
|
||||
func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs.Node) (err error) {
|
||||
defer fs.Trace(d, "oldName=%q, newName=%q, newDir=%+v", req.OldName, req.NewName, newDir)("err=%v", &err)
|
||||
destDir, ok := newDir.(*Dir)
|
||||
if !ok {
|
||||
return errors.Errorf("Unknown Dir type %T", newDir)
|
||||
}
|
||||
|
||||
err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
|
||||
if err != nil {
|
||||
return translateError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.NodeFsyncer = (*Dir)(nil)
|
||||
|
||||
// Fsync the directory
|
||||
func (d *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
|
||||
defer fs.Trace(d, "")("err=%v", &err)
|
||||
err = d.Dir.Fsync()
|
||||
if err != nil {
|
||||
return translateError(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
118
cmd/mount/file.go
Normal file
118
cmd/mount/file.go
Normal file
@@ -0,0 +1,118 @@
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"bazil.org/fuse"
|
||||
fusefs "bazil.org/fuse/fs"
|
||||
"github.com/ncw/rclone/cmd/mountlib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// File represents a file
|
||||
type File struct {
|
||||
*mountlib.File
|
||||
// size int64 // size of file - read and written with atomic int64 - must be 64 bit aligned
|
||||
// d *Dir // parent directory - read only
|
||||
// mu sync.RWMutex // protects the following
|
||||
// o fs.Object // NB o may be nil if file is being written
|
||||
// writers int // number of writers for this file
|
||||
// pendingModTime time.Time // will be applied once o becomes available, i.e. after file was written
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.Node = (*File)(nil)
|
||||
|
||||
// Attr fills out the attributes for the file
|
||||
func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
|
||||
defer fs.Trace(f, "")("a=%+v, err=%v", a, &err)
|
||||
modTime, Size, Blocks, err := f.File.Attr(mountlib.NoModTime)
|
||||
if err != nil {
|
||||
return translateError(err)
|
||||
}
|
||||
a.Gid = mountlib.GID
|
||||
a.Uid = mountlib.UID
|
||||
a.Mode = mountlib.FilePerms
|
||||
a.Size = Size
|
||||
a.Atime = modTime
|
||||
a.Mtime = modTime
|
||||
a.Ctime = modTime
|
||||
a.Crtime = modTime
|
||||
a.Blocks = Blocks
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.NodeSetattrer = (*File)(nil)
|
||||
|
||||
// Setattr handles attribute changes from FUSE. Currently supports ModTime only.
|
||||
func (f *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
|
||||
defer fs.Trace(f, "a=%+v", req)("err=%v", &err)
|
||||
if mountlib.NoModTime {
|
||||
return nil
|
||||
}
|
||||
if req.Valid.MtimeNow() {
|
||||
err = f.File.SetModTime(time.Now())
|
||||
} else if req.Valid.Mtime() {
|
||||
err = f.File.SetModTime(req.Mtime)
|
||||
}
|
||||
return translateError(err)
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.NodeOpener = (*File)(nil)
|
||||
|
||||
// Open the file for read or write
|
||||
func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fh fusefs.Handle, err error) {
|
||||
defer fs.Trace(f, "flags=%v", req.Flags)("fh=%v, err=%v", &fh, &err)
|
||||
switch {
|
||||
case req.Flags.IsReadOnly():
|
||||
if mountlib.NoSeek {
|
||||
resp.Flags |= fuse.OpenNonSeekable
|
||||
}
|
||||
var rfh *mountlib.ReadFileHandle
|
||||
rfh, err = f.File.OpenRead()
|
||||
fh = &ReadFileHandle{rfh}
|
||||
case req.Flags.IsWriteOnly() || (req.Flags.IsReadWrite() && (req.Flags&fuse.OpenTruncate) != 0):
|
||||
resp.Flags |= fuse.OpenNonSeekable
|
||||
var wfh *mountlib.WriteFileHandle
|
||||
wfh, err = f.File.OpenWrite()
|
||||
fh = &WriteFileHandle{wfh}
|
||||
case req.Flags.IsReadWrite():
|
||||
err = errors.New("can't open for read and write simultaneously")
|
||||
default:
|
||||
err = errors.Errorf("can't figure out how to open with flags %v", req.Flags)
|
||||
}
|
||||
|
||||
/*
|
||||
// File was opened in append-only mode, all writes will go to end
|
||||
// of file. OS X does not provide this information.
|
||||
OpenAppend OpenFlags = syscall.O_APPEND
|
||||
OpenCreate OpenFlags = syscall.O_CREAT
|
||||
OpenDirectory OpenFlags = syscall.O_DIRECTORY
|
||||
OpenExclusive OpenFlags = syscall.O_EXCL
|
||||
OpenNonblock OpenFlags = syscall.O_NONBLOCK
|
||||
OpenSync OpenFlags = syscall.O_SYNC
|
||||
OpenTruncate OpenFlags = syscall.O_TRUNC
|
||||
*/
|
||||
|
||||
if err != nil {
|
||||
return nil, translateError(err)
|
||||
}
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.NodeFsyncer = (*File)(nil)
|
||||
|
||||
// Fsync the file
|
||||
//
|
||||
// Note that we don't do anything except return OK
|
||||
func (f *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
|
||||
defer fs.Trace(f, "")("err=%v", &err)
|
||||
return nil
|
||||
}
|
||||
91
cmd/mount/fs.go
Normal file
91
cmd/mount/fs.go
Normal file
@@ -0,0 +1,91 @@
|
||||
// FUSE main Fs
|
||||
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"bazil.org/fuse"
|
||||
fusefs "bazil.org/fuse/fs"
|
||||
"github.com/ncw/rclone/cmd/mountlib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// FS represents the top level filing system
|
||||
type FS struct {
|
||||
*mountlib.FS
|
||||
f fs.Fs
|
||||
}
|
||||
|
||||
// Check interface satistfied
|
||||
var _ fusefs.FS = (*FS)(nil)
|
||||
|
||||
// NewFS makes a new FS
|
||||
func NewFS(f fs.Fs) *FS {
|
||||
fsys := &FS{
|
||||
FS: mountlib.NewFS(f),
|
||||
f: f,
|
||||
}
|
||||
return fsys
|
||||
}
|
||||
|
||||
// Root returns the root node
|
||||
func (f *FS) Root() (node fusefs.Node, err error) {
|
||||
defer fs.Trace("", "")("node=%+v, err=%v", &node, &err)
|
||||
root, err := f.FS.Root()
|
||||
if err != nil {
|
||||
return nil, translateError(err)
|
||||
}
|
||||
return &Dir{root}, nil
|
||||
}
|
||||
|
||||
// Check interface satsified
|
||||
var _ fusefs.FSStatfser = (*FS)(nil)
|
||||
|
||||
// Statfs is called to obtain file system metadata.
|
||||
// It should write that data to resp.
|
||||
func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) (err error) {
|
||||
defer fs.Trace("", "")("stat=%+v, err=%v", resp, &err)
|
||||
const blockSize = 4096
|
||||
const fsBlocks = (1 << 50) / blockSize
|
||||
resp.Blocks = fsBlocks // Total data blocks in file system.
|
||||
resp.Bfree = fsBlocks // Free blocks in file system.
|
||||
resp.Bavail = fsBlocks // Free blocks in file system if you're not root.
|
||||
resp.Files = 1E9 // Total files in file system.
|
||||
resp.Ffree = 1E9 // Free files in file system.
|
||||
resp.Bsize = blockSize // Block size
|
||||
resp.Namelen = 255 // Maximum file name length?
|
||||
resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Translate errors from mountlib
|
||||
func translateError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
cause := errors.Cause(err)
|
||||
if mErr, ok := cause.(mountlib.Error); ok {
|
||||
switch mErr {
|
||||
case mountlib.OK:
|
||||
return nil
|
||||
case mountlib.ENOENT:
|
||||
return fuse.ENOENT
|
||||
case mountlib.ENOTEMPTY:
|
||||
return fuse.Errno(syscall.ENOTEMPTY)
|
||||
case mountlib.EEXIST:
|
||||
return fuse.EEXIST
|
||||
case mountlib.ESPIPE:
|
||||
return fuse.Errno(syscall.ESPIPE)
|
||||
case mountlib.EBADF:
|
||||
return fuse.Errno(syscall.EBADF)
|
||||
case mountlib.EROFS:
|
||||
return fuse.Errno(syscall.EROFS)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
153
cmd/mount/mount.go
Normal file
153
cmd/mount/mount.go
Normal file
@@ -0,0 +1,153 @@
|
||||
// Package mount implents a FUSE mounting system for rclone remotes.
|
||||
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"bazil.org/fuse"
|
||||
fusefs "bazil.org/fuse/fs"
|
||||
"github.com/ncw/rclone/cmd/mountlib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func init() {
|
||||
mountlib.NewMountCommand("mount", Mount)
|
||||
}
|
||||
|
||||
// mountOptions configures the options from the command line flags
|
||||
func mountOptions(device string) (options []fuse.MountOption) {
|
||||
options = []fuse.MountOption{
|
||||
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
|
||||
fuse.Subtype("rclone"),
|
||||
fuse.FSName(device), fuse.VolumeName(device),
|
||||
fuse.NoAppleDouble(),
|
||||
fuse.NoAppleXattr(),
|
||||
|
||||
// Options from benchmarking in the fuse module
|
||||
//fuse.MaxReadahead(64 * 1024 * 1024),
|
||||
//fuse.AsyncRead(), - FIXME this causes
|
||||
// ReadFileHandle.Read error: read /home/files/ISOs/xubuntu-15.10-desktop-amd64.iso: bad file descriptor
|
||||
// which is probably related to errors people are having
|
||||
//fuse.WritebackCache(),
|
||||
}
|
||||
if mountlib.AllowNonEmpty {
|
||||
options = append(options, fuse.AllowNonEmptyMount())
|
||||
}
|
||||
if mountlib.AllowOther {
|
||||
options = append(options, fuse.AllowOther())
|
||||
}
|
||||
if mountlib.AllowRoot {
|
||||
options = append(options, fuse.AllowRoot())
|
||||
}
|
||||
if mountlib.DefaultPermissions {
|
||||
options = append(options, fuse.DefaultPermissions())
|
||||
}
|
||||
if mountlib.ReadOnly {
|
||||
options = append(options, fuse.ReadOnly())
|
||||
}
|
||||
if mountlib.WritebackCache {
|
||||
options = append(options, fuse.WritebackCache())
|
||||
}
|
||||
if len(*mountlib.ExtraOptions) > 0 {
|
||||
fs.Errorf(nil, "-o/--option not supported with this FUSE backend")
|
||||
}
|
||||
if len(*mountlib.ExtraOptions) > 0 {
|
||||
fs.Errorf(nil, "--fuse-flag not supported with this FUSE backend")
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
// mount the file system
|
||||
//
|
||||
// The mount point will be ready when this returns.
|
||||
//
|
||||
// returns an error, and an error channel for the serve process to
|
||||
// report an error when fusermount is called.
|
||||
func mount(f fs.Fs, mountpoint string) (*mountlib.FS, <-chan error, func() error, error) {
|
||||
fs.Debugf(f, "Mounting on %q", mountpoint)
|
||||
c, err := fuse.Mount(mountpoint, mountOptions(f.Name()+":"+f.Root())...)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
filesys := NewFS(f)
|
||||
server := fusefs.New(c, nil)
|
||||
|
||||
// Serve the mount point in the background returning error to errChan
|
||||
errChan := make(chan error, 1)
|
||||
go func() {
|
||||
err := server.Serve(filesys)
|
||||
closeErr := c.Close()
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
errChan <- err
|
||||
}()
|
||||
|
||||
// check if the mount process has an error to report
|
||||
<-c.Ready
|
||||
if err := c.MountError; err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
unmount := func() error {
|
||||
return fuse.Unmount(mountpoint)
|
||||
}
|
||||
|
||||
return filesys.FS, errChan, unmount, nil
|
||||
}
|
||||
|
||||
// Mount mounts the remote at mountpoint.
|
||||
//
|
||||
// If noModTime is set then it
|
||||
func Mount(f fs.Fs, mountpoint string) error {
|
||||
if mountlib.DebugFUSE {
|
||||
fuse.Debug = func(msg interface{}) {
|
||||
fs.Debugf("fuse", "%v", msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Mount it
|
||||
FS, errChan, unmount, err := mount(f, mountpoint)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to mount FUSE fs")
|
||||
}
|
||||
|
||||
sigInt := make(chan os.Signal, 1)
|
||||
signal.Notify(sigInt, syscall.SIGINT, syscall.SIGTERM)
|
||||
sigHup := make(chan os.Signal, 1)
|
||||
signal.Notify(sigHup, syscall.SIGHUP)
|
||||
|
||||
waitloop:
|
||||
for {
|
||||
select {
|
||||
// umount triggered outside the app
|
||||
case err = <-errChan:
|
||||
break waitloop
|
||||
// Program abort: umount
|
||||
case <-sigInt:
|
||||
err = unmount()
|
||||
break waitloop
|
||||
// user sent SIGHUP to clear the cache
|
||||
case <-sigHup:
|
||||
root, err := FS.Root()
|
||||
if err != nil {
|
||||
fs.Errorf(f, "Error reading root: %v", err)
|
||||
} else {
|
||||
root.ForgetAll()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to umount FUSE fs")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
33
cmd/mount/mount_test.go
Normal file
33
cmd/mount/mount_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/cmd/mountlib/mounttest"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) { mounttest.TestMain(m, mount) }
|
||||
func TestDirLs(t *testing.T) { mounttest.TestDirLs(t) }
|
||||
func TestDirCreateAndRemoveDir(t *testing.T) { mounttest.TestDirCreateAndRemoveDir(t) }
|
||||
func TestDirCreateAndRemoveFile(t *testing.T) { mounttest.TestDirCreateAndRemoveFile(t) }
|
||||
func TestDirRenameFile(t *testing.T) { mounttest.TestDirRenameFile(t) }
|
||||
func TestDirRenameEmptyDir(t *testing.T) { mounttest.TestDirRenameEmptyDir(t) }
|
||||
func TestDirRenameFullDir(t *testing.T) { mounttest.TestDirRenameFullDir(t) }
|
||||
func TestDirModTime(t *testing.T) { mounttest.TestDirModTime(t) }
|
||||
func TestDirCacheFlush(t *testing.T) { mounttest.TestDirCacheFlush(t) }
|
||||
func TestDirCacheFlushOnDirRename(t *testing.T) { mounttest.TestDirCacheFlushOnDirRename(t) }
|
||||
func TestFileModTime(t *testing.T) { mounttest.TestFileModTime(t) }
|
||||
func TestFileModTimeWithOpenWriters(t *testing.T) { mounttest.TestFileModTimeWithOpenWriters(t) }
|
||||
func TestMount(t *testing.T) { mounttest.TestMount(t) }
|
||||
func TestRoot(t *testing.T) { mounttest.TestRoot(t) }
|
||||
func TestReadByByte(t *testing.T) { mounttest.TestReadByByte(t) }
|
||||
func TestReadChecksum(t *testing.T) { mounttest.TestReadChecksum(t) }
|
||||
func TestReadFileDoubleClose(t *testing.T) { mounttest.TestReadFileDoubleClose(t) }
|
||||
func TestReadSeek(t *testing.T) { mounttest.TestReadSeek(t) }
|
||||
func TestWriteFileNoWrite(t *testing.T) { mounttest.TestWriteFileNoWrite(t) }
|
||||
func TestWriteFileWrite(t *testing.T) { mounttest.TestWriteFileWrite(t) }
|
||||
func TestWriteFileOverwrite(t *testing.T) { mounttest.TestWriteFileOverwrite(t) }
|
||||
func TestWriteFileDoubleClose(t *testing.T) { mounttest.TestWriteFileDoubleClose(t) }
|
||||
func TestWriteFileFsync(t *testing.T) { mounttest.TestWriteFileFsync(t) }
|
||||
6
cmd/mount/mount_unsupported.go
Normal file
6
cmd/mount/mount_unsupported.go
Normal file
@@ -0,0 +1,6 @@
|
||||
// Build for mount for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build !linux,!darwin,!freebsd
|
||||
|
||||
package mount
|
||||
63
cmd/mount/read.go
Normal file
63
cmd/mount/read.go
Normal file
@@ -0,0 +1,63 @@
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"bazil.org/fuse"
|
||||
fusefs "bazil.org/fuse/fs"
|
||||
"github.com/ncw/rclone/cmd/mountlib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// ReadFileHandle is an open for read file handle on a File
|
||||
type ReadFileHandle struct {
|
||||
*mountlib.ReadFileHandle
|
||||
// mu sync.Mutex
|
||||
// closed bool // set if handle has been closed
|
||||
// r *fs.Account
|
||||
// o fs.Object
|
||||
// readCalled bool // set if read has been called
|
||||
// offset int64
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.Handle = (*ReadFileHandle)(nil)
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.HandleReader = (*ReadFileHandle)(nil)
|
||||
|
||||
// Read from the file handle
|
||||
func (fh *ReadFileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) (err error) {
|
||||
dataRead := -1
|
||||
defer fs.Trace(fh, "len=%d, offset=%d", req.Size, req.Offset)("read=%d, err=%v", &dataRead, &err)
|
||||
data, err := fh.ReadFileHandle.Read(int64(req.Size), req.Offset)
|
||||
if err != nil {
|
||||
return translateError(err)
|
||||
}
|
||||
resp.Data = data
|
||||
dataRead = len(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.HandleFlusher = (*ReadFileHandle)(nil)
|
||||
|
||||
// Flush is called each time the file or directory is closed.
|
||||
// Because there can be multiple file descriptors referring to a
|
||||
// single opened file, Flush can be called multiple times.
|
||||
func (fh *ReadFileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) (err error) {
|
||||
defer fs.Trace(fh, "")("err=%v", &err)
|
||||
return translateError(fh.ReadFileHandle.Flush())
|
||||
}
|
||||
|
||||
var _ fusefs.HandleReleaser = (*ReadFileHandle)(nil)
|
||||
|
||||
// Release is called when we are finished with the file handle
|
||||
//
|
||||
// It isn't called directly from userspace so the error is ignored by
|
||||
// the kernel
|
||||
func (fh *ReadFileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) (err error) {
|
||||
defer fs.Trace(fh, "")("err=%v", &err)
|
||||
return translateError(fh.ReadFileHandle.Release())
|
||||
}
|
||||
72
cmd/mount/test/seek_speed.go
Normal file
72
cmd/mount/test/seek_speed.go
Normal file
@@ -0,0 +1,72 @@
|
||||
// +build ignore
|
||||
|
||||
// Read blocks out of a single file to time the seeking code
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// Flags
|
||||
iterations = flag.Int("n", 25, "Iterations to try")
|
||||
maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
|
||||
randSeed = flag.Int64("seed", 1, "Seed for the random number generator")
|
||||
)
|
||||
|
||||
func randomSeekTest(size int64, in *os.File, name string) {
|
||||
start := rand.Int63n(size)
|
||||
blockSize := rand.Intn(*maxBlockSize)
|
||||
if int64(blockSize) > size-start {
|
||||
blockSize = int(size - start)
|
||||
}
|
||||
log.Printf("Reading %d from %d", blockSize, start)
|
||||
|
||||
_, err := in.Seek(start, 0)
|
||||
if err != nil {
|
||||
log.Fatalf("Seek failed on %q: %v", name, err)
|
||||
}
|
||||
|
||||
buf := make([]byte, blockSize)
|
||||
_, err = io.ReadFull(in, buf)
|
||||
if err != nil {
|
||||
log.Fatalf("Read failed on %q: %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
if len(args) != 1 {
|
||||
log.Fatalf("Require 1 file as argument")
|
||||
}
|
||||
rand.Seed(*randSeed)
|
||||
|
||||
name := args[0]
|
||||
in, err := os.Open(name)
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't open %q: %v", name, err)
|
||||
}
|
||||
|
||||
fi, err := in.Stat()
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't stat %q: %v", name, err)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
for i := 0; i < *iterations; i++ {
|
||||
randomSeekTest(fi.Size(), in, name)
|
||||
}
|
||||
dt := time.Since(start)
|
||||
log.Printf("That took %v for %d iterations, %v per iteration", dt, *iterations, dt/time.Duration(*iterations))
|
||||
|
||||
err = in.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Error closing %q: %v", name, err)
|
||||
}
|
||||
}
|
||||
115
cmd/mount/test/seeker.go
Normal file
115
cmd/mount/test/seeker.go
Normal file
@@ -0,0 +1,115 @@
|
||||
// +build ignore
|
||||
|
||||
// Read two files with lots of seeking to stress test the seek code
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// Flags
|
||||
iterations = flag.Int("n", 1E6, "Iterations to try")
|
||||
maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func randomSeekTest(size int64, in1, in2 *os.File, file1, file2 string) {
|
||||
start := rand.Int63n(size)
|
||||
blockSize := rand.Intn(*maxBlockSize)
|
||||
if int64(blockSize) > size-start {
|
||||
blockSize = int(size - start)
|
||||
}
|
||||
log.Printf("Reading %d from %d", blockSize, start)
|
||||
|
||||
_, err := in1.Seek(start, 0)
|
||||
if err != nil {
|
||||
log.Fatalf("Seek failed on %q: %v", file1, err)
|
||||
}
|
||||
_, err = in2.Seek(start, 0)
|
||||
if err != nil {
|
||||
log.Fatalf("Seek failed on %q: %v", file2, err)
|
||||
}
|
||||
|
||||
buf1 := make([]byte, blockSize)
|
||||
n1, err := io.ReadFull(in1, buf1)
|
||||
if err != nil {
|
||||
log.Fatalf("Read failed on %q: %v", file1, err)
|
||||
}
|
||||
|
||||
buf2 := make([]byte, blockSize)
|
||||
n2, err := io.ReadFull(in2, buf2)
|
||||
if err != nil {
|
||||
log.Fatalf("Read failed on %q: %v", file2, err)
|
||||
}
|
||||
|
||||
if n1 != n2 {
|
||||
log.Fatalf("Read different lengths %d (%q) != %d (%q)", n1, file1, n2, file2)
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf1, buf2) {
|
||||
log.Printf("Dumping different blocks")
|
||||
err = ioutil.WriteFile("/tmp/z1", buf1, 0777)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to write /tmp/z1: %v", err)
|
||||
}
|
||||
err = ioutil.WriteFile("/tmp/z2", buf2, 0777)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to write /tmp/z2: %v", err)
|
||||
}
|
||||
log.Fatalf("Read different contents - saved in /tmp/z1 and /tmp/z2")
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
if len(args) != 2 {
|
||||
log.Fatalf("Require 2 files as argument")
|
||||
}
|
||||
file1, file2 := args[0], args[1]
|
||||
in1, err := os.Open(file1)
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't open %q: %v", file1, err)
|
||||
}
|
||||
in2, err := os.Open(file2)
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't open %q: %v", file2, err)
|
||||
}
|
||||
|
||||
fi1, err := in1.Stat()
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't stat %q: %v", file1, err)
|
||||
}
|
||||
fi2, err := in2.Stat()
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't stat %q: %v", file2, err)
|
||||
}
|
||||
|
||||
if fi1.Size() != fi2.Size() {
|
||||
log.Fatalf("Files not the same size")
|
||||
}
|
||||
|
||||
for i := 0; i < *iterations; i++ {
|
||||
randomSeekTest(fi1.Size(), in1, in2, file1, file2)
|
||||
}
|
||||
|
||||
err = in1.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Error closing %q: %v", file1, err)
|
||||
}
|
||||
err = in2.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Error closing %q: %v", file2, err)
|
||||
}
|
||||
}
|
||||
129
cmd/mount/test/seekers.go
Normal file
129
cmd/mount/test/seekers.go
Normal file
@@ -0,0 +1,129 @@
|
||||
// +build ignore
|
||||
|
||||
// Read lots files with lots of simultaneous seeking to stress test the seek code
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// Flags
|
||||
iterations = flag.Int("n", 1E6, "Iterations to try")
|
||||
maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
|
||||
simultaneous = flag.Int("transfers", 16, "Number of simultaneous files to open")
|
||||
seeksPerFile = flag.Int("seeks", 8, "Seeks per file")
|
||||
mask = flag.Int64("mask", 0, "mask for seek, eg 0x7fff")
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func seekTest(n int, file string) {
|
||||
in, err := os.Open(file)
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't open %q: %v", file, err)
|
||||
}
|
||||
fi, err := in.Stat()
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't stat %q: %v", file, err)
|
||||
}
|
||||
size := fi.Size()
|
||||
|
||||
// FIXME make sure we try start and end
|
||||
|
||||
maxBlockSize := *maxBlockSize
|
||||
if int64(maxBlockSize) > size {
|
||||
maxBlockSize = int(size)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
start := rand.Int63n(size)
|
||||
if *mask != 0 {
|
||||
start &^= *mask
|
||||
}
|
||||
blockSize := rand.Intn(maxBlockSize)
|
||||
beyondEnd := false
|
||||
switch rand.Intn(10) {
|
||||
case 0:
|
||||
start = 0
|
||||
case 1:
|
||||
start = size - int64(blockSize)
|
||||
case 2:
|
||||
// seek beyond the end
|
||||
start = size + int64(blockSize)
|
||||
beyondEnd = true
|
||||
default:
|
||||
}
|
||||
if !beyondEnd && int64(blockSize) > size-start {
|
||||
blockSize = int(size - start)
|
||||
}
|
||||
log.Printf("%s: Reading %d from %d", file, blockSize, start)
|
||||
|
||||
_, err = in.Seek(start, 0)
|
||||
if err != nil {
|
||||
log.Fatalf("Seek failed on %q: %v", file, err)
|
||||
}
|
||||
|
||||
buf := make([]byte, blockSize)
|
||||
n, err := io.ReadFull(in, buf)
|
||||
if beyondEnd && err == io.EOF {
|
||||
// OK
|
||||
} else if err != nil {
|
||||
log.Fatalf("Read failed on %q: %v (%d)", file, err, n)
|
||||
}
|
||||
}
|
||||
|
||||
err = in.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Error closing %q: %v", file, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Find all the files in dir
|
||||
func findFiles(dir string) (files []string) {
|
||||
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
|
||||
if info.Mode().IsRegular() && info.Size() > 0 {
|
||||
files = append(files, path)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
sort.Strings(files)
|
||||
return files
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
if len(args) != 1 {
|
||||
log.Fatalf("Require a directory as argument")
|
||||
}
|
||||
dir := args[0]
|
||||
files := findFiles(dir)
|
||||
jobs := make(chan string, *simultaneous)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(*simultaneous)
|
||||
for i := 0; i < *simultaneous; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for file := range jobs {
|
||||
seekTest(*seeksPerFile, file)
|
||||
}
|
||||
}()
|
||||
}
|
||||
for i := 0; i < *iterations; i++ {
|
||||
i := rand.Intn(len(files))
|
||||
jobs <- files[i]
|
||||
//jobs <- files[i]
|
||||
}
|
||||
close(jobs)
|
||||
wg.Wait()
|
||||
}
|
||||
68
cmd/mount/write.go
Normal file
68
cmd/mount/write.go
Normal file
@@ -0,0 +1,68 @@
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"bazil.org/fuse"
|
||||
fusefs "bazil.org/fuse/fs"
|
||||
"github.com/ncw/rclone/cmd/mountlib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var errClosedFileHandle = errors.New("Attempt to use closed file handle")
|
||||
|
||||
// WriteFileHandle is an open for write handle on a File
|
||||
type WriteFileHandle struct {
|
||||
*mountlib.WriteFileHandle
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.Handle = (*WriteFileHandle)(nil)
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.HandleWriter = (*WriteFileHandle)(nil)
|
||||
|
||||
// Write data to the file handle
|
||||
func (fh *WriteFileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
|
||||
defer fs.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
|
||||
n, err := fh.WriteFileHandle.Write(req.Data, req.Offset)
|
||||
if err != nil {
|
||||
return translateError(err)
|
||||
}
|
||||
resp.Size = int(n)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush is called on each close() of a file descriptor. So if a
|
||||
// filesystem wants to return write errors in close() and the file has
|
||||
// cached dirty data, this is a good place to write back data and
|
||||
// return any errors. Since many applications ignore close() errors
|
||||
// this is not always useful.
|
||||
//
|
||||
// NOTE: The flush() method may be called more than once for each
|
||||
// open(). This happens if more than one file descriptor refers to an
|
||||
// opened file due to dup(), dup2() or fork() calls. It is not
|
||||
// possible to determine if a flush is final, so each flush should be
|
||||
// treated equally. Multiple write-flush sequences are relatively
|
||||
// rare, so this shouldn't be a problem.
|
||||
//
|
||||
// Filesystems shouldn't assume that flush will always be called after
|
||||
// some writes, or that if will be called at all.
|
||||
func (fh *WriteFileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) (err error) {
|
||||
defer fs.Trace(fh, "")("err=%v", &err)
|
||||
return translateError(fh.WriteFileHandle.Flush())
|
||||
}
|
||||
|
||||
var _ fusefs.HandleReleaser = (*WriteFileHandle)(nil)
|
||||
|
||||
// Release is called when we are finished with the file handle
|
||||
//
|
||||
// It isn't called directly from userspace so the error is ignored by
|
||||
// the kernel
|
||||
func (fh *WriteFileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) (err error) {
|
||||
defer fs.Trace(fh, "")("err=%v", &err)
|
||||
return translateError(fh.WriteFileHandle.Release())
|
||||
}
|
||||
60
cmd/mountlib/createinfo.go
Normal file
60
cmd/mountlib/createinfo.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package mountlib
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// info to create a new object
|
||||
type createInfo struct {
|
||||
f fs.Fs
|
||||
remote string
|
||||
}
|
||||
|
||||
func newCreateInfo(f fs.Fs, remote string) *createInfo {
|
||||
return &createInfo{
|
||||
f: f,
|
||||
remote: remote,
|
||||
}
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (ci *createInfo) Fs() fs.Info {
|
||||
return ci.f
|
||||
}
|
||||
|
||||
// String returns the remote path
|
||||
func (ci *createInfo) String() string {
|
||||
return ci.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (ci *createInfo) Remote() string {
|
||||
return ci.remote
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (ci *createInfo) Hash(fs.HashType) (string, error) {
|
||||
return "", fs.ErrHashUnsupported
|
||||
}
|
||||
|
||||
// ModTime returns the modification date of the file
|
||||
// It should return a best guess if one isn't available
|
||||
func (ci *createInfo) ModTime() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (ci *createInfo) Size() int64 {
|
||||
// FIXME this means this won't work with all remotes...
|
||||
return 0
|
||||
}
|
||||
|
||||
// Storable says whether this object can be stored
|
||||
func (ci *createInfo) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
var _ fs.ObjectInfo = (*createInfo)(nil)
|
||||
486
cmd/mountlib/dir.go
Normal file
486
cmd/mountlib/dir.go
Normal file
@@ -0,0 +1,486 @@
|
||||
package mountlib
|
||||
|
||||
import (
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// DirEntry describes the contents of a directory entry
|
||||
//
|
||||
// It can be a file or a directory
|
||||
//
|
||||
// node may be nil, but o may not
|
||||
type DirEntry struct {
|
||||
Obj fs.DirEntry
|
||||
Node Node
|
||||
}
|
||||
|
||||
// Dir represents a directory entry
|
||||
type Dir struct {
|
||||
fsys *FS
|
||||
inode uint64 // inode number
|
||||
f fs.Fs
|
||||
path string
|
||||
modTime time.Time
|
||||
mu sync.Mutex // protects the following
|
||||
read time.Time // time directory entry last read
|
||||
items map[string]*DirEntry // NB can be nil when directory not read yet
|
||||
}
|
||||
|
||||
func newDir(fsys *FS, f fs.Fs, fsDir fs.Directory) *Dir {
|
||||
return &Dir{
|
||||
fsys: fsys,
|
||||
f: f,
|
||||
path: fsDir.Remote(),
|
||||
modTime: fsDir.ModTime(),
|
||||
inode: NewInode(),
|
||||
}
|
||||
}
|
||||
|
||||
// String converts it to printablee
|
||||
func (d *Dir) String() string {
|
||||
if d == nil {
|
||||
return "<nil *Dir>"
|
||||
}
|
||||
return d.path + "/"
|
||||
}
|
||||
|
||||
// IsFile returns false for Dir - satisfies Node interface
|
||||
func (d *Dir) IsFile() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Inode returns the inode number - satisfies Node interface
|
||||
func (d *Dir) Inode() uint64 {
|
||||
return d.inode
|
||||
}
|
||||
|
||||
// Node returns the Node assocuated with this - satisfies Noder interface
|
||||
func (d *Dir) Node() Node {
|
||||
return d
|
||||
}
|
||||
|
||||
// ForgetAll ensures the directory and all its children are purged
|
||||
// from the cache.
|
||||
func (d *Dir) ForgetAll() {
|
||||
d.ForgetPath("")
|
||||
}
|
||||
|
||||
// ForgetPath clears the cache for itself and all subdirectories if
|
||||
// they match the given path. The path is specified relative from the
|
||||
// directory it is called from.
|
||||
// It is not possible to traverse the directory tree upwards, i.e.
|
||||
// you cannot clear the cache for the Dir's ancestors or siblings.
|
||||
func (d *Dir) ForgetPath(relativePath string) {
|
||||
absPath := path.Join(d.path, relativePath)
|
||||
if absPath == "." {
|
||||
absPath = ""
|
||||
}
|
||||
|
||||
d.walk(absPath, func(dir *Dir) {
|
||||
fs.Debugf(dir.path, "forgetting directory cache")
|
||||
dir.read = time.Time{}
|
||||
dir.items = nil
|
||||
})
|
||||
}
|
||||
|
||||
// walk runs a function on all directories whose path matches
|
||||
// the given absolute one. It will be called on a directory's
|
||||
// children first. It will not apply the function to parent
|
||||
// nodes, regardless of the given path.
|
||||
func (d *Dir) walk(absPath string, fun func(*Dir)) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
if d.items != nil {
|
||||
for _, entry := range d.items {
|
||||
if dir, ok := entry.Node.(*Dir); ok {
|
||||
dir.walk(absPath, fun)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if d.path == absPath || absPath == "" || strings.HasPrefix(d.path, absPath+"/") {
|
||||
fun(d)
|
||||
}
|
||||
}
|
||||
|
||||
// rename should be called after the directory is renamed
|
||||
//
|
||||
// Reset the directory to new state, discarding all the objects and
|
||||
// reading everything again
|
||||
func (d *Dir) rename(newParent *Dir, fsDir fs.Directory) {
|
||||
d.ForgetAll()
|
||||
d.path = fsDir.Remote()
|
||||
d.modTime = fsDir.ModTime()
|
||||
d.read = time.Time{}
|
||||
}
|
||||
|
||||
// addObject adds a new object or directory to the directory
|
||||
//
|
||||
// note that we add new objects rather than updating old ones
|
||||
func (d *Dir) addObject(o fs.DirEntry, node Node) *DirEntry {
|
||||
item := &DirEntry{
|
||||
Obj: o,
|
||||
Node: node,
|
||||
}
|
||||
d.mu.Lock()
|
||||
if d.items != nil {
|
||||
d.items[path.Base(o.Remote())] = item
|
||||
}
|
||||
d.mu.Unlock()
|
||||
return item
|
||||
}
|
||||
|
||||
// delObject removes an object from the directory
|
||||
func (d *Dir) delObject(leaf string) {
|
||||
d.mu.Lock()
|
||||
if d.items != nil {
|
||||
delete(d.items, leaf)
|
||||
}
|
||||
d.mu.Unlock()
|
||||
}
|
||||
|
||||
// read the directory and sets d.items - must be called with the lock held
|
||||
func (d *Dir) _readDir() error {
|
||||
when := time.Now()
|
||||
if d.read.IsZero() || d.items == nil {
|
||||
// fs.Debugf(d.path, "Reading directory")
|
||||
} else {
|
||||
age := when.Sub(d.read)
|
||||
if age < d.fsys.dirCacheTime {
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(d.path, "Re-reading directory (%v old)", age)
|
||||
}
|
||||
entries, err := fs.ListDirSorted(d.f, false, d.path)
|
||||
if err == fs.ErrorDirNotFound {
|
||||
// We treat directory not found as empty because we
|
||||
// create directories on the fly
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
// NB when we re-read a directory after its cache has expired
|
||||
// we drop the old files which should lead to correct
|
||||
// behaviour but may not be very efficient.
|
||||
|
||||
// Keep a note of the previous contents of the directory
|
||||
oldItems := d.items
|
||||
|
||||
// Cache the items by name
|
||||
d.items = make(map[string]*DirEntry, len(entries))
|
||||
for _, entry := range entries {
|
||||
switch item := entry.(type) {
|
||||
case fs.Object:
|
||||
obj := item
|
||||
name := path.Base(obj.Remote())
|
||||
d.items[name] = &DirEntry{
|
||||
Obj: obj,
|
||||
Node: nil,
|
||||
}
|
||||
case fs.Directory:
|
||||
dir := item
|
||||
name := path.Base(dir.Remote())
|
||||
// Use old dir value if it exists
|
||||
if oldItems != nil {
|
||||
if oldItem, ok := oldItems[name]; ok {
|
||||
if _, ok := oldItem.Obj.(fs.Directory); ok {
|
||||
d.items[name] = oldItem
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
d.items[name] = &DirEntry{
|
||||
Obj: dir,
|
||||
Node: nil,
|
||||
}
|
||||
default:
|
||||
err = errors.Errorf("unknown type %T", item)
|
||||
fs.Errorf(d.path, "readDir error: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
d.read = when
|
||||
return nil
|
||||
}
|
||||
|
||||
// lookup a single item in the directory
|
||||
//
|
||||
// returns ENOENT if not found.
|
||||
func (d *Dir) lookup(leaf string) (*DirEntry, error) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
err := d._readDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
item, ok := d.items[leaf]
|
||||
if !ok {
|
||||
return nil, ENOENT
|
||||
}
|
||||
return item, nil
|
||||
}
|
||||
|
||||
// Check to see if a directory is empty
|
||||
func (d *Dir) isEmpty() (bool, error) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
err := d._readDir()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(d.items) == 0, nil
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the directory
|
||||
func (d *Dir) ModTime() time.Time {
|
||||
// fs.Debugf(d.path, "Dir.ModTime %v", d.modTime)
|
||||
return d.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modTime for this dir
|
||||
func (d *Dir) SetModTime(modTime time.Time) error {
|
||||
if d.fsys.readOnly {
|
||||
return EROFS
|
||||
}
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
d.modTime = modTime
|
||||
return nil
|
||||
}
|
||||
|
||||
// lookupNode calls lookup then makes sure the node is not nil in the DirEntry
|
||||
func (d *Dir) lookupNode(leaf string) (item *DirEntry, err error) {
|
||||
item, err = d.lookup(leaf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if item.Node != nil {
|
||||
return item, nil
|
||||
}
|
||||
var node Node
|
||||
switch x := item.Obj.(type) {
|
||||
case fs.Object:
|
||||
node, err = newFile(d, x, leaf), nil
|
||||
case fs.Directory:
|
||||
node, err = newDir(d.fsys, d.f, x), nil
|
||||
default:
|
||||
err = errors.Errorf("unknown type %T", item)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
item = d.addObject(item.Obj, node)
|
||||
return item, nil
|
||||
}
|
||||
|
||||
// Lookup looks up a specific entry in the receiver.
|
||||
//
|
||||
// Lookup should return a Node corresponding to the entry. If the
|
||||
// name does not exist in the directory, Lookup should return ENOENT.
|
||||
//
|
||||
// Lookup need not to handle the names "." and "..".
|
||||
func (d *Dir) Lookup(name string) (node Node, err error) {
|
||||
path := path.Join(d.path, name)
|
||||
// fs.Debugf(path, "Dir.Lookup")
|
||||
item, err := d.lookupNode(name)
|
||||
if err != nil {
|
||||
if err != ENOENT {
|
||||
fs.Errorf(path, "Dir.Lookup error: %v", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// fs.Debugf(path, "Dir.Lookup OK")
|
||||
return item.Node, nil
|
||||
}
|
||||
|
||||
// ReadDirAll reads the contents of the directory
|
||||
func (d *Dir) ReadDirAll() (items []*DirEntry, err error) {
|
||||
// fs.Debugf(d.path, "Dir.ReadDirAll")
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
err = d._readDir()
|
||||
if err != nil {
|
||||
fs.Debugf(d.path, "Dir.ReadDirAll error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
for _, item := range d.items {
|
||||
items = append(items, item)
|
||||
}
|
||||
// fs.Debugf(d.path, "Dir.ReadDirAll OK with %d entries", len(items))
|
||||
return items, nil
|
||||
}
|
||||
|
||||
// Create makes a new file
|
||||
func (d *Dir) Create(name string) (*File, *WriteFileHandle, error) {
|
||||
if d.fsys.readOnly {
|
||||
return nil, nil, EROFS
|
||||
}
|
||||
path := path.Join(d.path, name)
|
||||
// fs.Debugf(path, "Dir.Create")
|
||||
src := newCreateInfo(d.f, path)
|
||||
// This gets added to the directory when the file is written
|
||||
file := newFile(d, nil, name)
|
||||
fh, err := newWriteFileHandle(d, file, src)
|
||||
if err != nil {
|
||||
fs.Errorf(path, "Dir.Create error: %v", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
// fs.Debugf(path, "Dir.Create OK")
|
||||
return file, fh, nil
|
||||
}
|
||||
|
||||
// Mkdir creates a new directory
|
||||
func (d *Dir) Mkdir(name string) (*Dir, error) {
|
||||
if d.fsys.readOnly {
|
||||
return nil, EROFS
|
||||
}
|
||||
path := path.Join(d.path, name)
|
||||
// fs.Debugf(path, "Dir.Mkdir")
|
||||
err := d.f.Mkdir(path)
|
||||
if err != nil {
|
||||
fs.Errorf(path, "Dir.Mkdir failed to create directory: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
fsDir := fs.NewDir(path, time.Now())
|
||||
dir := newDir(d.fsys, d.f, fsDir)
|
||||
d.addObject(fsDir, dir)
|
||||
// fs.Debugf(path, "Dir.Mkdir OK")
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
// Remove removes the entry with the given name from
|
||||
// the receiver, which must be a directory. The entry to be removed
|
||||
// may correspond to a file (unlink) or to a directory (rmdir).
|
||||
func (d *Dir) Remove(name string) error {
|
||||
if d.fsys.readOnly {
|
||||
return EROFS
|
||||
}
|
||||
path := path.Join(d.path, name)
|
||||
// fs.Debugf(path, "Dir.Remove")
|
||||
item, err := d.lookupNode(name)
|
||||
if err != nil {
|
||||
fs.Errorf(path, "Dir.Remove error: %v", err)
|
||||
return err
|
||||
}
|
||||
switch x := item.Obj.(type) {
|
||||
case fs.Object:
|
||||
err = x.Remove()
|
||||
if err != nil {
|
||||
fs.Errorf(path, "Dir.Remove file error: %v", err)
|
||||
return err
|
||||
}
|
||||
case fs.Directory:
|
||||
// Check directory is empty first
|
||||
dir := item.Node.(*Dir)
|
||||
empty, err := dir.isEmpty()
|
||||
if err != nil {
|
||||
fs.Errorf(path, "Dir.Remove dir error: %v", err)
|
||||
return err
|
||||
}
|
||||
if !empty {
|
||||
fs.Errorf(path, "Dir.Remove not empty")
|
||||
return ENOTEMPTY
|
||||
}
|
||||
// remove directory
|
||||
err = d.f.Rmdir(path)
|
||||
if err != nil {
|
||||
fs.Errorf(path, "Dir.Remove failed to remove directory: %v", err)
|
||||
return err
|
||||
}
|
||||
default:
|
||||
fs.Errorf(path, "Dir.Remove unknown type %T", item)
|
||||
return errors.Errorf("unknown type %T", item)
|
||||
}
|
||||
// Remove the item from the directory listing
|
||||
d.delObject(name)
|
||||
// fs.Debugf(path, "Dir.Remove OK")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rename the file
|
||||
func (d *Dir) Rename(oldName, newName string, destDir *Dir) error {
|
||||
if d.fsys.readOnly {
|
||||
return EROFS
|
||||
}
|
||||
oldPath := path.Join(d.path, oldName)
|
||||
newPath := path.Join(destDir.path, newName)
|
||||
// fs.Debugf(oldPath, "Dir.Rename to %q", newPath)
|
||||
oldItem, err := d.lookupNode(oldName)
|
||||
if err != nil {
|
||||
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
|
||||
return err
|
||||
}
|
||||
var newObj fs.DirEntry
|
||||
oldNode := oldItem.Node
|
||||
switch x := oldItem.Obj.(type) {
|
||||
case fs.Object:
|
||||
oldObject := x
|
||||
// FIXME: could Copy then Delete if Move not available
|
||||
// - though care needed if case insensitive...
|
||||
doMove := d.f.Features().Move
|
||||
if doMove == nil {
|
||||
err := errors.Errorf("Fs %q can't rename files (no Move)", d.f)
|
||||
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
|
||||
return err
|
||||
}
|
||||
newObject, err := doMove(oldObject, newPath)
|
||||
if err != nil {
|
||||
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
|
||||
return err
|
||||
}
|
||||
newObj = newObject
|
||||
// Update the node with the new details
|
||||
if oldNode != nil {
|
||||
if oldFile, ok := oldNode.(*File); ok {
|
||||
fs.Debugf(oldItem.Obj, "Updating file with %v %p", newObject, oldFile)
|
||||
oldFile.rename(destDir, newObject)
|
||||
}
|
||||
}
|
||||
case fs.Directory:
|
||||
doDirMove := d.f.Features().DirMove
|
||||
if doDirMove == nil {
|
||||
err := errors.Errorf("Fs %q can't rename directories (no DirMove)", d.f)
|
||||
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
|
||||
return err
|
||||
}
|
||||
srcRemote := x.Remote()
|
||||
dstRemote := newPath
|
||||
err = doDirMove(d.f, srcRemote, dstRemote)
|
||||
if err != nil {
|
||||
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
|
||||
return err
|
||||
}
|
||||
newDir := fs.NewDirCopy(x).SetRemote(newPath)
|
||||
newObj = newDir
|
||||
// Update the node with the new details
|
||||
if oldNode != nil {
|
||||
if oldDir, ok := oldNode.(*Dir); ok {
|
||||
fs.Debugf(oldItem.Obj, "Updating dir with %v %p", newDir, oldDir)
|
||||
oldDir.rename(destDir, newDir)
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = errors.Errorf("unknown type %T", oldItem)
|
||||
fs.Errorf(d.path, "Dir.ReadDirAll error: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Show moved - delete from old dir and add to new
|
||||
d.delObject(oldName)
|
||||
destDir.addObject(newObj, oldNode)
|
||||
|
||||
// fs.Debugf(newPath, "Dir.Rename renamed from %q", oldPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fsync the directory
|
||||
//
|
||||
// Note that we don't do anything except return OK
|
||||
func (d *Dir) Fsync() error {
|
||||
return nil
|
||||
}
|
||||
39
cmd/mountlib/errors.go
Normal file
39
cmd/mountlib/errors.go
Normal file
@@ -0,0 +1,39 @@
|
||||
// Cross platform errors
|
||||
|
||||
package mountlib
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Error describes low level errors in a cross platform way
|
||||
type Error byte
|
||||
|
||||
// NB if changing errors translateError in cmd/mount/fs.go, cmd/cmount/fs.go
|
||||
|
||||
// Low level errors
|
||||
const (
|
||||
OK Error = iota
|
||||
ENOENT
|
||||
ENOTEMPTY
|
||||
EEXIST
|
||||
ESPIPE
|
||||
EBADF
|
||||
EROFS
|
||||
)
|
||||
|
||||
var errorNames = []string{
|
||||
OK: "Success",
|
||||
ENOENT: "No such file or directory",
|
||||
ENOTEMPTY: "Directory not empty",
|
||||
EEXIST: "File exists",
|
||||
ESPIPE: "Illegal seek",
|
||||
EBADF: "Bad file descriptor",
|
||||
EROFS: "Read only file system",
|
||||
}
|
||||
|
||||
// Error renders the error as a string
|
||||
func (e Error) Error() string {
|
||||
if int(e) >= len(errorNames) {
|
||||
return fmt.Sprintf("Low level error %d", e)
|
||||
}
|
||||
return errorNames[e]
|
||||
}
|
||||
220
cmd/mountlib/file.go
Normal file
220
cmd/mountlib/file.go
Normal file
@@ -0,0 +1,220 @@
|
||||
package mountlib
|
||||
|
||||
import (
|
||||
"path"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// File represents a file
|
||||
type File struct {
|
||||
inode uint64 // inode number
|
||||
size int64 // size of file - read and written with atomic int64 - must be 64 bit aligned
|
||||
d *Dir // parent directory - read only
|
||||
mu sync.RWMutex // protects the following
|
||||
o fs.Object // NB o may be nil if file is being written
|
||||
leaf string // leaf name of the object
|
||||
writers int // number of writers for this file
|
||||
pendingModTime time.Time // will be applied once o becomes available, i.e. after file was written
|
||||
}
|
||||
|
||||
// newFile creates a new File
|
||||
func newFile(d *Dir, o fs.Object, leaf string) *File {
|
||||
return &File{
|
||||
d: d,
|
||||
o: o,
|
||||
leaf: leaf,
|
||||
inode: NewInode(),
|
||||
}
|
||||
}
|
||||
|
||||
// String converts it to printable
|
||||
func (f *File) String() string {
|
||||
if f == nil {
|
||||
return "<nil *File>"
|
||||
}
|
||||
return path.Join(f.d.path, f.leaf)
|
||||
}
|
||||
|
||||
// IsFile returns true for File - satisfies Node interface
|
||||
func (f *File) IsFile() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Inode returns the inode number - satisfies Node interface
|
||||
func (f *File) Inode() uint64 {
|
||||
return f.inode
|
||||
}
|
||||
|
||||
// Node returns the Node assocuated with this - satisfies Noder interface
|
||||
func (f *File) Node() Node {
|
||||
return f
|
||||
}
|
||||
|
||||
// rename should be called to update f.o and f.d after a rename
|
||||
func (f *File) rename(d *Dir, o fs.Object) {
|
||||
f.mu.Lock()
|
||||
f.o = o
|
||||
f.d = d
|
||||
f.mu.Unlock()
|
||||
}
|
||||
|
||||
// addWriters increments or decrements the writers
|
||||
func (f *File) addWriters(n int) {
|
||||
f.mu.Lock()
|
||||
f.writers += n
|
||||
f.mu.Unlock()
|
||||
}
|
||||
|
||||
// Attr fills out the attributes for the file
|
||||
func (f *File) Attr(noModTime bool) (modTime time.Time, Size, Blocks uint64, err error) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
// if o is nil it isn't valid yet or there are writers, so return the size so far
|
||||
if f.o == nil || f.writers != 0 {
|
||||
Size = uint64(atomic.LoadInt64(&f.size))
|
||||
if !noModTime && !f.pendingModTime.IsZero() {
|
||||
modTime = f.pendingModTime
|
||||
}
|
||||
} else {
|
||||
Size = uint64(f.o.Size())
|
||||
if !noModTime {
|
||||
modTime = f.o.ModTime()
|
||||
}
|
||||
}
|
||||
Blocks = (Size + 511) / 512
|
||||
// fs.Debugf(f.o, "File.Attr modTime=%v, Size=%d, Blocks=%v", modTime, Size, Blocks)
|
||||
return
|
||||
}
|
||||
|
||||
// SetModTime sets the modtime for the file
|
||||
func (f *File) SetModTime(modTime time.Time) error {
|
||||
if f.d.fsys.readOnly {
|
||||
return EROFS
|
||||
}
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
f.pendingModTime = modTime
|
||||
|
||||
if f.o != nil {
|
||||
return f.applyPendingModTime()
|
||||
}
|
||||
|
||||
// queue up for later, hoping f.o becomes available
|
||||
return nil
|
||||
}
|
||||
|
||||
// call with the mutex held
|
||||
func (f *File) applyPendingModTime() error {
|
||||
defer func() { f.pendingModTime = time.Time{} }()
|
||||
|
||||
if f.pendingModTime.IsZero() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if f.o == nil {
|
||||
return errors.New("Cannot apply ModTime, file object is not available")
|
||||
}
|
||||
|
||||
err := f.o.SetModTime(f.pendingModTime)
|
||||
switch err {
|
||||
case nil:
|
||||
fs.Debugf(f.o, "File.applyPendingModTime OK")
|
||||
case fs.ErrorCantSetModTime, fs.ErrorCantSetModTimeWithoutDelete:
|
||||
// do nothing, in order to not break "touch somefile" if it exists already
|
||||
default:
|
||||
fs.Errorf(f.o, "File.applyPendingModTime error: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update the size while writing
|
||||
func (f *File) setSize(n int64) {
|
||||
atomic.StoreInt64(&f.size, n)
|
||||
}
|
||||
|
||||
// Update the object when written
|
||||
func (f *File) setObject(o fs.Object) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
f.o = o
|
||||
_ = f.applyPendingModTime()
|
||||
f.d.addObject(o, f)
|
||||
}
|
||||
|
||||
// Wait for f.o to become non nil for a short time returning it or an
|
||||
// error
|
||||
//
|
||||
// Call without the mutex held
|
||||
func (f *File) waitForValidObject() (o fs.Object, err error) {
|
||||
for i := 0; i < 50; i++ {
|
||||
f.mu.Lock()
|
||||
o = f.o
|
||||
writers := f.writers
|
||||
f.mu.Unlock()
|
||||
if o != nil {
|
||||
return o, nil
|
||||
}
|
||||
if writers == 0 {
|
||||
return nil, errors.New("can't open file - writer failed")
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
return nil, ENOENT
|
||||
}
|
||||
|
||||
// OpenRead open the file for read
|
||||
func (f *File) OpenRead() (fh *ReadFileHandle, err error) {
|
||||
// if o is nil it isn't valid yet
|
||||
o, err := f.waitForValidObject()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// fs.Debugf(o, "File.OpenRead")
|
||||
|
||||
fh, err = newReadFileHandle(f, o)
|
||||
err = errors.Wrap(err, "open for read")
|
||||
|
||||
if err != nil {
|
||||
fs.Errorf(o, "File.OpenRead failed: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
// OpenWrite open the file for write
|
||||
func (f *File) OpenWrite() (fh *WriteFileHandle, err error) {
|
||||
if f.d.fsys.readOnly {
|
||||
return nil, EROFS
|
||||
}
|
||||
// if o is nil it isn't valid yet
|
||||
o, err := f.waitForValidObject()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// fs.Debugf(o, "File.OpenWrite")
|
||||
|
||||
src := newCreateInfo(f.d.f, o.Remote())
|
||||
fh, err = newWriteFileHandle(f.d, f, src)
|
||||
err = errors.Wrap(err, "open for write")
|
||||
|
||||
if err != nil {
|
||||
fs.Errorf(o, "File.OpenWrite failed: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
// Fsync the file
|
||||
//
|
||||
// Note that we don't do anything except return OK
|
||||
func (f *File) Fsync() error {
|
||||
return nil
|
||||
}
|
||||
139
cmd/mountlib/fs.go
Normal file
139
cmd/mountlib/fs.go
Normal file
@@ -0,0 +1,139 @@
|
||||
package mountlib
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// Node represents either a *Dir or a *File
|
||||
type Node interface {
|
||||
IsFile() bool
|
||||
Inode() uint64
|
||||
}
|
||||
|
||||
var (
|
||||
_ Node = (*File)(nil)
|
||||
_ Node = (*Dir)(nil)
|
||||
)
|
||||
|
||||
// Noder represents something which can return a node
|
||||
type Noder interface {
|
||||
fmt.Stringer
|
||||
Node() Node
|
||||
}
|
||||
|
||||
var (
|
||||
_ Noder = (*File)(nil)
|
||||
_ Noder = (*Dir)(nil)
|
||||
_ Noder = (*ReadFileHandle)(nil)
|
||||
_ Noder = (*WriteFileHandle)(nil)
|
||||
)
|
||||
|
||||
// FS represents the top level filing system
|
||||
type FS struct {
|
||||
f fs.Fs
|
||||
root *Dir
|
||||
noSeek bool // don't allow seeking if set
|
||||
noChecksum bool // don't check checksums if set
|
||||
readOnly bool // if set FS is read only
|
||||
dirCacheTime time.Duration // how long to consider directory listing cache valid
|
||||
}
|
||||
|
||||
// NewFS creates a new filing system and root directory
|
||||
func NewFS(f fs.Fs) *FS {
|
||||
fsDir := fs.NewDir("", time.Now())
|
||||
fsys := &FS{
|
||||
f: f,
|
||||
}
|
||||
|
||||
if NoSeek {
|
||||
fsys.noSeek = true
|
||||
}
|
||||
if NoChecksum {
|
||||
fsys.noChecksum = true
|
||||
}
|
||||
if ReadOnly {
|
||||
fsys.readOnly = true
|
||||
}
|
||||
fsys.dirCacheTime = DirCacheTime
|
||||
|
||||
fsys.root = newDir(fsys, f, fsDir)
|
||||
|
||||
if PollInterval > 0 {
|
||||
fsys.PollChanges(PollInterval)
|
||||
}
|
||||
return fsys
|
||||
}
|
||||
|
||||
// PollChanges will poll the remote every pollInterval for changes if the remote
|
||||
// supports it. If a non-polling option is used, the given time interval can be
|
||||
// ignored
|
||||
func (fsys *FS) PollChanges(pollInterval time.Duration) *FS {
|
||||
doDirChangeNotify := fsys.f.Features().DirChangeNotify
|
||||
if doDirChangeNotify != nil {
|
||||
doDirChangeNotify(fsys.root.ForgetPath, pollInterval)
|
||||
}
|
||||
return fsys
|
||||
}
|
||||
|
||||
// Root returns the root node
|
||||
func (fsys *FS) Root() (*Dir, error) {
|
||||
// fs.Debugf(fsys.f, "Root()")
|
||||
return fsys.root, nil
|
||||
}
|
||||
|
||||
var inodeCount uint64
|
||||
|
||||
// NewInode creates a new unique inode number
|
||||
func NewInode() (inode uint64) {
|
||||
return atomic.AddUint64(&inodeCount, 1)
|
||||
}
|
||||
|
||||
// Lookup finds the Node by path starting from the root
|
||||
func (fsys *FS) Lookup(path string) (node Node, err error) {
|
||||
node = fsys.root
|
||||
for path != "" {
|
||||
i := strings.IndexRune(path, '/')
|
||||
var name string
|
||||
if i < 0 {
|
||||
name, path = path, ""
|
||||
} else {
|
||||
name, path = path[:i], path[i+1:]
|
||||
}
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
dir, ok := node.(*Dir)
|
||||
if !ok {
|
||||
// We need to look in a directory, but found a file
|
||||
return nil, ENOENT
|
||||
}
|
||||
node, err = dir.Lookup(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Statfs is called to obtain file system metadata.
|
||||
// It should write that data to resp.
|
||||
func (fsys *FS) Statfs() error {
|
||||
/* FIXME
|
||||
const blockSize = 4096
|
||||
const fsBlocks = (1 << 50) / blockSize
|
||||
resp.Blocks = fsBlocks // Total data blocks in file system.
|
||||
resp.Bfree = fsBlocks // Free blocks in file system.
|
||||
resp.Bavail = fsBlocks // Free blocks in file system if you're not root.
|
||||
resp.Files = 1E9 // Total files in file system.
|
||||
resp.Ffree = 1E9 // Free files in file system.
|
||||
resp.Bsize = blockSize // Block size
|
||||
resp.Namelen = 255 // Maximum file name length?
|
||||
resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
|
||||
*/
|
||||
return nil
|
||||
}
|
||||
172
cmd/mountlib/mount.go
Normal file
172
cmd/mountlib/mount.go
Normal file
@@ -0,0 +1,172 @@
|
||||
package mountlib
|
||||
|
||||
// Globals
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Options set by command line flags
|
||||
var (
|
||||
NoModTime = false
|
||||
NoChecksum = false
|
||||
DebugFUSE = false
|
||||
NoSeek = false
|
||||
DirCacheTime = 5 * 60 * time.Second
|
||||
PollInterval = time.Minute
|
||||
// mount options
|
||||
ReadOnly = false
|
||||
AllowNonEmpty = false
|
||||
AllowRoot = false
|
||||
AllowOther = false
|
||||
DefaultPermissions = false
|
||||
WritebackCache = false
|
||||
MaxReadAhead fs.SizeSuffix = 128 * 1024
|
||||
Umask = 0
|
||||
UID = ^uint32(0) // these values instruct WinFSP-FUSE to use the current user
|
||||
GID = ^uint32(0) // overriden for non windows in mount_unix.go
|
||||
// foreground = false
|
||||
// default permissions for directories - modified by umask in Mount
|
||||
DirPerms = os.FileMode(0777)
|
||||
FilePerms = os.FileMode(0666)
|
||||
ExtraOptions *[]string
|
||||
ExtraFlags *[]string
|
||||
)
|
||||
|
||||
// NewMountCommand makes a mount command with the given name and Mount function
|
||||
func NewMountCommand(commandName string, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: commandName + " remote:path /path/to/mountpoint",
|
||||
Short: `Mount the remote as a mountpoint. **EXPERIMENTAL**`,
|
||||
Long: `
|
||||
rclone ` + commandName + ` allows Linux, FreeBSD, macOS and Windows to
|
||||
mount any of Rclone's cloud storage systems as a file system with
|
||||
FUSE.
|
||||
|
||||
This is **EXPERIMENTAL** - use with care.
|
||||
|
||||
First set up your remote using ` + "`rclone config`" + `. Check it works with ` + "`rclone ls`" + ` etc.
|
||||
|
||||
Start the mount like this
|
||||
|
||||
rclone ` + commandName + ` remote:path/to/files /path/to/local/mount
|
||||
|
||||
Or on Windows like this where X: is an unused drive letter
|
||||
|
||||
rclone ` + commandName + ` remote:path/to/files X:
|
||||
|
||||
When the program ends, either via Ctrl+C or receiving a SIGINT or SIGTERM signal,
|
||||
the mount is automatically stopped.
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
When that happens, it is the user's responsibility to stop the mount manually with
|
||||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
# OS X
|
||||
umount /path/to/local/mount
|
||||
|
||||
### Limitations ###
|
||||
|
||||
This can only write files seqentially, it can only seek when reading.
|
||||
This means that many applications won't work with their files on an
|
||||
rclone mount.
|
||||
|
||||
The bucket based remotes (eg Swift, S3, Google Compute Storage, B2,
|
||||
Hubic) won't work from the root - you will need to specify a bucket,
|
||||
or a path within the bucket. So ` + "`swift:`" + ` won't work whereas
|
||||
` + "`swift:bucket`" + ` will as will ` + "`swift:bucket/path`" + `.
|
||||
None of these support the concept of directories, so empty
|
||||
directories will have a tendency to disappear once they fall out of
|
||||
the directory cache.
|
||||
|
||||
Only supported on Linux, FreeBSD, OS X and Windows at the moment.
|
||||
|
||||
### rclone ` + commandName + ` vs rclone sync/copy ##
|
||||
|
||||
File systems expect things to be 100% reliable, whereas cloud storage
|
||||
systems are a long way from 100% reliable. The rclone sync/copy
|
||||
commands cope with this with lots of retries. However rclone ` + commandName + `
|
||||
can't use retries in the same way without making local copies of the
|
||||
uploads. This might happen in the future, but for the moment rclone
|
||||
` + commandName + ` won't do that, so will be less reliable than the rclone command.
|
||||
|
||||
### Filters ###
|
||||
|
||||
Note that all the rclone filters can be used to select a subset of the
|
||||
files to be visible in the mount.
|
||||
|
||||
### Directory Cache ###
|
||||
|
||||
Using the ` + "`--dir-cache-time`" + ` flag, you can set how long a
|
||||
directory should be considered up to date and not refreshed from the
|
||||
backend. Changes made locally in the mount may appear immediately or
|
||||
invalidate the cache. However, changes done on the remote will only
|
||||
be picked up once the cache expires.
|
||||
|
||||
Alternatively, you can send a ` + "`SIGHUP`" + ` signal to rclone for
|
||||
it to flush all directory caches, regardless of how old they are.
|
||||
Assuming only one rclone instance is running, you can reset the cache
|
||||
like this:
|
||||
|
||||
kill -SIGHUP $(pidof rclone)
|
||||
|
||||
### Bugs ###
|
||||
|
||||
* All the remotes should work for read, but some may not for write
|
||||
* those which need to know the size in advance won't - eg B2
|
||||
* maybe should pass in size as -1 to mean work it out
|
||||
* Or put in an an upload cache to cache the files on disk first
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fdst := cmd.NewFsDst(args)
|
||||
|
||||
// Mask permissions
|
||||
DirPerms = 0777 &^ os.FileMode(Umask)
|
||||
FilePerms = 0666 &^ os.FileMode(Umask)
|
||||
|
||||
// Show stats if the user has specifically requested them
|
||||
if cmd.ShowStats() {
|
||||
stopStats := cmd.StartStats()
|
||||
defer close(stopStats)
|
||||
}
|
||||
|
||||
err := Mount(fdst, args[1])
|
||||
if err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// Register the command
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
|
||||
// Add flags
|
||||
flags := commandDefintion.Flags()
|
||||
flags.BoolVarP(&NoModTime, "no-modtime", "", NoModTime, "Don't read/write the modification time (can speed things up).")
|
||||
flags.BoolVarP(&NoChecksum, "no-checksum", "", NoChecksum, "Don't compare checksums on up/download.")
|
||||
flags.BoolVarP(&DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
|
||||
flags.BoolVarP(&NoSeek, "no-seek", "", NoSeek, "Don't allow seeking in files.")
|
||||
flags.DurationVarP(&DirCacheTime, "dir-cache-time", "", DirCacheTime, "Time to cache directory entries for.")
|
||||
flags.DurationVarP(&PollInterval, "poll-interval", "", PollInterval, "Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable.")
|
||||
// mount options
|
||||
flags.BoolVarP(&ReadOnly, "read-only", "", ReadOnly, "Mount read-only.")
|
||||
flags.BoolVarP(&AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
|
||||
flags.BoolVarP(&AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
|
||||
flags.BoolVarP(&AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
|
||||
flags.BoolVarP(&DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
|
||||
flags.BoolVarP(&WritebackCache, "write-back-cache", "", WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
|
||||
flags.VarP(&MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
|
||||
ExtraOptions = flags.StringArrayP("option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
|
||||
ExtraFlags = flags.StringArrayP("fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
|
||||
//flags.BoolVarP(&foreground, "foreground", "", foreground, "Do not detach.")
|
||||
|
||||
platformFlags(flags)
|
||||
return commandDefintion
|
||||
}
|
||||
11
cmd/mountlib/mount_non_unix.go
Normal file
11
cmd/mountlib/mount_non_unix.go
Normal file
@@ -0,0 +1,11 @@
|
||||
// +build !linux,!darwin,!freebsd
|
||||
|
||||
package mountlib
|
||||
|
||||
import (
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// add any extra platform specific flags
|
||||
func platformFlags(flags *pflag.FlagSet) {
|
||||
}
|
||||
19
cmd/mountlib/mount_unix.go
Normal file
19
cmd/mountlib/mount_unix.go
Normal file
@@ -0,0 +1,19 @@
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package mountlib
|
||||
|
||||
import (
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// add any extra platform specific flags
|
||||
func platformFlags(flags *pflag.FlagSet) {
|
||||
flags.IntVarP(&Umask, "umask", "", Umask, "Override the permission bits set by the filesystem.")
|
||||
Umask = unix.Umask(0) // read the umask
|
||||
unix.Umask(Umask) // set it back to what it was
|
||||
UID = uint32(unix.Geteuid())
|
||||
GID = uint32(unix.Getegid())
|
||||
flags.Uint32VarP(&UID, "uid", "", UID, "Override the uid field set by the filesystem.")
|
||||
flags.Uint32VarP(&GID, "gid", "", GID, "Override the gid field set by the filesystem.")
|
||||
}
|
||||
229
cmd/mountlib/mounttest/dir.go
Normal file
229
cmd/mountlib/mounttest/dir.go
Normal file
@@ -0,0 +1,229 @@
|
||||
package mounttest
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestDirLs checks out listing
|
||||
func TestDirLs(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.checkDir(t, "")
|
||||
|
||||
run.mkdir(t, "a directory")
|
||||
run.createFile(t, "a file", "hello")
|
||||
|
||||
run.checkDir(t, "a directory/|a file 5")
|
||||
|
||||
run.rmdir(t, "a directory")
|
||||
run.rm(t, "a file")
|
||||
|
||||
run.checkDir(t, "")
|
||||
}
|
||||
|
||||
// TestDirCreateAndRemoveDir tests creating and removing a directory
|
||||
func TestDirCreateAndRemoveDir(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.mkdir(t, "dir")
|
||||
run.mkdir(t, "dir/subdir")
|
||||
run.checkDir(t, "dir/|dir/subdir/")
|
||||
|
||||
// Check we can't delete a directory with stuff in
|
||||
err := os.Remove(run.path("dir"))
|
||||
assert.Error(t, err, "file exists")
|
||||
|
||||
// Now delete subdir then dir - should produce no errors
|
||||
run.rmdir(t, "dir/subdir")
|
||||
run.checkDir(t, "dir/")
|
||||
run.rmdir(t, "dir")
|
||||
run.checkDir(t, "")
|
||||
}
|
||||
|
||||
// TestDirCreateAndRemoveFile tests creating and removing a file
|
||||
func TestDirCreateAndRemoveFile(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.mkdir(t, "dir")
|
||||
run.createFile(t, "dir/file", "potato")
|
||||
run.checkDir(t, "dir/|dir/file 6")
|
||||
|
||||
// Check we can't delete a directory with stuff in
|
||||
err := os.Remove(run.path("dir"))
|
||||
assert.Error(t, err, "file exists")
|
||||
|
||||
// Now delete file
|
||||
run.rm(t, "dir/file")
|
||||
|
||||
run.checkDir(t, "dir/")
|
||||
run.rmdir(t, "dir")
|
||||
run.checkDir(t, "")
|
||||
}
|
||||
|
||||
// TestDirRenameFile tests renaming a file
|
||||
func TestDirRenameFile(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.mkdir(t, "dir")
|
||||
run.createFile(t, "file", "potato")
|
||||
run.checkDir(t, "dir/|file 6")
|
||||
|
||||
err := os.Rename(run.path("file"), run.path("file2"))
|
||||
require.NoError(t, err)
|
||||
run.checkDir(t, "dir/|file2 6")
|
||||
|
||||
data, err := ioutil.ReadFile(run.path("file2"))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "potato", string(data))
|
||||
|
||||
err = os.Rename(run.path("file2"), run.path("dir/file3"))
|
||||
require.NoError(t, err)
|
||||
run.checkDir(t, "dir/|dir/file3 6")
|
||||
|
||||
data, err = ioutil.ReadFile(run.path("dir/file3"))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "potato", string(data))
|
||||
|
||||
run.rm(t, "dir/file3")
|
||||
run.rmdir(t, "dir")
|
||||
run.checkDir(t, "")
|
||||
}
|
||||
|
||||
// TestDirRenameEmptyDir tests renaming and empty directory
|
||||
func TestDirRenameEmptyDir(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.mkdir(t, "dir")
|
||||
run.mkdir(t, "dir1")
|
||||
run.checkDir(t, "dir/|dir1/")
|
||||
|
||||
err := os.Rename(run.path("dir1"), run.path("dir/dir2"))
|
||||
require.NoError(t, err)
|
||||
run.checkDir(t, "dir/|dir/dir2/")
|
||||
|
||||
err = os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
|
||||
require.NoError(t, err)
|
||||
run.checkDir(t, "dir/|dir/dir3/")
|
||||
|
||||
run.rmdir(t, "dir/dir3")
|
||||
run.rmdir(t, "dir")
|
||||
run.checkDir(t, "")
|
||||
}
|
||||
|
||||
// TestDirRenameFullDir tests renaming a full directory
|
||||
func TestDirRenameFullDir(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.mkdir(t, "dir")
|
||||
run.mkdir(t, "dir1")
|
||||
run.createFile(t, "dir1/potato.txt", "maris piper")
|
||||
run.checkDir(t, "dir/|dir1/|dir1/potato.txt 11")
|
||||
|
||||
err := os.Rename(run.path("dir1"), run.path("dir/dir2"))
|
||||
require.NoError(t, err)
|
||||
run.checkDir(t, "dir/|dir/dir2/|dir/dir2/potato.txt 11")
|
||||
|
||||
err = os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
|
||||
require.NoError(t, err)
|
||||
run.checkDir(t, "dir/|dir/dir3/|dir/dir3/potato.txt 11")
|
||||
|
||||
run.rm(t, "dir/dir3/potato.txt")
|
||||
run.rmdir(t, "dir/dir3")
|
||||
run.rmdir(t, "dir")
|
||||
run.checkDir(t, "")
|
||||
}
|
||||
|
||||
// TestDirModTime tests mod times
|
||||
func TestDirModTime(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.mkdir(t, "dir")
|
||||
mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
|
||||
err := os.Chtimes(run.path("dir"), mtime, mtime)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := os.Stat(run.path("dir"))
|
||||
require.NoError(t, err)
|
||||
|
||||
// avoid errors because of timezone differences
|
||||
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
|
||||
|
||||
run.rmdir(t, "dir")
|
||||
}
|
||||
|
||||
// TestDirCacheFlush tests fluching the dir cache
|
||||
func TestDirCacheFlush(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.checkDir(t, "")
|
||||
|
||||
run.mkdir(t, "dir")
|
||||
run.mkdir(t, "otherdir")
|
||||
run.createFile(t, "dir/file", "1")
|
||||
run.createFile(t, "otherdir/file", "1")
|
||||
|
||||
dm := newDirMap("otherdir/|otherdir/file 1|dir/|dir/file 1")
|
||||
localDm := make(dirMap)
|
||||
run.readLocal(t, localDm, "")
|
||||
assert.Equal(t, dm, localDm, "expected vs fuse mount")
|
||||
|
||||
err := run.fremote.Mkdir("dir/subdir")
|
||||
require.NoError(t, err)
|
||||
|
||||
root, err := run.filesys.Root()
|
||||
require.NoError(t, err)
|
||||
|
||||
// expect newly created "subdir" on remote to not show up
|
||||
root.ForgetPath("otherdir")
|
||||
run.readLocal(t, localDm, "")
|
||||
assert.Equal(t, dm, localDm, "expected vs fuse mount")
|
||||
|
||||
root.ForgetPath("dir")
|
||||
dm = newDirMap("otherdir/|otherdir/file 1|dir/|dir/file 1|dir/subdir/")
|
||||
run.readLocal(t, localDm, "")
|
||||
assert.Equal(t, dm, localDm, "expected vs fuse mount")
|
||||
|
||||
run.rm(t, "otherdir/file")
|
||||
run.rmdir(t, "otherdir")
|
||||
run.rm(t, "dir/file")
|
||||
run.rmdir(t, "dir/subdir")
|
||||
run.rmdir(t, "dir")
|
||||
run.checkDir(t, "")
|
||||
}
|
||||
|
||||
// TestDirCacheFlushOnDirRename tests flushing the dir cache on rename
|
||||
func TestDirCacheFlushOnDirRename(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
run.mkdir(t, "dir")
|
||||
run.createFile(t, "dir/file", "1")
|
||||
|
||||
dm := newDirMap("dir/|dir/file 1")
|
||||
localDm := make(dirMap)
|
||||
run.readLocal(t, localDm, "")
|
||||
assert.Equal(t, dm, localDm, "expected vs fuse mount")
|
||||
|
||||
// expect remotely created directory to not show up
|
||||
err := run.fremote.Mkdir("dir/subdir")
|
||||
require.NoError(t, err)
|
||||
run.readLocal(t, localDm, "")
|
||||
assert.Equal(t, dm, localDm, "expected vs fuse mount")
|
||||
|
||||
err = os.Rename(run.path("dir"), run.path("rid"))
|
||||
require.NoError(t, err)
|
||||
|
||||
dm = newDirMap("rid/|rid/subdir/|rid/file 1")
|
||||
localDm = make(dirMap)
|
||||
run.readLocal(t, localDm, "")
|
||||
assert.Equal(t, dm, localDm, "expected vs fuse mount")
|
||||
|
||||
run.rm(t, "rid/file")
|
||||
run.rmdir(t, "rid/subdir")
|
||||
run.rmdir(t, "rid")
|
||||
run.checkDir(t, "")
|
||||
}
|
||||
57
cmd/mountlib/mounttest/file.go
Normal file
57
cmd/mountlib/mounttest/file.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package mounttest
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestFileModTime tests mod times on files
|
||||
func TestFileModTime(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.createFile(t, "file", "123")
|
||||
|
||||
mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
|
||||
err := os.Chtimes(run.path("file"), mtime, mtime)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := os.Stat(run.path("file"))
|
||||
require.NoError(t, err)
|
||||
|
||||
// avoid errors because of timezone differences
|
||||
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
|
||||
|
||||
run.rm(t, "file")
|
||||
}
|
||||
|
||||
// TestFileModTimeWithOpenWriters tests mod time on open files
|
||||
func TestFileModTimeWithOpenWriters(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
|
||||
filepath := run.path("cp-archive-test")
|
||||
|
||||
f, err := os.Create(filepath)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = f.Write([]byte{104, 105})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.Chtimes(filepath, mtime, mtime)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = f.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := os.Stat(filepath)
|
||||
require.NoError(t, err)
|
||||
|
||||
// avoid errors because of timezone differences
|
||||
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
|
||||
|
||||
run.rm(t, "cp-archive-test")
|
||||
}
|
||||
316
cmd/mountlib/mounttest/fs.go
Normal file
316
cmd/mountlib/mounttest/fs.go
Normal file
@@ -0,0 +1,316 @@
|
||||
// Test suite for rclonefs
|
||||
|
||||
package mounttest
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/cmd/mountlib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
_ "github.com/ncw/rclone/fs/all" // import all the file systems
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
RemoteName = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
|
||||
SubDir = flag.Bool("subdir", false, "Set to test with a sub directory")
|
||||
Verbose = flag.Bool("verbose", false, "Set to enable logging")
|
||||
DumpHeaders = flag.Bool("dump-headers", false, "Set to dump headers (needs -verbose)")
|
||||
DumpBodies = flag.Bool("dump-bodies", false, "Set to dump bodies (needs -verbose)")
|
||||
Individual = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower")
|
||||
LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")
|
||||
)
|
||||
|
||||
type (
|
||||
// UnmountFn is called to unmount the file system
|
||||
UnmountFn func() error
|
||||
// MountFn is called to mount the file system
|
||||
MountFn func(f fs.Fs, mountpoint string) (*mountlib.FS, <-chan error, func() error, error)
|
||||
)
|
||||
|
||||
var (
|
||||
mountFn MountFn
|
||||
)
|
||||
|
||||
// TestMain drives the tests
|
||||
func TestMain(m *testing.M, fn MountFn) {
|
||||
mountFn = fn
|
||||
flag.Parse()
|
||||
run = newRun()
|
||||
rc := m.Run()
|
||||
run.Finalise()
|
||||
os.Exit(rc)
|
||||
}
|
||||
|
||||
// Run holds the remotes for a test run
|
||||
type Run struct {
|
||||
filesys *mountlib.FS
|
||||
mountPath string
|
||||
fremote fs.Fs
|
||||
fremoteName string
|
||||
cleanRemote func()
|
||||
umountResult <-chan error
|
||||
umountFn UnmountFn
|
||||
skip bool
|
||||
}
|
||||
|
||||
// run holds the master Run data
|
||||
var run *Run
|
||||
|
||||
// newRun initialise the remote mount for testing and returns a run
|
||||
// object.
|
||||
//
|
||||
// r.fremote is an empty remote Fs
|
||||
//
|
||||
// Finalise() will tidy them away when done.
|
||||
func newRun() *Run {
|
||||
r := &Run{
|
||||
umountResult: make(chan error, 1),
|
||||
}
|
||||
|
||||
// Never ask for passwords, fail instead.
|
||||
// If your local config is encrypted set environment variable
|
||||
// "RCLONE_CONFIG_PASS=hunter2" (or your password)
|
||||
*fs.AskPassword = false
|
||||
fs.LoadConfig()
|
||||
if *Verbose {
|
||||
fs.Config.LogLevel = fs.LogLevelDebug
|
||||
}
|
||||
fs.Config.DumpHeaders = *DumpHeaders
|
||||
fs.Config.DumpBodies = *DumpBodies
|
||||
fs.Config.LowLevelRetries = *LowLevelRetries
|
||||
var err error
|
||||
r.fremote, r.fremoteName, r.cleanRemote, err = fstest.RandomRemote(*RemoteName, *SubDir)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to open remote %q: %v", *RemoteName, err)
|
||||
}
|
||||
|
||||
err = r.fremote.Mkdir("")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to open mkdir %q: %v", *RemoteName, err)
|
||||
}
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
r.mountPath, err = ioutil.TempDir("", "rclonefs-mount")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create mount dir: %v", err)
|
||||
}
|
||||
} else {
|
||||
// Find a free drive letter
|
||||
drive := ""
|
||||
for letter := 'E'; letter <= 'Z'; letter++ {
|
||||
drive = string(letter) + ":"
|
||||
_, err := os.Stat(drive + "\\")
|
||||
if os.IsNotExist(err) {
|
||||
goto found
|
||||
}
|
||||
}
|
||||
log.Fatalf("Couldn't find free drive letter for test")
|
||||
found:
|
||||
r.mountPath = drive
|
||||
}
|
||||
|
||||
// Mount it up
|
||||
r.mount()
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *Run) mount() {
|
||||
log.Printf("mount %q %q", r.fremote, r.mountPath)
|
||||
var err error
|
||||
r.filesys, r.umountResult, r.umountFn, err = mountFn(r.fremote, r.mountPath)
|
||||
if err != nil {
|
||||
log.Printf("mount failed: %v", err)
|
||||
r.skip = true
|
||||
}
|
||||
log.Printf("mount OK")
|
||||
}
|
||||
|
||||
func (r *Run) umount() {
|
||||
if r.skip {
|
||||
log.Printf("FUSE not found so skipping umount")
|
||||
return
|
||||
}
|
||||
/*
|
||||
log.Printf("Calling fusermount -u %q", r.mountPath)
|
||||
err := exec.Command("fusermount", "-u", r.mountPath).Run()
|
||||
if err != nil {
|
||||
log.Printf("fusermount failed: %v", err)
|
||||
}
|
||||
*/
|
||||
log.Printf("Unmounting %q", r.mountPath)
|
||||
err := r.umountFn()
|
||||
if err != nil {
|
||||
log.Printf("signal to umount failed - retrying: %v", err)
|
||||
time.Sleep(3 * time.Second)
|
||||
err = r.umountFn()
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("signal to umount failed: %v", err)
|
||||
}
|
||||
log.Printf("Waiting for umount")
|
||||
err = <-r.umountResult
|
||||
if err != nil {
|
||||
log.Fatalf("umount failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Run) skipIfNoFUSE(t *testing.T) {
|
||||
if r.skip {
|
||||
t.Skip("FUSE not found so skipping test")
|
||||
}
|
||||
}
|
||||
|
||||
// Finalise cleans the remote and unmounts
|
||||
func (r *Run) Finalise() {
|
||||
r.umount()
|
||||
r.cleanRemote()
|
||||
err := os.RemoveAll(r.mountPath)
|
||||
if err != nil {
|
||||
log.Printf("Failed to clean mountPath %q: %v", r.mountPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Run) path(filepath string) string {
|
||||
// return windows drive letter root as E:/
|
||||
if filepath == "" && runtime.GOOS == "windows" {
|
||||
return run.mountPath + "/"
|
||||
}
|
||||
return path.Join(run.mountPath, filepath)
|
||||
}
|
||||
|
||||
type dirMap map[string]struct{}
|
||||
|
||||
// Create a dirMap from a string
|
||||
func newDirMap(dirString string) (dm dirMap) {
|
||||
dm = make(dirMap)
|
||||
for _, entry := range strings.Split(dirString, "|") {
|
||||
if entry != "" {
|
||||
dm[entry] = struct{}{}
|
||||
}
|
||||
}
|
||||
return dm
|
||||
}
|
||||
|
||||
// Returns a dirmap with only the files in
|
||||
func (dm dirMap) filesOnly() dirMap {
|
||||
newDm := make(dirMap)
|
||||
for name := range dm {
|
||||
if !strings.HasSuffix(name, "/") {
|
||||
newDm[name] = struct{}{}
|
||||
}
|
||||
}
|
||||
return newDm
|
||||
}
|
||||
|
||||
// reads the local tree into dir
|
||||
func (r *Run) readLocal(t *testing.T, dir dirMap, filepath string) {
|
||||
realPath := r.path(filepath)
|
||||
files, err := ioutil.ReadDir(realPath)
|
||||
require.NoError(t, err)
|
||||
for _, fi := range files {
|
||||
name := path.Join(filepath, fi.Name())
|
||||
if fi.IsDir() {
|
||||
dir[name+"/"] = struct{}{}
|
||||
r.readLocal(t, dir, name)
|
||||
assert.Equal(t, mountlib.DirPerms, fi.Mode().Perm())
|
||||
} else {
|
||||
dir[fmt.Sprintf("%s %d", name, fi.Size())] = struct{}{}
|
||||
assert.Equal(t, mountlib.FilePerms, fi.Mode().Perm())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// reads the remote tree into dir
|
||||
func (r *Run) readRemote(t *testing.T, dir dirMap, filepath string) {
|
||||
objs, dirs, err := fs.WalkGetAll(r.fremote, filepath, true, 1)
|
||||
if err == fs.ErrorDirNotFound {
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
for _, obj := range objs {
|
||||
dir[fmt.Sprintf("%s %d", obj.Remote(), obj.Size())] = struct{}{}
|
||||
}
|
||||
for _, d := range dirs {
|
||||
name := d.Remote()
|
||||
dir[name+"/"] = struct{}{}
|
||||
r.readRemote(t, dir, name)
|
||||
}
|
||||
}
|
||||
|
||||
// checkDir checks the local and remote against the string passed in
|
||||
func (r *Run) checkDir(t *testing.T, dirString string) {
|
||||
dm := newDirMap(dirString)
|
||||
localDm := make(dirMap)
|
||||
r.readLocal(t, localDm, "")
|
||||
remoteDm := make(dirMap)
|
||||
r.readRemote(t, remoteDm, "")
|
||||
// Ignore directories for remote compare
|
||||
assert.Equal(t, dm.filesOnly(), remoteDm.filesOnly(), "expected vs remote")
|
||||
assert.Equal(t, dm, localDm, "expected vs fuse mount")
|
||||
}
|
||||
|
||||
func (r *Run) createFile(t *testing.T, filepath string, contents string) {
|
||||
filepath = r.path(filepath)
|
||||
err := ioutil.WriteFile(filepath, []byte(contents), 0600)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func (r *Run) readFile(t *testing.T, filepath string) string {
|
||||
filepath = r.path(filepath)
|
||||
result, err := ioutil.ReadFile(filepath)
|
||||
require.NoError(t, err)
|
||||
return string(result)
|
||||
}
|
||||
|
||||
func (r *Run) mkdir(t *testing.T, filepath string) {
|
||||
filepath = r.path(filepath)
|
||||
err := os.Mkdir(filepath, 0700)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func (r *Run) rm(t *testing.T, filepath string) {
|
||||
filepath = r.path(filepath)
|
||||
err := os.Remove(filepath)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func (r *Run) rmdir(t *testing.T, filepath string) {
|
||||
filepath = r.path(filepath)
|
||||
err := os.Remove(filepath)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// TestMount checks that the Fs is mounted by seeing if the mountpoint
|
||||
// is in the mount output
|
||||
func TestMount(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
out, err := exec.Command("mount").Output()
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, string(out), run.mountPath)
|
||||
}
|
||||
|
||||
// TestRoot checks root directory is present and correct
|
||||
func TestRoot(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
fi, err := os.Lstat(run.mountPath)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, fi.IsDir())
|
||||
assert.Equal(t, fi.Mode().Perm(), mountlib.DirPerms)
|
||||
}
|
||||
125
cmd/mountlib/mounttest/read.go
Normal file
125
cmd/mountlib/mounttest/read.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package mounttest
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestReadByByte reads by byte including don't read any bytes
|
||||
func TestReadByByte(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
var data = []byte("hellohello")
|
||||
run.createFile(t, "testfile", string(data))
|
||||
run.checkDir(t, "testfile 10")
|
||||
|
||||
for i := 0; i < len(data); i++ {
|
||||
fd, err := os.Open(run.path("testfile"))
|
||||
assert.NoError(t, err)
|
||||
for j := 0; j < i; j++ {
|
||||
buf := make([]byte, 1)
|
||||
n, err := io.ReadFull(fd, buf)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, n)
|
||||
assert.Equal(t, buf[0], data[j])
|
||||
}
|
||||
err = fd.Close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
run.rm(t, "testfile")
|
||||
}
|
||||
|
||||
// TestReadChecksum checks the checksum reading is working
|
||||
func TestReadChecksum(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
// create file big enough so we exceed any single FUSE read
|
||||
// request
|
||||
b := make([]rune, 3*128*1024)
|
||||
for i := range b {
|
||||
b[i] = 'r'
|
||||
}
|
||||
run.createFile(t, "bigfile", string(b))
|
||||
|
||||
// The hash comparison would fail in Flush, if we did not
|
||||
// ensure we read the whole file
|
||||
fd, err := os.Open(run.path("bigfile"))
|
||||
assert.NoError(t, err)
|
||||
buf := make([]byte, 10)
|
||||
_, err = io.ReadFull(fd, buf)
|
||||
assert.NoError(t, err)
|
||||
err = fd.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// The hash comparison would fail, because we only read parts
|
||||
// of the file
|
||||
fd, err = os.Open(run.path("bigfile"))
|
||||
assert.NoError(t, err)
|
||||
// read at start
|
||||
_, err = io.ReadFull(fd, buf)
|
||||
assert.NoError(t, err)
|
||||
// read at end
|
||||
_, err = fd.Seek(int64(len(b)-len(buf)), 0)
|
||||
assert.NoError(t, err)
|
||||
_, err = io.ReadFull(fd, buf)
|
||||
assert.NoError(t, err)
|
||||
// ensure we don't compare hashes
|
||||
err = fd.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
run.rm(t, "bigfile")
|
||||
}
|
||||
|
||||
// TestReadSeek test seeking
|
||||
func TestReadSeek(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
var data = []byte("helloHELLO")
|
||||
run.createFile(t, "testfile", string(data))
|
||||
run.checkDir(t, "testfile 10")
|
||||
|
||||
fd, err := os.Open(run.path("testfile"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Seek to half way
|
||||
_, err = fd.Seek(5, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
buf, err := ioutil.ReadAll(fd)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, buf, []byte("HELLO"))
|
||||
|
||||
// Test seeking to the end
|
||||
_, err = fd.Seek(10, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
buf, err = ioutil.ReadAll(fd)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, buf, []byte(""))
|
||||
|
||||
// Test seeking beyond the end
|
||||
_, err = fd.Seek(1000000, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
buf, err = ioutil.ReadAll(fd)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, buf, []byte(""))
|
||||
|
||||
// Now back to the start
|
||||
_, err = fd.Seek(0, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
buf, err = ioutil.ReadAll(fd)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, buf, []byte("helloHELLO"))
|
||||
|
||||
err = fd.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
run.rm(t, "testfile")
|
||||
}
|
||||
13
cmd/mountlib/mounttest/read_non_unix.go
Normal file
13
cmd/mountlib/mounttest/read_non_unix.go
Normal file
@@ -0,0 +1,13 @@
|
||||
// +build !linux,!darwin,!freebsd
|
||||
|
||||
package mounttest
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestReadFileDoubleClose tests double close on read
|
||||
func TestReadFileDoubleClose(t *testing.T) {
|
||||
t.Skip("not supported on " + runtime.GOOS)
|
||||
}
|
||||
53
cmd/mountlib/mounttest/read_unix.go
Normal file
53
cmd/mountlib/mounttest/read_unix.go
Normal file
@@ -0,0 +1,53 @@
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package mounttest
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestReadFileDoubleClose tests double close on read
|
||||
func TestReadFileDoubleClose(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.createFile(t, "testdoubleclose", "hello")
|
||||
|
||||
in, err := os.Open(run.path("testdoubleclose"))
|
||||
assert.NoError(t, err)
|
||||
fd := in.Fd()
|
||||
|
||||
fd1, err := syscall.Dup(int(fd))
|
||||
assert.NoError(t, err)
|
||||
|
||||
fd2, err := syscall.Dup(int(fd))
|
||||
assert.NoError(t, err)
|
||||
|
||||
// close one of the dups - should produce no error
|
||||
err = syscall.Close(fd1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// read from the file
|
||||
buf := make([]byte, 1)
|
||||
_, err = in.Read(buf)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// close it
|
||||
err = in.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// read from the other dup - should produce no error as this
|
||||
// file is now buffered
|
||||
n, err := syscall.Read(fd2, buf)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, n)
|
||||
|
||||
// close the dup - should not produce an error
|
||||
err = syscall.Close(fd2)
|
||||
assert.NoError(t, err, "input/output error")
|
||||
|
||||
run.rm(t, "testdoubleclose")
|
||||
}
|
||||
81
cmd/mountlib/mounttest/write.go
Normal file
81
cmd/mountlib/mounttest/write.go
Normal file
@@ -0,0 +1,81 @@
|
||||
package mounttest
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestWriteFileNoWrite tests writing a file with no write()'s to it
|
||||
func TestWriteFileNoWrite(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
fd, err := os.Create(run.path("testnowrite"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fd.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// FIXME - wait for the Release on the file
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
run.checkDir(t, "testnowrite 0")
|
||||
|
||||
run.rm(t, "testnowrite")
|
||||
}
|
||||
|
||||
// FIXMETestWriteOpenFileInDirListing tests open file in directory listing
|
||||
func FIXMETestWriteOpenFileInDirListing(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
fd, err := os.Create(run.path("testnowrite"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
run.checkDir(t, "testnowrite 0")
|
||||
|
||||
err = fd.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
run.rm(t, "testnowrite")
|
||||
}
|
||||
|
||||
// TestWriteFileWrite tests writing a file and reading it back
|
||||
func TestWriteFileWrite(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.createFile(t, "testwrite", "data")
|
||||
run.checkDir(t, "testwrite 4")
|
||||
contents := run.readFile(t, "testwrite")
|
||||
assert.Equal(t, "data", contents)
|
||||
run.rm(t, "testwrite")
|
||||
}
|
||||
|
||||
// TestWriteFileOverwrite tests overwriting a file
|
||||
func TestWriteFileOverwrite(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.createFile(t, "testwrite", "data")
|
||||
run.checkDir(t, "testwrite 4")
|
||||
run.createFile(t, "testwrite", "potato")
|
||||
contents := run.readFile(t, "testwrite")
|
||||
assert.Equal(t, "potato", contents)
|
||||
run.rm(t, "testwrite")
|
||||
}
|
||||
|
||||
// TestWriteFileFsync tests Fsync
|
||||
//
|
||||
// NB the code for this is in file.go rather than write.go
|
||||
func TestWriteFileFsync(t *testing.T) {
|
||||
filepath := run.path("to be synced")
|
||||
fd, err := os.Create(filepath)
|
||||
require.NoError(t, err)
|
||||
_, err = fd.Write([]byte("hello"))
|
||||
require.NoError(t, err)
|
||||
err = fd.Sync()
|
||||
require.NoError(t, err)
|
||||
err = fd.Close()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
13
cmd/mountlib/mounttest/write_non_unix.go
Normal file
13
cmd/mountlib/mounttest/write_non_unix.go
Normal file
@@ -0,0 +1,13 @@
|
||||
// +build !linux,!darwin,!freebsd
|
||||
|
||||
package mounttest
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestWriteFileDoubleClose tests double close on write
|
||||
func TestWriteFileDoubleClose(t *testing.T) {
|
||||
t.Skip("not supported on " + runtime.GOOS)
|
||||
}
|
||||
50
cmd/mountlib/mounttest/write_unix.go
Normal file
50
cmd/mountlib/mounttest/write_unix.go
Normal file
@@ -0,0 +1,50 @@
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package mounttest
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestWriteFileDoubleClose tests double close on write
|
||||
func TestWriteFileDoubleClose(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
out, err := os.Create(run.path("testdoubleclose"))
|
||||
assert.NoError(t, err)
|
||||
fd := out.Fd()
|
||||
|
||||
fd1, err := syscall.Dup(int(fd))
|
||||
assert.NoError(t, err)
|
||||
|
||||
fd2, err := syscall.Dup(int(fd))
|
||||
assert.NoError(t, err)
|
||||
|
||||
// close one of the dups - should produce no error
|
||||
err = syscall.Close(fd1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// write to the file
|
||||
buf := []byte("hello")
|
||||
n, err := out.Write(buf)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 5, n)
|
||||
|
||||
// close it
|
||||
err = out.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// write to the other dup - should produce an error
|
||||
_, err = syscall.Write(fd2, buf)
|
||||
assert.Error(t, err, "input/output error")
|
||||
|
||||
// close the dup - should produce an error
|
||||
err = syscall.Close(fd2)
|
||||
assert.Error(t, err, "input/output error")
|
||||
|
||||
run.rm(t, "testdoubleclose")
|
||||
}
|
||||
273
cmd/mountlib/read.go
Normal file
273
cmd/mountlib/read.go
Normal file
@@ -0,0 +1,273 @@
|
||||
package mountlib
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ReadFileHandle is an open for read file handle on a File
|
||||
type ReadFileHandle struct {
|
||||
mu sync.Mutex
|
||||
closed bool // set if handle has been closed
|
||||
r *fs.Account
|
||||
o fs.Object
|
||||
readCalled bool // set if read has been called
|
||||
offset int64
|
||||
noSeek bool
|
||||
file *File
|
||||
hash *fs.MultiHasher
|
||||
opened bool
|
||||
}
|
||||
|
||||
func newReadFileHandle(f *File, o fs.Object) (*ReadFileHandle, error) {
|
||||
var hash *fs.MultiHasher
|
||||
var err error
|
||||
if !f.d.fsys.noChecksum {
|
||||
hash, err = fs.NewMultiHasherTypes(o.Fs().Hashes())
|
||||
if err != nil {
|
||||
fs.Errorf(o.Fs(), "newReadFileHandle hash error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
fh := &ReadFileHandle{
|
||||
o: o,
|
||||
noSeek: f.d.fsys.noSeek,
|
||||
file: f,
|
||||
hash: hash,
|
||||
}
|
||||
fs.Stats.Transferring(fh.o.Remote())
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
// openPending opens the file if there is a pending open
|
||||
// call with the lock held
|
||||
func (fh *ReadFileHandle) openPending() (err error) {
|
||||
if fh.opened {
|
||||
return nil
|
||||
}
|
||||
r, err := fh.o.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fh.r = fs.NewAccount(r, fh.o).WithBuffer() // account the transfer
|
||||
fh.opened = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// String converts it to printable
|
||||
func (fh *ReadFileHandle) String() string {
|
||||
if fh == nil {
|
||||
return "<nil *ReadFileHandle>"
|
||||
}
|
||||
if fh.file == nil {
|
||||
return "<nil *ReadFileHandle.file>"
|
||||
}
|
||||
return fh.file.String() + " (r)"
|
||||
}
|
||||
|
||||
// Node returns the Node assocuated with this - satisfies Noder interface
|
||||
func (fh *ReadFileHandle) Node() Node {
|
||||
return fh.file
|
||||
}
|
||||
|
||||
// seek to a new offset
|
||||
//
|
||||
// if reopen is true, then we won't attempt to use an io.Seeker interface
|
||||
//
|
||||
// Must be called with fh.mu held
|
||||
func (fh *ReadFileHandle) seek(offset int64, reopen bool) (err error) {
|
||||
if fh.noSeek {
|
||||
return ESPIPE
|
||||
}
|
||||
fh.r.StopBuffering() // stop the background reading first
|
||||
fh.hash = nil
|
||||
oldReader := fh.r.GetReader()
|
||||
r := oldReader
|
||||
// Can we seek it directly?
|
||||
if do, ok := oldReader.(io.Seeker); !reopen && ok {
|
||||
fs.Debugf(fh.o, "ReadFileHandle.seek from %d to %d (io.Seeker)", fh.offset, offset)
|
||||
_, err = do.Seek(offset, 0)
|
||||
if err != nil {
|
||||
fs.Debugf(fh.o, "ReadFileHandle.Read io.Seeker failed: %v", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(fh.o, "ReadFileHandle.seek from %d to %d", fh.offset, offset)
|
||||
// close old one
|
||||
err = oldReader.Close()
|
||||
if err != nil {
|
||||
fs.Debugf(fh.o, "ReadFileHandle.Read seek close old failed: %v", err)
|
||||
}
|
||||
// re-open with a seek
|
||||
r, err = fh.o.Open(&fs.SeekOption{Offset: offset})
|
||||
if err != nil {
|
||||
fs.Debugf(fh.o, "ReadFileHandle.Read seek failed: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
fh.r.UpdateReader(r)
|
||||
fh.offset = offset
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read from the file handle
|
||||
func (fh *ReadFileHandle) Read(reqSize, reqOffset int64) (respData []byte, err error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
err = fh.openPending() // FIXME pending open could be more efficient in the presense of seek (and retried)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// fs.Debugf(fh.o, "ReadFileHandle.Read size %d offset %d", reqSize, reqOffset)
|
||||
if fh.closed {
|
||||
fs.Errorf(fh.o, "ReadFileHandle.Read error: %v", EBADF)
|
||||
return nil, EBADF
|
||||
}
|
||||
doSeek := reqOffset != fh.offset
|
||||
var n int
|
||||
var newOffset int64
|
||||
retries := 0
|
||||
buf := make([]byte, reqSize)
|
||||
doReopen := false
|
||||
for {
|
||||
if doSeek {
|
||||
// Are we attempting to seek beyond the end of the
|
||||
// file - if so just return EOF leaving the underlying
|
||||
// file in an unchanged state.
|
||||
if reqOffset >= fh.o.Size() {
|
||||
fs.Debugf(fh.o, "ReadFileHandle.Read attempt to read beyond end of file: %d > %d", reqOffset, fh.o.Size())
|
||||
return nil, nil
|
||||
}
|
||||
// Otherwise do the seek
|
||||
err = fh.seek(reqOffset, doReopen)
|
||||
} else {
|
||||
err = nil
|
||||
}
|
||||
if err == nil {
|
||||
if reqSize > 0 {
|
||||
fh.readCalled = true
|
||||
}
|
||||
// One exception to the above is if we fail to fully populate a
|
||||
// page cache page; a read into page cache is always page aligned.
|
||||
// Make sure we never serve a partial read, to avoid that.
|
||||
n, err = io.ReadFull(fh.r, buf)
|
||||
newOffset = fh.offset + int64(n)
|
||||
// if err == nil && rand.Intn(10) == 0 {
|
||||
// err = errors.New("random error")
|
||||
// }
|
||||
if err == nil {
|
||||
break
|
||||
} else if (err == io.ErrUnexpectedEOF || err == io.EOF) && newOffset == fh.o.Size() {
|
||||
// Have read to end of file - reset error
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
if retries >= fs.Config.LowLevelRetries {
|
||||
break
|
||||
}
|
||||
retries++
|
||||
fs.Errorf(fh.o, "ReadFileHandle.Read error: low level retry %d/%d: %v", retries, fs.Config.LowLevelRetries, err)
|
||||
doSeek = true
|
||||
doReopen = true
|
||||
}
|
||||
if err != nil {
|
||||
fs.Errorf(fh.o, "ReadFileHandle.Read error: %v", err)
|
||||
} else {
|
||||
respData = buf[:n]
|
||||
fh.offset = newOffset
|
||||
// fs.Debugf(fh.o, "ReadFileHandle.Read OK")
|
||||
|
||||
if fh.hash != nil {
|
||||
_, err = fh.hash.Write(respData)
|
||||
if err != nil {
|
||||
fs.Errorf(fh.o, "ReadFileHandle.Read HashError: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return respData, err
|
||||
}
|
||||
|
||||
func (fh *ReadFileHandle) checkHash() error {
|
||||
if fh.hash == nil || !fh.readCalled || fh.offset < fh.o.Size() {
|
||||
return nil
|
||||
}
|
||||
|
||||
for hashType, dstSum := range fh.hash.Sums() {
|
||||
srcSum, err := fh.o.Hash(hashType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !fs.HashEquals(dstSum, srcSum) {
|
||||
return errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, dstSum, srcSum)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// close the file handle returning EBADF if it has been
|
||||
// closed already.
|
||||
//
|
||||
// Must be called with fh.mu held
|
||||
func (fh *ReadFileHandle) close() error {
|
||||
if fh.closed {
|
||||
return EBADF
|
||||
}
|
||||
fh.closed = true
|
||||
fs.Stats.DoneTransferring(fh.o.Remote(), true)
|
||||
|
||||
if err := fh.checkHash(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fh.r.Close()
|
||||
}
|
||||
|
||||
// Flush is called each time the file or directory is closed.
|
||||
// Because there can be multiple file descriptors referring to a
|
||||
// single opened file, Flush can be called multiple times.
|
||||
func (fh *ReadFileHandle) Flush() error {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if !fh.opened {
|
||||
return nil
|
||||
}
|
||||
// fs.Debugf(fh.o, "ReadFileHandle.Flush")
|
||||
|
||||
if err := fh.checkHash(); err != nil {
|
||||
fs.Errorf(fh.o, "ReadFileHandle.Flush error: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// fs.Debugf(fh.o, "ReadFileHandle.Flush OK")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Release is called when we are finished with the file handle
|
||||
//
|
||||
// It isn't called directly from userspace so the error is ignored by
|
||||
// the kernel
|
||||
func (fh *ReadFileHandle) Release() error {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if !fh.opened {
|
||||
return nil
|
||||
}
|
||||
if fh.closed {
|
||||
fs.Debugf(fh.o, "ReadFileHandle.Release nothing to do")
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(fh.o, "ReadFileHandle.Release closing")
|
||||
err := fh.close()
|
||||
if err != nil {
|
||||
fs.Errorf(fh.o, "ReadFileHandle.Release error: %v", err)
|
||||
} else {
|
||||
// fs.Debugf(fh.o, "ReadFileHandle.Release OK")
|
||||
}
|
||||
return err
|
||||
}
|
||||
200
cmd/mountlib/write.go
Normal file
200
cmd/mountlib/write.go
Normal file
@@ -0,0 +1,200 @@
|
||||
package mountlib
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// WriteFileHandle is an open for write handle on a File
|
||||
type WriteFileHandle struct {
|
||||
mu sync.Mutex
|
||||
closed bool // set if handle has been closed
|
||||
remote string
|
||||
pipeWriter *io.PipeWriter
|
||||
o fs.Object
|
||||
result chan error
|
||||
file *File
|
||||
writeCalled bool // set the first time Write() is called
|
||||
offset int64
|
||||
hash *fs.MultiHasher
|
||||
}
|
||||
|
||||
func newWriteFileHandle(d *Dir, f *File, src fs.ObjectInfo) (*WriteFileHandle, error) {
|
||||
var hash *fs.MultiHasher
|
||||
if !f.d.fsys.noChecksum {
|
||||
var err error
|
||||
hash, err = fs.NewMultiHasherTypes(src.Fs().Hashes())
|
||||
if err != nil {
|
||||
fs.Errorf(src.Fs(), "newWriteFileHandle hash error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
fh := &WriteFileHandle{
|
||||
remote: src.Remote(),
|
||||
result: make(chan error, 1),
|
||||
file: f,
|
||||
hash: hash,
|
||||
}
|
||||
var pipeReader *io.PipeReader
|
||||
pipeReader, fh.pipeWriter = io.Pipe()
|
||||
go func() {
|
||||
r := fs.NewAccountSizeName(pipeReader, 0, src.Remote()).WithBuffer() // account the transfer
|
||||
o, err := d.f.Put(r, src)
|
||||
if err != nil {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.New Put failed: %v", err)
|
||||
}
|
||||
// Close the Account and thus the pipeReader so the pipeWriter fails with ErrClosedPipe
|
||||
_ = r.Close()
|
||||
fh.o = o
|
||||
fh.result <- err
|
||||
}()
|
||||
fh.file.addWriters(1)
|
||||
fh.file.setSize(0)
|
||||
fs.Stats.Transferring(fh.remote)
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
// String converts it to printable
|
||||
func (fh *WriteFileHandle) String() string {
|
||||
if fh == nil {
|
||||
return "<nil *WriteFileHandle>"
|
||||
}
|
||||
if fh.file == nil {
|
||||
return "<nil *WriteFileHandle.file>"
|
||||
}
|
||||
return fh.file.String() + " (w)"
|
||||
}
|
||||
|
||||
// Node returns the Node assocuated with this - satisfies Noder interface
|
||||
func (fh *WriteFileHandle) Node() Node {
|
||||
return fh.file
|
||||
}
|
||||
|
||||
// Write data to the file handle
|
||||
func (fh *WriteFileHandle) Write(data []byte, offset int64) (written int64, err error) {
|
||||
// fs.Debugf(fh.remote, "WriteFileHandle.Write len=%d", len(data))
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.offset != offset {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.Write can't seek in file")
|
||||
return 0, ESPIPE
|
||||
}
|
||||
if fh.closed {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.Write error: %v", EBADF)
|
||||
return 0, EBADF
|
||||
}
|
||||
fh.writeCalled = true
|
||||
// FIXME should probably check the file isn't being seeked?
|
||||
n, err := fh.pipeWriter.Write(data)
|
||||
written = int64(n)
|
||||
fh.offset += written
|
||||
fh.file.setSize(fh.offset)
|
||||
if err != nil {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.Write error: %v", err)
|
||||
return 0, err
|
||||
}
|
||||
// fs.Debugf(fh.remote, "WriteFileHandle.Write OK (%d bytes written)", n)
|
||||
if fh.hash != nil {
|
||||
_, err = fh.hash.Write(data[:n])
|
||||
if err != nil {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.Write HashError: %v", err)
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// Offset returns the offset of the file pointer
|
||||
func (fh *WriteFileHandle) Offset() (offset int64) {
|
||||
return fh.offset
|
||||
}
|
||||
|
||||
// close the file handle returning EBADF if it has been
|
||||
// closed already.
|
||||
//
|
||||
// Must be called with fh.mu held
|
||||
func (fh *WriteFileHandle) close() error {
|
||||
if fh.closed {
|
||||
return EBADF
|
||||
}
|
||||
fh.closed = true
|
||||
fs.Stats.DoneTransferring(fh.remote, true)
|
||||
fh.file.addWriters(-1)
|
||||
writeCloseErr := fh.pipeWriter.Close()
|
||||
err := <-fh.result
|
||||
if err == nil {
|
||||
fh.file.setObject(fh.o)
|
||||
err = writeCloseErr
|
||||
}
|
||||
if err == nil && fh.hash != nil {
|
||||
for hashType, srcSum := range fh.hash.Sums() {
|
||||
dstSum, err := fh.o.Hash(hashType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !fs.HashEquals(srcSum, dstSum) {
|
||||
return errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Flush is called on each close() of a file descriptor. So if a
|
||||
// filesystem wants to return write errors in close() and the file has
|
||||
// cached dirty data, this is a good place to write back data and
|
||||
// return any errors. Since many applications ignore close() errors
|
||||
// this is not always useful.
|
||||
//
|
||||
// NOTE: The flush() method may be called more than once for each
|
||||
// open(). This happens if more than one file descriptor refers to an
|
||||
// opened file due to dup(), dup2() or fork() calls. It is not
|
||||
// possible to determine if a flush is final, so each flush should be
|
||||
// treated equally. Multiple write-flush sequences are relatively
|
||||
// rare, so this shouldn't be a problem.
|
||||
//
|
||||
// Filesystems shouldn't assume that flush will always be called after
|
||||
// some writes, or that if will be called at all.
|
||||
func (fh *WriteFileHandle) Flush() error {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
// fs.Debugf(fh.remote, "WriteFileHandle.Flush")
|
||||
// If Write hasn't been called then ignore the Flush - Release
|
||||
// will pick it up
|
||||
if !fh.writeCalled {
|
||||
fs.Debugf(fh.remote, "WriteFileHandle.Flush ignoring flush on unwritten handle")
|
||||
return nil
|
||||
|
||||
}
|
||||
err := fh.close()
|
||||
if err != nil {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.Flush error: %v", err)
|
||||
} else {
|
||||
// fs.Debugf(fh.remote, "WriteFileHandle.Flush OK")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Release is called when we are finished with the file handle
|
||||
//
|
||||
// It isn't called directly from userspace so the error is ignored by
|
||||
// the kernel
|
||||
func (fh *WriteFileHandle) Release() error {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.closed {
|
||||
fs.Debugf(fh.remote, "WriteFileHandle.Release nothing to do")
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(fh.remote, "WriteFileHandle.Release closing")
|
||||
err := fh.close()
|
||||
if err != nil {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.Release error: %v", err)
|
||||
} else {
|
||||
// fs.Debugf(fh.remote, "WriteFileHandle.Release OK")
|
||||
}
|
||||
return err
|
||||
}
|
||||
41
cmd/move/move.go
Normal file
41
cmd/move/move.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package move
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "move source:path dest:path",
|
||||
Short: `Move files from source to dest.`,
|
||||
Long: `
|
||||
Moves the contents of the source directory to the destination
|
||||
directory. Rclone will error if the source and destination overlap and
|
||||
the remote does not support a server side directory move operation.
|
||||
|
||||
If no filters are in use and if possible this will server side move
|
||||
` + "`source:path`" + ` into ` + "`dest:path`" + `. After this ` + "`source:path`" + ` will no
|
||||
longer longer exist.
|
||||
|
||||
Otherwise for each file in ` + "`source:path`" + ` selected by the filters (if
|
||||
any) this will move it into ` + "`dest:path`" + `. If possible a server side
|
||||
move will be used, otherwise it will copy it (server side if possible)
|
||||
into ` + "`dest:path`" + ` then delete the original (if no errors on copy) in
|
||||
` + "`source:path`" + `.
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
--dry-run flag.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, fdst := cmd.NewFsSrcDst(args)
|
||||
cmd.Run(true, true, command, func() error {
|
||||
return fs.MoveDir(fdst, fsrc)
|
||||
})
|
||||
},
|
||||
}
|
||||
57
cmd/moveto/moveto.go
Normal file
57
cmd/moveto/moveto.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package moveto
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "moveto source:path dest:path",
|
||||
Short: `Move file or directory from source to dest.`,
|
||||
Long: `
|
||||
If source:path is a file or directory then it moves it to a file or
|
||||
directory named dest:path.
|
||||
|
||||
This can be used to rename files or upload single files to other than
|
||||
their existing name. If the source is a directory then it acts exacty
|
||||
like the move command.
|
||||
|
||||
So
|
||||
|
||||
rclone moveto src dst
|
||||
|
||||
where src and dst are rclone paths, either remote:path or
|
||||
/path/to/local or C:\windows\path\if\on\windows.
|
||||
|
||||
This will:
|
||||
|
||||
if src is file
|
||||
move it to dst, overwriting an existing file if it exists
|
||||
if src is directory
|
||||
move it to dst, overwriting existing files if they exist
|
||||
see move command for full details
|
||||
|
||||
This doesn't transfer unchanged files, testing by size and
|
||||
modification time or MD5SUM. src will be deleted on successful
|
||||
transfer.
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
--dry-run flag.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
|
||||
|
||||
cmd.Run(true, true, command, func() error {
|
||||
if srcFileName == "" {
|
||||
return fs.MoveDir(fdst, fsrc)
|
||||
}
|
||||
return fs.MoveFile(fdst, fsrc, dstFileName, srcFileName)
|
||||
})
|
||||
},
|
||||
}
|
||||
546
cmd/ncdu/ncdu.go
Normal file
546
cmd/ncdu/ncdu.go
Normal file
@@ -0,0 +1,546 @@
|
||||
// Package ncdu implements a text based user interface for exploring a remote
|
||||
|
||||
//+build !plan9,!solaris
|
||||
|
||||
package ncdu
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/cmd/ncdu/scan"
|
||||
"github.com/ncw/rclone/fs"
|
||||
termbox "github.com/nsf/termbox-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "ncdu remote:path",
|
||||
Short: `Explore a remote with a text based user interface.`,
|
||||
Long: `
|
||||
This displays a text based user interface allowing the navigation of a
|
||||
remote. It is most useful for answering the question - "What is using
|
||||
all my disk space?".
|
||||
|
||||
To make the user interface it first scans the entire remote given and
|
||||
builds an in memory representation. rclone ncdu can be used during
|
||||
this scanning phase and you will see it building up the directory
|
||||
structure as it goes along.
|
||||
|
||||
Here are the keys - press '?' to toggle the help on and off
|
||||
|
||||
` + strings.Join(helpText[1:], "\n ") + `
|
||||
|
||||
This an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for
|
||||
rclone remotes. It is missing lots of features at the moment, most
|
||||
importantly deleting files, but is useful as it stands.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return NewUI(fsrc).Show()
|
||||
})
|
||||
},
|
||||
}
|
||||
|
||||
// help text
|
||||
var helpText = []string{
|
||||
"rclone ncdu",
|
||||
" ↑,↓ or k,j to Move",
|
||||
" →,l to enter",
|
||||
" ←,h to return",
|
||||
" c toggle counts",
|
||||
" g toggle graph",
|
||||
" n,s,C sort by name,size,count",
|
||||
" ? to toggle help on and off",
|
||||
" q/ESC/c-C to quit",
|
||||
}
|
||||
|
||||
// UI contains the state of the user interface
|
||||
type UI struct {
|
||||
f fs.Fs // fs being displayed
|
||||
fsName string // human name of Fs
|
||||
root *scan.Dir // root directory
|
||||
d *scan.Dir // current directory being displayed
|
||||
path string // path of current directory
|
||||
showBox bool // whether to show a box
|
||||
boxText []string // text to show in box
|
||||
entries fs.DirEntries // entries of current directory
|
||||
sortPerm []int // order to display entries in after sorting
|
||||
invSortPerm []int // inverse order
|
||||
dirListHeight int // height of listing
|
||||
listing bool // whether listing is in progress
|
||||
showGraph bool // toggle showing graph
|
||||
showCounts bool // toggle showing counts
|
||||
sortByName int8 // +1 for normal, 0 for off, -1 for reverse
|
||||
sortBySize int8
|
||||
sortByCount int8
|
||||
dirPosMap map[string]dirPos // store for directory positions
|
||||
}
|
||||
|
||||
// Where we have got to in the directory listing
|
||||
type dirPos struct {
|
||||
entry int
|
||||
offset int
|
||||
}
|
||||
|
||||
// Print a string
|
||||
func Print(x, y int, fg, bg termbox.Attribute, msg string) {
|
||||
for _, c := range msg {
|
||||
termbox.SetCell(x, y, c, fg, bg)
|
||||
x++
|
||||
}
|
||||
}
|
||||
|
||||
// Printf a string
|
||||
func Printf(x, y int, fg, bg termbox.Attribute, format string, args ...interface{}) {
|
||||
s := fmt.Sprintf(format, args...)
|
||||
Print(x, y, fg, bg, s)
|
||||
}
|
||||
|
||||
// Line prints a string to given xmax, with given space
|
||||
func Line(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, msg string) {
|
||||
for _, c := range msg {
|
||||
termbox.SetCell(x, y, c, fg, bg)
|
||||
x++
|
||||
if x >= xmax {
|
||||
return
|
||||
}
|
||||
}
|
||||
for ; x < xmax; x++ {
|
||||
termbox.SetCell(x, y, spacer, fg, bg)
|
||||
}
|
||||
}
|
||||
|
||||
// Linef a string
|
||||
func Linef(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, format string, args ...interface{}) {
|
||||
s := fmt.Sprintf(format, args...)
|
||||
Line(x, y, xmax, fg, bg, spacer, s)
|
||||
}
|
||||
|
||||
// Box the u.boxText onto the screen
|
||||
func (u *UI) Box() {
|
||||
w, h := termbox.Size()
|
||||
|
||||
// Find dimensions of text
|
||||
boxWidth := 10
|
||||
for _, s := range u.boxText {
|
||||
if len(s) > boxWidth && len(s) < w-4 {
|
||||
boxWidth = len(s)
|
||||
}
|
||||
}
|
||||
boxHeight := len(u.boxText)
|
||||
|
||||
// position
|
||||
x := (w - boxWidth) / 2
|
||||
y := (h - boxHeight) / 2
|
||||
xmax := x + boxWidth
|
||||
|
||||
// draw text
|
||||
fg, bg := termbox.ColorRed, termbox.ColorWhite
|
||||
for i, s := range u.boxText {
|
||||
Line(x, y+i, xmax, fg, bg, ' ', s)
|
||||
fg = termbox.ColorBlack
|
||||
}
|
||||
|
||||
// FIXME draw a box around
|
||||
}
|
||||
|
||||
// find the biggest entry in the current listing
|
||||
func (u *UI) biggestEntry() (biggest int64) {
|
||||
if u.d == nil {
|
||||
return
|
||||
}
|
||||
for i := range u.entries {
|
||||
size, _, _, _ := u.d.AttrI(u.sortPerm[i])
|
||||
if size > biggest {
|
||||
biggest = size
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Draw the current screen
|
||||
func (u *UI) Draw() error {
|
||||
w, h := termbox.Size()
|
||||
u.dirListHeight = h - 3
|
||||
|
||||
// Plot
|
||||
err := termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to clear screen")
|
||||
}
|
||||
|
||||
// Header line
|
||||
Linef(0, 0, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "rclone ncdu %s - use the arrow keys to navigate, press ? for help", fs.Version)
|
||||
|
||||
// Directory line
|
||||
Linef(0, 1, w, termbox.ColorWhite, termbox.ColorBlack, '-', "-- %s ", u.path)
|
||||
|
||||
// graphs
|
||||
const (
|
||||
graphBars = 10
|
||||
graph = "########## "
|
||||
)
|
||||
|
||||
// Directory listing
|
||||
if u.d != nil {
|
||||
y := 2
|
||||
perBar := u.biggestEntry() / graphBars
|
||||
if perBar == 0 {
|
||||
perBar = 1
|
||||
}
|
||||
dirPos := u.dirPosMap[u.path]
|
||||
for i, j := range u.sortPerm[dirPos.offset:] {
|
||||
entry := u.entries[j]
|
||||
n := i + dirPos.offset
|
||||
if y >= h-1 {
|
||||
break
|
||||
}
|
||||
fg := termbox.ColorWhite
|
||||
bg := termbox.ColorBlack
|
||||
if n == dirPos.entry {
|
||||
fg, bg = bg, fg
|
||||
}
|
||||
size, count, isDir, readable := u.d.AttrI(u.sortPerm[n])
|
||||
mark := ' '
|
||||
if isDir {
|
||||
mark = '/'
|
||||
}
|
||||
message := ""
|
||||
if !readable {
|
||||
message = " [not read yet]"
|
||||
}
|
||||
extras := ""
|
||||
if u.showCounts {
|
||||
if count > 0 {
|
||||
extras += fmt.Sprintf("%8v ", fs.SizeSuffix(count))
|
||||
} else {
|
||||
extras += " "
|
||||
}
|
||||
|
||||
}
|
||||
if u.showGraph {
|
||||
bars := (size + perBar/2 - 1) / perBar
|
||||
// clip if necessary - only happens during startup
|
||||
if bars > 10 {
|
||||
bars = 10
|
||||
} else if bars < 0 {
|
||||
bars = 0
|
||||
}
|
||||
extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] "
|
||||
}
|
||||
Linef(0, y, w, fg, bg, ' ', "%8v %s%c%s%s", fs.SizeSuffix(size), extras, mark, path.Base(entry.Remote()), message)
|
||||
y++
|
||||
}
|
||||
}
|
||||
|
||||
// Footer
|
||||
if u.d == nil {
|
||||
Line(0, h-1, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "Waiting for root directory...")
|
||||
} else {
|
||||
message := ""
|
||||
if u.listing {
|
||||
message = " [listing in progress]"
|
||||
}
|
||||
size, count := u.d.Attr()
|
||||
Linef(0, h-1, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "Total usage: %v, Objects: %d%s", fs.SizeSuffix(size), count, message)
|
||||
}
|
||||
|
||||
// Show the box on top if requred
|
||||
if u.showBox {
|
||||
u.Box()
|
||||
}
|
||||
err = termbox.Flush()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to flush screen")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Move the cursor this many spaces adjusting the viewport as necessary
|
||||
func (u *UI) move(d int) {
|
||||
if u.d == nil {
|
||||
return
|
||||
}
|
||||
|
||||
absD := d
|
||||
if d < 0 {
|
||||
absD = -d
|
||||
}
|
||||
|
||||
entries := len(u.entries)
|
||||
|
||||
// Fetch current dirPos
|
||||
dirPos := u.dirPosMap[u.path]
|
||||
|
||||
dirPos.entry += d
|
||||
|
||||
// check entry in range
|
||||
if dirPos.entry < 0 {
|
||||
dirPos.entry = 0
|
||||
} else if dirPos.entry >= entries {
|
||||
dirPos.entry = entries - 1
|
||||
}
|
||||
|
||||
// check cursor still on screen
|
||||
p := dirPos.entry - dirPos.offset // where dirPos.entry appears on the screen
|
||||
if p < 0 {
|
||||
dirPos.offset -= absD
|
||||
} else if p >= u.dirListHeight {
|
||||
dirPos.offset += absD
|
||||
}
|
||||
|
||||
// check dirPos.offset in bounds
|
||||
if dirPos.offset < 0 {
|
||||
dirPos.offset = 0
|
||||
} else if dirPos.offset >= entries {
|
||||
dirPos.offset = entries - 1
|
||||
}
|
||||
|
||||
// write dirPos back for later
|
||||
u.dirPosMap[u.path] = dirPos
|
||||
}
|
||||
|
||||
// Sort by the configured sort method
type ncduSort struct {
	sortPerm []int         // permutation of indexes into entries, rearranged by sorting
	entries  fs.DirEntries // the directory entries being sorted
	d        *scan.Dir     // directory the entries belong to, used for size/count attributes
	u        *UI           // UI carrying the active sort settings (sortByName/Size/Count)
}
|
||||
|
||||
// Less is part of sort.Interface.
//
// The u.sortBy* fields use +1 for normal order, -1 for reverse and 0
// for off (toggleSort keeps at most one of them non-zero).  Whenever
// the active key compares equal - or name sorting is in its normal
// direction - the comparison falls through to the ascending sort by
// name at the bottom.
func (ds *ncduSort) Less(i, j int) bool {
	isize, icount, _, _ := ds.d.AttrI(ds.sortPerm[i])
	jsize, jcount, _, _ := ds.d.AttrI(ds.sortPerm[j])
	iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote()
	switch {
	case ds.u.sortByName < 0:
		// reverse name order
		return iname > jname
	case ds.u.sortByName > 0:
		// normal name order - handled by the common case below
		break
	case ds.u.sortBySize < 0:
		if isize != jsize {
			return isize < jsize
		}
	case ds.u.sortBySize > 0:
		if isize != jsize {
			return isize > jsize
		}
	case ds.u.sortByCount < 0:
		if icount != jcount {
			return icount < jcount
		}
	case ds.u.sortByCount > 0:
		if icount != jcount {
			return icount > jcount
		}
	}
	// if everything equal, sort by name
	return iname < jname
}
|
||||
|
||||
// Swap is part of sort.Interface.
|
||||
func (ds *ncduSort) Swap(i, j int) {
|
||||
ds.sortPerm[i], ds.sortPerm[j] = ds.sortPerm[j], ds.sortPerm[i]
|
||||
}
|
||||
|
||||
// Len is part of sort.Interface.  It reports the number of entries
// being sorted.
func (ds *ncduSort) Len() int {
	return len(ds.sortPerm)
}
|
||||
|
||||
// sort the permutation map of the current directory
|
||||
func (u *UI) sortCurrentDir() {
|
||||
u.sortPerm = u.sortPerm[:0]
|
||||
for i := range u.entries {
|
||||
u.sortPerm = append(u.sortPerm, i)
|
||||
}
|
||||
data := ncduSort{
|
||||
sortPerm: u.sortPerm,
|
||||
entries: u.entries,
|
||||
d: u.d,
|
||||
u: u,
|
||||
}
|
||||
sort.Sort(&data)
|
||||
if len(u.invSortPerm) < len(u.sortPerm) {
|
||||
u.invSortPerm = make([]int, len(u.sortPerm))
|
||||
}
|
||||
for i, j := range u.sortPerm {
|
||||
u.invSortPerm[j] = i
|
||||
}
|
||||
}
|
||||
|
||||
// setCurrentDir sets the current directory
|
||||
func (u *UI) setCurrentDir(d *scan.Dir) {
|
||||
u.d = d
|
||||
u.entries = d.Entries()
|
||||
u.path = path.Join(u.fsName, d.Path())
|
||||
u.sortCurrentDir()
|
||||
}
|
||||
|
||||
// enters the current entry
|
||||
func (u *UI) enter() {
|
||||
if u.d == nil {
|
||||
return
|
||||
}
|
||||
dirPos := u.dirPosMap[u.path]
|
||||
d, _ := u.d.GetDir(u.sortPerm[dirPos.entry])
|
||||
if d == nil {
|
||||
return
|
||||
}
|
||||
u.setCurrentDir(d)
|
||||
}
|
||||
|
||||
// up goes up to the parent directory
|
||||
func (u *UI) up() {
|
||||
if u.d == nil {
|
||||
return
|
||||
}
|
||||
parent := u.d.Parent()
|
||||
if parent != nil {
|
||||
u.setCurrentDir(parent)
|
||||
}
|
||||
}
|
||||
|
||||
// popupBox shows a box with the text in
|
||||
func (u *UI) popupBox(text []string) {
|
||||
u.boxText = text
|
||||
u.showBox = true
|
||||
}
|
||||
|
||||
// togglePopupBox shows a box with the text in
|
||||
func (u *UI) togglePopupBox(text []string) {
|
||||
if u.showBox {
|
||||
u.showBox = false
|
||||
} else {
|
||||
u.popupBox(text)
|
||||
}
|
||||
}
|
||||
|
||||
// toggle the sorting for the flag passed in
|
||||
func (u *UI) toggleSort(sortType *int8) {
|
||||
old := *sortType
|
||||
u.sortBySize = 0
|
||||
u.sortByCount = 0
|
||||
u.sortByName = 0
|
||||
if old == 0 {
|
||||
*sortType = 1
|
||||
} else {
|
||||
*sortType = -old
|
||||
}
|
||||
u.sortCurrentDir()
|
||||
}
|
||||
|
||||
// NewUI creates a new user interface for ncdu on f
|
||||
func NewUI(f fs.Fs) *UI {
|
||||
return &UI{
|
||||
f: f,
|
||||
path: "Waiting for root...",
|
||||
dirListHeight: 20, // updated in Draw
|
||||
fsName: f.Name() + ":" + f.Root(),
|
||||
showGraph: true,
|
||||
showCounts: false,
|
||||
sortByName: 0, // +1 for normal, 0 for off, -1 for reverse
|
||||
sortBySize: 1,
|
||||
sortByCount: 0,
|
||||
dirPosMap: make(map[string]dirPos),
|
||||
}
|
||||
}
|
||||
|
||||
// Show shows the user interface
//
// It owns the termbox terminal for its whole lifetime and blocks until
// the user quits with q / Esc / Ctrl-C, or a fatal error occurs.
func (u *UI) Show() error {
	err := termbox.Init()
	if err != nil {
		// NOTE(review): log.Fatal exits the process immediately, so
		// the deferred Close below never runs and the caller never
		// sees the error - consider returning it instead.
		log.Fatal(err)
	}
	defer termbox.Close()

	// scan the disk in the background
	u.listing = true
	rootChan, errChan, updated := scan.Scan(u.f)

	// Poll the events into a channel
	//
	// doneWithEvent makes the poller wait until the main loop has
	// consumed each event before polling the next one.
	// NOTE(review): this goroutine has no shutdown path and is left
	// blocked when Show returns - presumably acceptable because the
	// process exits shortly after; confirm.
	events := make(chan termbox.Event)
	doneWithEvent := make(chan bool)
	go func() {
		for {
			events <- termbox.PollEvent()
			<-doneWithEvent
		}
	}()

	// Main loop, waiting for events and channels
outer:
	for {
		//Reset()
		err := u.Draw()
		if err != nil {
			return errors.Wrap(err, "draw failed")
		}
		var root *scan.Dir
		select {
		case root = <-rootChan:
			// root directory has arrived - display it
			u.root = root
			u.setCurrentDir(root)
		case err := <-errChan:
			// a nil error signals that the background listing finished
			if err != nil {
				return errors.Wrap(err, "ncdu directory listing")
			}
			u.listing = false
		case <-updated:
			// redraw
			// might want to limit updates per second
			u.sortCurrentDir()
		case ev := <-events:
			doneWithEvent <- true
			if ev.Type == termbox.EventKey {
				// NOTE(review): assumes termbox sets only one of
				// ev.Key / ev.Ch per event, so their sum matches
				// either a special key or a rune - confirm against
				// the termbox documentation.
				switch ev.Key + termbox.Key(ev.Ch) {
				case termbox.KeyEsc, termbox.KeyCtrlC, 'q':
					// dismiss the popup box first, quit otherwise
					if u.showBox {
						u.showBox = false
					} else {
						break outer
					}
				case termbox.KeyArrowDown, 'j':
					u.move(1)
				case termbox.KeyArrowUp, 'k':
					u.move(-1)
				case termbox.KeyPgdn, '-', '_':
					u.move(u.dirListHeight)
				case termbox.KeyPgup, '=', '+':
					u.move(-u.dirListHeight)
				case termbox.KeyArrowLeft, 'h':
					u.up()
				case termbox.KeyArrowRight, 'l', termbox.KeyEnter:
					u.enter()
				case 'c':
					u.showCounts = !u.showCounts
				case 'g':
					u.showGraph = !u.showGraph
				case 'n':
					u.toggleSort(&u.sortByName)
				case 's':
					u.toggleSort(&u.sortBySize)
				case 'C':
					u.toggleSort(&u.sortByCount)
				case '?':
					u.togglePopupBox(helpText)
				}
			}
		}
		// listen to key presses, etc
	}
	return nil
}
|
||||
6
cmd/ncdu/ncdu_unsupported.go
Normal file
6
cmd/ncdu/ncdu_unsupported.go
Normal file
@@ -0,0 +1,6 @@
|
||||
// Build for ncdu for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//
// The constraint below makes this the only file in the package on the
// listed platforms.

// +build plan9 solaris

package ncdu
|
||||
167
cmd/ncdu/scan/scan.go
Normal file
167
cmd/ncdu/scan/scan.go
Normal file
@@ -0,0 +1,167 @@
|
||||
// Package scan does concurrent scanning of an Fs building up a directory tree.
|
||||
package scan
|
||||
|
||||
import (
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Dir represents a directory found in the remote
type Dir struct {
	parent   *Dir   // directory above this one, nil for the root (write once in newDir)
	path     string // path of this directory within the remote (write once in newDir)
	mu       sync.Mutex
	count    int64           // number of objects in this directory and everything below it
	size     int64           // total size of this directory and everything below it
	complete bool            // NOTE(review): not set anywhere in this file - confirm its use elsewhere
	entries  fs.DirEntries   // the listing of this directory
	dirs     map[string]*Dir // scanned subdirectories, keyed by leaf name
	offset   int             // current listing offset
	entry    int             // current listing entry
}
|
||||
|
||||
// Parent returns the directory above this one, or nil for the root.
func (d *Dir) Parent() *Dir {
	// no locking needed since these are write once in newDir()
	return d.parent
}
|
||||
|
||||
// Path returns the position of the dir in the filesystem, "" for the
// root.
func (d *Dir) Path() string {
	// no locking needed since these are write once in newDir()
	return d.path
}
|
||||
|
||||
// make a new directory
//
// Counts the objects in entries into the new Dir, registers it in its
// parent's dirs map, and adds its totals to every ancestor.  Each
// ancestor's mutex is held only while that ancestor is updated, so
// concurrent calls for different directories are safe; the new Dir
// itself is not yet shared, so it needs no locking here.
func newDir(parent *Dir, dirPath string, entries fs.DirEntries) *Dir {
	d := &Dir{
		parent:  parent,
		path:    dirPath,
		entries: entries,
		dirs:    make(map[string]*Dir),
	}
	// Count size in this dir - only objects (files) contribute here;
	// subdirectory totals arrive via their own newDir calls below.
	for _, entry := range entries {
		if o, ok := entry.(fs.Object); ok {
			d.count++
			d.size += o.Size()
		}
	}
	// Set my directory entry in parent (d.parent == parent here)
	if parent != nil {
		parent.mu.Lock()
		leaf := path.Base(dirPath)
		d.parent.dirs[leaf] = d
		parent.mu.Unlock()
	}
	// Accumulate counts in parents, walking up to the root
	for ; parent != nil; parent = parent.parent {
		parent.mu.Lock()
		parent.count += d.count
		parent.size += d.size
		parent.mu.Unlock()
	}
	return d
}
|
||||
|
||||
// Entries returns a copy of the entries in the directory
//
// The copy lets the caller use the result without further
// synchronisation.  d.entries itself is only assigned in newDir (in
// this file), so reading it here without d.mu is safe.
func (d *Dir) Entries() fs.DirEntries {
	return append(fs.DirEntries(nil), d.entries...)
}
|
||||
|
||||
// gets the directory of the i-th entry
|
||||
//
|
||||
// returns nil if it is a file
|
||||
// returns a flag as to whether is directory or not
|
||||
//
|
||||
// Call with d.mu held
|
||||
func (d *Dir) getDir(i int) (subDir *Dir, isDir bool) {
|
||||
obj := d.entries[i]
|
||||
dir, ok := obj.(fs.Directory)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
leaf := path.Base(dir.Remote())
|
||||
subDir = d.dirs[leaf]
|
||||
return subDir, true
|
||||
}
|
||||
|
||||
// GetDir returns the Dir of the i-th entry
//
// returns nil if it is a file
// returns a flag as to whether is directory or not
//
// This is the lock-taking wrapper around getDir.
func (d *Dir) GetDir(i int) (subDir *Dir, isDir bool) {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.getDir(i)
}
|
||||
|
||||
// Attr returns the size and count for the directory
//
// These are the accumulated totals for the directory and everything
// below it, as maintained by newDir.
func (d *Dir) Attr() (size int64, count int64) {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.size, d.count
}
|
||||
|
||||
// AttrI returns the size, count and flags for the i-th directory entry
//
// For a file it returns the file size with count 0 and readable true.
// For a directory that has not been scanned yet it returns zeros with
// readable false; for a scanned directory it returns the accumulated
// totals.
func (d *Dir) AttrI(i int) (size int64, count int64, isDir bool, readable bool) {
	d.mu.Lock()
	defer d.mu.Unlock()
	subDir, isDir := d.getDir(i)
	if !isDir {
		return d.entries[i].Size(), 0, false, true
	}
	if subDir == nil {
		// directory entry exists but hasn't been listed yet
		return 0, 0, true, false
	}
	// subDir.Attr locks subDir.mu, a different mutex from d.mu, so no
	// self-deadlock
	size, count = subDir.Attr()
	return size, count, true, true
}
|
||||
|
||||
// Scan the Fs passed in, returning a root directory channel and an
|
||||
// error channel
|
||||
func Scan(f fs.Fs) (chan *Dir, chan error, chan struct{}) {
|
||||
root := make(chan *Dir, 1)
|
||||
errChan := make(chan error, 1)
|
||||
updated := make(chan struct{}, 1)
|
||||
go func() {
|
||||
parents := map[string]*Dir{}
|
||||
err := fs.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
|
||||
if err != nil {
|
||||
return err // FIXME mark directory as errored instead of aborting
|
||||
}
|
||||
var parent *Dir
|
||||
if dirPath != "" {
|
||||
parentPath := path.Dir(dirPath)
|
||||
if parentPath == "." {
|
||||
parentPath = ""
|
||||
}
|
||||
var ok bool
|
||||
parent, ok = parents[parentPath]
|
||||
if !ok {
|
||||
errChan <- errors.Errorf("couldn't find parent for %q", dirPath)
|
||||
}
|
||||
}
|
||||
d := newDir(parent, dirPath, entries)
|
||||
parents[dirPath] = d
|
||||
if dirPath == "" {
|
||||
root <- d
|
||||
}
|
||||
// Mark updated
|
||||
select {
|
||||
case updated <- struct{}{}:
|
||||
default:
|
||||
break
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
errChan <- errors.Wrap(err, "ncdu listing failed")
|
||||
}
|
||||
errChan <- nil
|
||||
}()
|
||||
return root, errChan, updated
|
||||
}
|
||||
26
cmd/obscure/obscure.go
Normal file
26
cmd/obscure/obscure.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package obscure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// register the command with the root command
func init() {
	cmd.Root.AddCommand(commandDefintion)
}

// NOTE(review): "commandDefintion" is a typo for "commandDefinition" -
// worth renaming across the cmd packages in a dedicated change.
var commandDefintion = &cobra.Command{
	Use:   "obscure password",
	Short: `Obscure password for use in the rclone.conf`,
	Run: func(command *cobra.Command, args []string) {
		// exactly one argument - the password to obscure
		cmd.CheckArgs(1, 1, command, args)
		cmd.Run(false, false, command, func() error {
			obscure := fs.MustObscure(args[0])
			fmt.Println(obscure)
			return nil
		})
	},
}
|
||||
28
cmd/purge/purge.go
Normal file
28
cmd/purge/purge.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package purge
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// register the command with the root command
func init() {
	cmd.Root.AddCommand(commandDefintion)
}

// NOTE(review): "commandDefintion" is a typo for "commandDefinition" -
// worth renaming across the cmd packages in a dedicated change.
var commandDefintion = &cobra.Command{
	Use:   "purge remote:path",
	Short: `Remove the path and all of its contents.`,
	Long: `
Remove the path and all of its contents. Note that this does not obey
include/exclude filters - everything will be removed. Use ` + "`" + `delete` + "`" + ` if
you want to selectively delete files.
`,
	Run: func(command *cobra.Command, args []string) {
		// exactly one argument - the remote path to purge
		cmd.CheckArgs(1, 1, command, args)
		fdst := cmd.NewFsDst(args)
		cmd.Run(true, false, command, func() error {
			return fs.Purge(fdst)
		})
	},
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user