Mirror of https://github.com/rclone/rclone.git, synced 2026-01-12 21:43:13 +00:00

Compare commits

1742 Commits
[Commit table omitted: only bare SHA1 values survived extraction; the Author, Date, and message columns were empty.]
.appveyor.yml (new file, 46 lines)

```yaml
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\ncw\rclone

environment:
  GOPATH: C:\gopath
  CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
  ORIGPATH: '%PATH%'
  NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
  PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
  PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
  PATH: '%PATHCC64%'
  RCLONE_CONFIG_PASS:
    secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=

install:
  - choco install winfsp -y
  - choco install zip -y
  - copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe

build_script:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go install
  - go build
  - make log_since_last_release > %TEMP%\git-log.txt
  - make version > %TEMP%\version
  - set /p RCLONE_VERSION=<%TEMP%\version
  - set PATH=%PATHCC32%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
  - set PATH=%PATHCC64%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%

test_script:
  - make GOTAGS=cmount quicktest

artifacts:
  - path: rclone.exe
  - path: build/*-v*.zip

deploy_script:
  - IF "%APPVEYOR_REPO_BRANCH%" == "master" make upload_beta
```
.circleci/config.yml (new file, 34 lines)

```yaml
version: 2

jobs:

  build:
    machine: true

    working_directory: ~/.go_workspace/src/github.com/ncw/rclone

    steps:
      - checkout

      - run:
          name: Cross-compile rclone
          command: |
            docker pull billziss/xgo-cgofuse
            go get -v github.com/karalabe/xgo
            xgo \
                --image=billziss/xgo-cgofuse \
                --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
                -tags cmount \
                .
            xgo \
                --targets=android/*,ios/* \
                .

      - run:
          name: Prepare artifacts
          command: |
            mkdir -p /tmp/rclone.dist
            cp -R rclone-* /tmp/rclone.dist

      - store_artifacts:
          path: /tmp/rclone.dist
```
.gitignore (vendored, @@ -1,6 +1,7 @@)

```
*~
*.pyc
test-env*
_junk/
rclone
upload
build
docs/public
rclone.iml
.idea
```
.travis.yml (new file, 43 lines)

```yaml
language: go
sudo: false
osx_image: xcode7.3
os:
  - linux
go:
  - 1.6.4
  - 1.7.6
  - 1.8.5
  - 1.9.2
  - tip
install:
  - git fetch --unshallow --tags
  - make vars
  - make build_dep
script:
  - make check
  - make quicktest
  - make compile_all
env:
  global:
    - GOTAGS=cmount
  matrix:
    secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
addons:
  apt:
    packages:
      - fuse
      - libfuse-dev
matrix:
  allow_failures:
    - go: tip
  include:
    - os: osx
      go: 1.9.2
      env: GOTAGS=""
deploy:
  provider: script
  script: make travis_beta
  on:
    branch: master
    go: 1.9.2
    condition: "`uname` == 'Linux'"
```
CONTRIBUTING.md (new file, 277 lines)

# Contributing to rclone #

This is a short guide on how to contribute things to rclone.

## Reporting a bug ##

If you've just got a question or aren't sure if you've found a bug
then please use the [rclone forum](https://forum.rclone.org/) instead
of filing an issue.

When filing an issue, please include the following information if
possible, as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):

* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
  * if the log contains secrets then edit the file with a text editor first to obscure them

## Submitting a pull request ##

If you find a bug that you'd like to fix, or a new feature that you'd
like to implement, then please submit a pull request via Github.

If it is a big feature then make an issue first so it can be discussed.

You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.

First, in your web browser, press the fork button on [rclone's Github
page](https://github.com/ncw/rclone).

Now in your terminal

    go get github.com/ncw/rclone
    cd $GOPATH/src/github.com/ncw/rclone
    git remote rename origin upstream
    git remote add origin git@github.com:YOURUSER/rclone.git

Make a branch to add your new feature

    git checkout -b my-new-feature

And get hacking.

When ready - run the unit tests for the code you changed

    go test -v

Note that you may need to make a test remote, eg `TestSwift` for some
of the unit tests.

Note the top level Makefile targets

* make check
* make test

Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too. These require some extra go
packages which you can install with

* make build_dep

Make sure you

* Add documentation for a new feature (see below for where)
* Add unit tests for a new feature
* squash commits down to one per feature
* rebase to master `git rebase master`

When you are done with that

    git push origin my-new-feature

Go to the Github website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).

Your patch will get reviewed and you might get asked to fix some stuff.

If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to Github with `--force`.

## Testing ##

rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.

    go test -v ./...

rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
run against any of the backends. This is done by making specially
named remotes in the default config file.

If you wanted to test changes in the `drive` backend, then you would
need to make a remote called `TestDrive`.
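A test remote like `TestDrive` is just an ordinary entry in rclone's config file. Here is a minimal sketch of the shape; the credential values are placeholders, not working secrets, and would normally be generated by running `rclone config`:

```ini
[TestDrive]
type = drive
client_id = PLACEHOLDER_CLIENT_ID
client_secret = PLACEHOLDER_CLIENT_SECRET
token = {"access_token":"PLACEHOLDER","token_type":"Bearer","refresh_token":"PLACEHOLDER","expiry":"2018-01-01T00:00:00Z"}
```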
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.

    cd drive
    go test -v

You can then run the integration tests, which test all of rclone's
operations. Normally these get run against the local filing system,
but they can be run against any of the remotes.

    cd ../fs
    go test -v -remote TestDrive:
    go test -v -remote TestDrive: -subdir

If you want to run all the integration tests against all the remotes,
then run in that directory

    go run test_all.go

## Writing Documentation ##

If you are adding a new feature then please update the documentation.

If you add a new flag, then if it is a general flag, document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order. If it is a remote specific flag, then document it
in `docs/content/remote.md`.

The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.

Documentation for rclone sub commands is with their code, eg
`cmd/ls/ls.go`.
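Since rclone's commands are built on the spf13/cobra package pinned in Gopkg.lock below, "documentation with the code" means filling in the `Short` and `Long` fields of the command definition. A minimal sketch of the shape, using a hypothetical `frob` command (the name and behaviour are invented for illustration):

```go
// A hypothetical cmd/frob/frob.go; a sketch of the cobra command shape,
// not a real rclone command.
package frob

import (
	"fmt"

	"github.com/spf13/cobra"
)

var commandDefinition = &cobra.Command{
	Use:   "frob remote:path",
	Short: `Frobnicate the remote.`, // one-line summary shown in command lists
	Long: `Frobnicate the objects in remote:path.

This long help is what "rclone help frob" would print, and it is also
what ends up in the generated docs.`,
	Run: func(command *cobra.Command, args []string) {
		fmt.Println("frob would run here") // placeholder behaviour
	},
}
```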
## Making a release ##

There are separate instructions for making a release in the RELEASE.md
file.

## Commit messages ##

Please make the first line of your commit message a summary of the
change, and prefix it with the directory of the change followed by a
colon. The changelog gets made by looking at just these first lines
so make it good!

If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.

If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
change will get linked into the issue.

Here is an example of a short commit message:

```
drive: add team drive support - fixes #885
```

And here is an example of a longer one:

```
mount: fix hang on errored upload

In certain circumstances if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.

Fixes #1498
```

## Adding a dependency ##

rclone uses the [dep](https://github.com/golang/dep) tool to manage
its dependencies. All code that rclone needs for building is stored
in the `vendor` directory for perfectly reproducible builds.

The `vendor` directory is entirely managed by the `dep` tool.

To add a new dependency

    dep ensure github.com/pkg/errors

You can add constraints on that package (see the `dep` documentation),
but don't unless you really need to.
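For illustration, such a constraint is a short stanza in `Gopkg.toml`. This sketch pins a version range; the version shown is made up for the example, not a recommendation:

```toml
# Gopkg.toml: constrain github.com/pkg/errors to a semver range
[[constraint]]
  name = "github.com/pkg/errors"
  version = "0.8.0"
```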
Please check in the changes generated by dep, including the `vendor`
directory and `Gopkg.toml` and `Gopkg.lock`, in a single commit
separate from any other code changes. Watch out for new files in
`vendor`.

## Updating a dependency ##

If you need to update a dependency then run

    dep ensure -update github.com/pkg/errors

Check in the changes in a single commit as above.

## Updating all the dependencies ##

In order to update all the dependencies then run `make update`. This
just runs `dep ensure -update`. Check in the changes in a single
commit as above.

This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.

## Updating a backend ##

If you update a backend then please run the unit tests and the
integration tests for that backend.

Assuming the backend is called `remote`, create a config entry
called `TestRemote` for the tests to use.

Now `cd remote` and run `go test -v` to run the unit tests.

Then `cd fs` and run `go test -v -remote TestRemote:` to run the
integration tests.

The next section goes into more detail about the tests.

## Writing a new backend ##

Choose a name. The docs here will use `remote` as an example.

Note that in rclone terminology a file system backend is called a
remote or an fs.

Research

* Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes

Getting going

* Create `remote/remote.go` (copy this from a similar remote; a registration sketch follows this list)
  * box is a good one to start from if you have a directory based remote
  * b2 is a good one to start from if you have a bucket based remote
* Add your remote to the imports in `fs/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
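To give a feel for the shape of `remote/remote.go`, here is a minimal sketch of the registration boilerplate. It assumes the `fs.Register`/`fs.RegInfo` pattern visible in the existing backends; the option name and the stubbed `NewFs` are illustrative only:

```go
package remote

import (
	"errors"

	"github.com/ncw/rclone/fs"
)

// init registers the backend with rclone when the package is imported,
// which is why new backends must be added to the imports in fs/all/all.go.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",         // name used in config files, e.g. [remote]
		Description: "Example remote", // shown by `rclone config`
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "endpoint", // illustrative option, not from a real backend
			Help: "Endpoint for the service.",
		}},
	})
}

// NewFs constructs an fs.Fs for "name:root". A real backend would read
// its settings from the config section called name, build an API client,
// and return a type implementing the fs.Fs interface from fs/fs.go.
func NewFs(name, root string) (fs.Fs, error) {
	return nil, errors.New("not implemented") // placeholder
}
```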
Unit tests

* Create a config entry called `TestRemote` for the unit tests to use
* Add your fs to the end of `fstest/fstests/gen_tests.go`
* generate `remote/remote_test.go` unit tests with `cd fstest/fstests; go generate`
* Make sure all tests pass with `go test -v`

Integration tests

* Add your fs to `fs/test_all.go`
* Make sure integration tests pass with
  * `cd fs`
  * `go test -v -remote TestRemote:`
* If you are making a bucket based remote, then check with this also
  * `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
  * `go test -v -remote TestRemote: -fast-list`

Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.

* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone
Gopkg.lock (generated, new file, 314 lines)

```toml
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  branch = "master"
  name = "bazil.org/fuse"
  packages = [".","fs","fuseutil"]
  revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"

[[projects]]
  name = "cloud.google.com/go"
  packages = ["compute/metadata"]
  revision = "f6de2c509ed9d2af648c3c147207eaaf97149aed"
  version = "v0.14.0"

[[projects]]
  name = "github.com/Azure/azure-sdk-for-go"
  packages = ["storage"]
  revision = "2592daf71ab6b95dcfc7f7437ecc1afb9ddb7360"
  version = "v11.0.0-beta"

[[projects]]
  name = "github.com/Azure/go-autorest"
  packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
  revision = "f6be1abbb5abd0517522f850dd785990d373da7e"
  version = "v8.4.0"

[[projects]]
  branch = "master"
  name = "github.com/Unknwon/goconfig"
  packages = ["."]
  revision = "87a46d97951ee1ea20ed3b24c25646a79e87ba5d"

[[projects]]
  branch = "master"
  name = "github.com/VividCortex/ewma"
  packages = ["."]
  revision = "43880d236f695d39c62cf7aa4ebd4508c258e6c0"

[[projects]]
  branch = "master"
  name = "github.com/a8m/tree"
  packages = ["."]
  revision = "5554ed4554293f11a726accc1ebf2bd3342742f8"

[[projects]]
  branch = "master"
  name = "github.com/aws/aws-sdk-go"
  packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/s3iface","service/s3/s3manager","service/sts"]
  revision = "5a2026bfb28e86839f9fcc46523850319399006c"

[[projects]]
  name = "github.com/billziss-gh/cgofuse"
  packages = ["fuse"]
  revision = "3a24389863c5bf906de391226ee8c4ec2c925bfe"
  version = "v1.0.3"

[[projects]]
  branch = "master"
  name = "github.com/coreos/bbolt"
  packages = ["."]
  revision = "a148de800f91fe6cbd8b2a472bbfdc09c4b6568f"

[[projects]]
  name = "github.com/cpuguy83/go-md2man"
  packages = ["md2man"]
  revision = "1d903dcb749992f3741d744c0f8376b4bd7eb3e1"
  version = "v1.0.7"

[[projects]]
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
  version = "v1.1.0"

[[projects]]
  name = "github.com/dgrijalva/jwt-go"
  packages = ["."]
  revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
  version = "v3.0.0"

[[projects]]
  name = "github.com/djherbis/times"
  packages = ["."]
  revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
  version = "v1.0.1"

[[projects]]
  branch = "master"
  name = "github.com/dropbox/dropbox-sdk-go-unofficial"
  packages = ["dropbox","dropbox/async","dropbox/file_properties","dropbox/files"]
  revision = "9befe8c89b6e17667716dedd2f62327bae374bf2"

[[projects]]
  name = "github.com/go-ini/ini"
  packages = ["."]
  revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd"
  version = "v1.28.2"

[[projects]]
  branch = "master"
  name = "github.com/golang/protobuf"
  packages = ["proto"]
  revision = "130e6b02ab059e7b717a096f397c5b60111cae74"

[[projects]]
  branch = "master"
  name = "github.com/google/go-querystring"
  packages = ["query"]
  revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"

[[projects]]
  name = "github.com/inconshreveable/mousetrap"
  packages = ["."]
  revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
  version = "v1.0"

[[projects]]
  branch = "master"
  name = "github.com/jlaffaye/ftp"
  packages = ["."]
  revision = "299b7ff5b6096588cceca2edc1fc9f557002fb85"

[[projects]]
  name = "github.com/jmespath/go-jmespath"
  packages = ["."]
  revision = "0b12d6b5"

[[projects]]
  branch = "master"
  name = "github.com/kr/fs"
  packages = ["."]
  revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"

[[projects]]
  name = "github.com/mattn/go-runewidth"
  packages = ["."]
  revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
  version = "v0.0.2"

[[projects]]
  branch = "master"
  name = "github.com/ncw/go-acd"
  packages = ["."]
  revision = "96a49aad3fc3889629f2eceb004927386884bd92"

[[projects]]
  branch = "master"
  name = "github.com/ncw/swift"
  packages = ["."]
  revision = "c95c6e5c2d1a3d37fc44c8c6dc9e231c7500667d"

[[projects]]
  branch = "master"
  name = "github.com/nsf/termbox-go"
  packages = ["."]
  revision = "4ed959e0540971545eddb8c75514973d670cf739"

[[projects]]
  branch = "master"
  name = "github.com/okzk/sdnotify"
  packages = ["."]
  revision = "ed8ca104421a21947710335006107540e3ecb335"

[[projects]]
  branch = "master"
  name = "github.com/patrickmn/go-cache"
  packages = ["."]
  revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"

[[projects]]
  name = "github.com/pengsrc/go-shared"
  packages = ["check","convert","json","yaml"]
  revision = "454950d6a0782c34427d4f29b46c6bf447256f20"
  version = "v0.0.8"

[[projects]]
  branch = "master"
  name = "github.com/pkg/errors"
  packages = ["."]
  revision = "2b3a18b5f0fb6b4f9190549597d3f962c02bc5eb"

[[projects]]
  branch = "master"
  name = "github.com/pkg/sftp"
  packages = ["."]
  revision = "7c1f7a370726a2457b33b29baefc2402b4965c65"

[[projects]]
  name = "github.com/pmezard/go-difflib"
  packages = ["difflib"]
  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "github.com/rfjakob/eme"
  packages = ["."]
  revision = "7c8316a9cb0a6af865265f899f5de6aadb31a24b"

[[projects]]
  name = "github.com/russross/blackfriday"
  packages = ["."]
  revision = "4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c"
  version = "v1.5"

[[projects]]
  name = "github.com/satori/uuid"
  packages = ["."]
  revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
  version = "v1.1.0"

[[projects]]
  name = "github.com/sirupsen/logrus"
  packages = ["."]
  revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
  version = "v1.0.3"

[[projects]]
  branch = "master"
  name = "github.com/skratchdot/open-golang"
  packages = ["open"]
  revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"

[[projects]]
  branch = "master"
  name = "github.com/spf13/cobra"
  packages = [".","doc"]
  revision = "e5f66de850af3302fbe378c8acded2b0fa55472c"

[[projects]]
  branch = "master"
  name = "github.com/spf13/pflag"
  packages = ["."]
  revision = "7aff26db30c1be810f9de5038ec5ef96ac41fd7c"

[[projects]]
  branch = "master"
  name = "github.com/stretchr/testify"
  packages = ["assert","require"]
  revision = "890a5c3458b43e6104ff5da8dfa139d013d77544"

[[projects]]
  branch = "master"
  name = "github.com/xanzy/ssh-agent"
  packages = ["."]
  revision = "ba9c9e33906f58169366275e3450db66139a31a9"

[[projects]]
  branch = "master"
  name = "github.com/yunify/qingstor-sdk-go"
  packages = [".","config","logger","request","request/builder","request/data","request/errors","request/signer","request/unpacker","service","utils"]
  revision = "088fbd27bd49adf215d02a05c36c5ac2d243d1f1"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = ["curve25519","ed25519","ed25519/internal/edwards25519","nacl/secretbox","pbkdf2","poly1305","salsa20/salsa","scrypt","ssh","ssh/agent","ssh/terminal"]
  revision = "76eec36fa14229c4b25bb894c2d0e591527af429"

[[projects]]
  branch = "master"
  name = "golang.org/x/net"
  packages = ["context","context/ctxhttp","html","html/atom","webdav","webdav/internal/xml"]
  revision = "0a9397675ba34b2845f758fe3cd68828369c6517"

[[projects]]
  branch = "master"
  name = "golang.org/x/oauth2"
  packages = [".","google","internal","jws","jwt"]
  revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix","windows"]
  revision = "314a259e304ff91bd6985da2a7149bbf91237993"

[[projects]]
  branch = "master"
  name = "golang.org/x/text"
  packages = ["internal/gen","internal/triegen","internal/ucd","transform","unicode/cldr","unicode/norm"]
  revision = "1cbadb444a806fd9430d14ad08967ed91da4fa0a"

[[projects]]
  branch = "master"
  name = "golang.org/x/time"
  packages = ["rate"]
  revision = "6dc17368e09b0e8634d71cac8168d853e869a0c7"

[[projects]]
  branch = "master"
  name = "google.golang.org/api"
  packages = ["drive/v2","gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
  revision = "906273f42cdebd65de3a53f30dd9e23de1b55ba9"

[[projects]]
  name = "google.golang.org/appengine"
  packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","log","urlfetch"]
```

[Listing truncated in the source.]
|
||||
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "4dfe75b3ebfff3e7b40e2983d8c014f9e4d98fed08c3fe454f1dc890589e6960"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
Gopkg.toml (new file, 150 lines)
@@ -0,0 +1,150 @@

## Gopkg.toml example (these lines may be deleted)

## "required" lists a set of packages (not projects) that must be included in
## Gopkg.lock. This list is merged with the set of packages imported by the current
## project. Use it when your project needs a package it doesn't explicitly import -
## including "main" packages.
# required = ["github.com/user/thing/cmd/thing"]

## "ignored" lists a set of packages (not projects) that are ignored when
## dep statically analyzes source code. Ignored packages can be in this project,
## or in a dependency.
# ignored = ["github.com/user/project/badpkg"]

## Dependencies define constraints on dependent projects. They are respected by
## dep whether coming from the Gopkg.toml of the current project or a dependency.
# [[constraint]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
#
## Recommended: the version constraint to enforce for the project.
## Only one of "branch", "version" or "revision" can be specified.
# version = "1.0.0"
# branch = "master"
# revision = "abc123"
#
## Optional: an alternate location (URL or import path) for the project's source.
# source = "https://github.com/myfork/package.git"

## Overrides have the same structure as [[constraint]], but supersede all
## [[constraint]] declarations from all projects. Only the current project's
## [[override]] are applied.
##
## Overrides are a sledgehammer. Use them only as a last resort.
# [[override]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
#
## Optional: specifying a version constraint override will cause all other
## constraints on this project to be ignored; only the overridden constraint
## need be satisfied.
## Again, only one of "branch", "version" or "revision" can be specified.
# version = "1.0.0"
# branch = "master"
# revision = "abc123"
#
## Optional: specifying an alternate source location as an override will
## enforce that the alternate location is used for that project, regardless of
## what source location any dependent projects specify.
# source = "https://github.com/myfork/package.git"

[[constraint]]
branch = "master"
name = "bazil.org/fuse"

[[constraint]]
branch = "master"
name = "github.com/Unknwon/goconfig"

[[constraint]]
branch = "master"
name = "github.com/VividCortex/ewma"

[[constraint]]
branch = "master"
name = "github.com/aws/aws-sdk-go"

[[constraint]]
branch = "master"
name = "github.com/ncw/go-acd"

[[constraint]]
branch = "master"
name = "github.com/ncw/swift"

[[constraint]]
branch = "master"
name = "github.com/pkg/errors"

[[constraint]]
branch = "master"
name = "github.com/pkg/sftp"

[[constraint]]
branch = "master"
name = "github.com/rfjakob/eme"

[[constraint]]
branch = "master"
name = "github.com/skratchdot/open-golang"

[[constraint]]
branch = "master"
name = "github.com/spf13/cobra"

[[constraint]]
branch = "master"
name = "github.com/spf13/pflag"

[[constraint]]
branch = "master"
name = "github.com/stacktic/dropbox"

[[constraint]]
branch = "master"
name = "github.com/stretchr/testify"

[[constraint]]
branch = "master"
name = "golang.org/x/crypto"

[[constraint]]
branch = "master"
name = "golang.org/x/net"

[[constraint]]
branch = "master"
name = "golang.org/x/oauth2"

[[constraint]]
branch = "master"
name = "golang.org/x/sys"

[[constraint]]
branch = "master"
name = "golang.org/x/text"

[[constraint]]
branch = "master"
name = "google.golang.org/api"

[[constraint]]
branch = "master"
name = "github.com/dropbox/dropbox-sdk-go-unofficial"

[[constraint]]
branch = "master"
name = "github.com/yunify/qingstor-sdk-go"

[[constraint]]
branch = "master"
name = "github.com/coreos/bbolt"

[[constraint]]
branch = "master"
name = "github.com/patrickmn/go-cache"

[[constraint]]
branch = "master"
name = "github.com/okzk/sdnotify"
ISSUE_TEMPLATE.md (new file, 17 lines)
@@ -0,0 +1,17 @@
When filing an issue, please include the following information if possible, as well as a description of the problem. Make sure you test with the latest beta of rclone.

https://beta.rclone.org/
https://rclone.org/downloads/

If you've just got a question or aren't sure if you've found a bug then please use the [rclone forum](https://forum.rclone.org/) instead of filing an issue.

> What is your rclone version (eg output from `rclone -V`)

> Which OS you are using and how many bits (eg Windows 7, 64 bit)

> Which cloud storage system are you using? (eg Google Drive)

> The command you were trying to run (eg `rclone copy /tmp remote:tmp`)

> A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
MAINTAINERS.md (new file, 86 lines)
@@ -0,0 +1,86 @@
# Maintainers guide for rclone #

Current active maintainers of rclone are

* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer

**This is a work-in-progress draft**

This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.

## Triaging Tickets ##

When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether they are valid, so tickets may remain without labels or a milestone for a while.

Rclone uses the labels like this:

* `bug` - a definite verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with the `rclone mount` command
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
* `help wanted` - mark these if you find a self-contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation etc
* `Needs Go 1.XX` - waiting for that version of Go to be released
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
* `Remote: XXX` - which rclone backend this affects
* `thinking` - not decided on the course of action yet

If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.

When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next Go release).

The milestones have these meanings:

* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled for a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or which we aren't going to fix for the moment

Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.

## Closing Tickets ##

Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta with the fix in the ticket, asking for feedback.

## Pull requests ##

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well nowadays, so you can squash and merge or rebase and merge pull requests. rclone doesn't use merge commits. Use the squash and merge option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull`, then run `bin/update-authors.py` to update the authors file, then `git push`.

Sometimes pull requests need to be left open for a while - this is especially true of contributions of new backends, which take a long time to get right.

## Merges ##

If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.

## Release cycle ##

Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly, or for personal reasons.

High impact regressions should be fixed before the next release.

Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big, to let things settle down.

Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming, often needing several rounds of test and fix depending on exactly how many new features rclone has gained.

## Mailing list ##

There is now an invite-only mailing list for rclone developers, `rclone-dev`, on Google Groups.

## TODO ##

I should probably make a dev@rclone.org to register with cloud providers.
MANUAL.html (new file, 6459 lines): diff suppressed because it is too large
MANUAL.txt (new file, 9702 lines): diff suppressed because it is too large
Makefile (new file, 176 lines)
@@ -0,0 +1,176 @@
SHELL = /bin/bash
TAG := $(shell echo `git describe --abbrev=8 --tags`-`git rev-parse --abbrev-ref HEAD` | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//')
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
GO_LATEST := $(findstring go1.9,$(GO_VERSION))
BETA_URL := https://beta.rclone.org/$(TAG)/
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
endif

.PHONY: rclone vars version

rclone:
	touch fs/version.go
	go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
	cp -av `go env GOPATH`/bin/rclone .

vars:
	@echo SHELL="'$(SHELL)'"
	@echo TAG="'$(TAG)'"
	@echo LAST_TAG="'$(LAST_TAG)'"
	@echo NEW_TAG="'$(NEW_TAG)'"
	@echo GO_VERSION="'$(GO_VERSION)'"
	@echo GO_LATEST="'$(GO_LATEST)'"
	@echo BETA_URL="'$(BETA_URL)'"

version:
	@echo '$(TAG)'

# Full suite of integration tests
test: rclone
	-go test $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
	-cd fs && go run $(BUILDTAGS) test_all.go 2>&1 | tee test_all.log
	@echo "Written logs in test.log and fs/test_all.log"

# Quick test
quicktest:
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
ifdef GO_LATEST
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
endif

# Do source code quality checks
check: rclone
ifdef GO_LATEST
	go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
	errcheck $(BUILDTAGS) ./...
	find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
	go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
else
	@echo Skipping tests as not on Go stable
endif

# Get the build dependencies
build_dep:
ifdef GO_LATEST
	go get -u github.com/kisielk/errcheck
	go get -u golang.org/x/tools/cmd/goimports
	go get -u github.com/golang/lint/golint
	go get -u github.com/inconshreveable/mousetrap
	go get -u github.com/tools/godep
endif

# Update dependencies
update:
	go get -u github.com/golang/dep/cmd/dep
	dep ensure -update -v

doc: rclone.1 MANUAL.html MANUAL.txt

rclone.1: MANUAL.md
	pandoc -s --from markdown --to man MANUAL.md -o rclone.1

MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
	./bin/make_manual.py

MANUAL.html: MANUAL.md
	pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html

MANUAL.txt: MANUAL.md
	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
	rclone gendocs docs/content/commands/

install: rclone
	install -d ${DESTDIR}/usr/bin
	install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone

clean:
	go clean ./...
	find . -name \*~ | xargs -r rm -f
	rm -rf build docs/public
	rm -f rclone fs/fs.test fs/test_all.log test.log

website:
	cd docs && hugo

upload_website: website
	rclone -v sync docs/public memstore:www-rclone-org

upload:
	rclone -v copy build/ memstore:downloads-rclone-org

upload_github:
	./bin/upload-github $(TAG)

cross: doc
	go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)

beta:
	go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
	@echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/

log_since_last_release:
	git log $(LAST_TAG)..

upload_beta:
	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
	@echo Beta release ready at $(BETA_URL)

compile_all:
ifdef GO_LATEST
	go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
else
	@echo Skipping compile all as not on Go stable
endif

travis_beta:
	git log $(LAST_TAG).. > /tmp/git-log.txt
	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt -exclude "^windows/" -parallel 8 $(BUILDTAGS) $(TAG)β
	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
	@echo Beta release ready at $(BETA_URL)

# Fetch the windows builds from appveyor
fetch_windows:
	rclone -v copy --include 'rclone-v*-windows-*.zip' memstore:beta-rclone-org/$(TAG) build/
	-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
	-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
	md5sum build/rclone-*-windows-*.zip | sort

serve: website
	cd docs && hugo server -v -w

tag: doc
	@echo "Old tag is $(LAST_TAG)"
	@echo "New tag is $(NEW_TAG)"
	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
	echo -n "$(NEW_TAG)" > docs/layouts/shortcodes/version.html
	git tag $(NEW_TAG)
	@echo "Edit the new changelog in docs/content/changelog.md"
	@echo "  * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
	@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
	@echo "Then commit the changes"
	@echo git commit -m \"Version $(NEW_TAG)\" -a -v
	@echo "And finally run make retag before make cross etc"

retag:
	git tag -f $(LAST_TAG)

startdev:
	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
	git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go

gen_tests:
	cd fstest/fstests && go generate

winzip:
	zip -9 rclone-$(TAG).zip rclone.exe
README.md (351 lines changed)
@@ -1,330 +1,59 @@
Rclone
======
[](https://rclone.org/)

[](http://rclone.org/)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[G+](https://google.com/+RcloneOrg)

Sync files and directories to and from
[](https://travis-ci.org/ncw/rclone)
[](https://ci.appveyor.com/project/ncw/rclone)
[](https://circleci.com/gh/ncw/rclone/tree/master)
[](https://godoc.org/github.com/ncw/rclone)

Rclone is a command line program to sync files and directories to and from

* Amazon Drive
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* HTTP
* Hubic
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* The local filesystem

Features

* MD5SUMs checked at all times for file integrity
* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync mode to make a directory identical
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
* Optional encryption (Crypt)
* Optional FUSE mount

Home page
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.

* http://rclone.org/

Install
-------

Rclone is a Go program and comes as a single binary file.

Download the relevant binary from

* http://www.craig-wood.com/nick/pub/rclone/

Or alternatively if you have Go installed use

    go get github.com/ncw/rclone

and this will build the binary in `$GOPATH/bin`.

You can then modify the source and submit patches.

Configure
---------

First you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file
`.rclone.conf` in your home directory by default. (You can use the
`-config` option to choose a different config file.)

The easiest way to make the config is to run rclone with the config
option, eg

    rclone config

Here is an example of making an s3 configuration

```
$ rclone config
No remotes found - make a new one
n) New remote
q) Quit config
n/q> n
name> remote
What type of source is it?
Choose a number from below
 1) swift
 2) s3
 3) local
 4) drive
type> 2
AWS Access Key ID.
access_key_id> accesskey
AWS Secret Access Key (password).
secret_access_key> secretaccesskey
Endpoint for S3 API.
Choose a number from below, or type in your own value
 * The default endpoint - a good choice if you are unsure.
 * US Region, Northern Virginia or Pacific Northwest.
 * Leave location constraint empty.
 1) https://s3.amazonaws.com/
 * US Region, Northern Virginia only.
 * Leave location constraint empty.
 2) https://s3-external-1.amazonaws.com
[snip]
 * South America (Sao Paulo) Region
 * Needs location constraint sa-east-1.
 9) https://s3-sa-east-1.amazonaws.com
endpoint> 1
Location constraint - must be set to match the Endpoint.
Choose a number from below, or type in your own value
 * Empty for US Region, Northern Virginia or Pacific Northwest.
 1)
 * US West (Oregon) Region.
 2) us-west-2
[snip]
 * South America (Sao Paulo) Region.
 9) sa-east-1
location_constraint> 1
--------------------
[remote]
access_key_id = accesskey
secret_access_key = secretaccesskey
endpoint = https://s3.amazonaws.com/
location_constraint =
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
Current remotes:

Name                 Type
====                 ====
remote               s3

e) Edit existing remote
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> q
```

This can now be used like this

```
rclone lsd remote: - see all buckets/containers
rclone ls remote: - list a bucket
rclone sync /home/local/directory remote:bucket
```

See the next section for more details.

Usage
-----

Rclone syncs a directory tree from local to remote.

Its basic syntax is like this

    Syntax: [options] subcommand <parameters> <parameters...>

See below for how to specify the source and destination paths.

Subcommands
-----------

    rclone copy source:path dest:path

Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.

    rclone sync source:path dest:path

Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the `-dry-run` flag.

    rclone ls [remote:path]

List all the objects in the path.

    rclone lsd [remote:path]

List all directories/objects/buckets in the path.

    rclone mkdir remote:path

Make the path if it doesn't already exist.

    rclone rmdir remote:path

Remove the path. Note that you can't remove a path with
objects in it; use purge for that.

    rclone purge remote:path

Remove the path and all of its contents.

    rclone check source:path dest:path

Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.

General options:

* `-config`: Location of the config file
* `-transfers=4`: Number of file transfers to run in parallel.
* `-checkers=8`: Number of MD5SUM checkers to run in parallel.
* `-dry-run=false`: Do a trial run with no permanent changes
* `-modify-window=1ns`: Max time difference to be considered the same - this is usually set automatically
* `-quiet=false`: Print as little stuff as possible
* `-stats=1m0s`: Interval to print stats
* `-verbose=false`: Print lots more stuff

Developer options:

* `-cpuprofile=""`: Write cpu profile to file

Local Filesystem
----------------

Paths are specified as normal filesystem paths, so

    rclone sync /home/source /tmp/destination

will sync source to destination.

Swift / Rackspace cloudfiles / Memset Memstore
----------------------------------------------

Paths are specified as remote:container (or remote: for the `lsd`
command.)

So to copy a local directory to a swift container called backup:

    rclone sync /home/source swift:backup

The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch.

This is a de facto standard (used in the official python-swiftclient
amongst others) for storing the modification time (as read using
os.Stat) for an object.
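
As a hedged illustration of this convention (not from the rclone source; the file path and the printed header are made up for the example), here is how such an mtime value could be produced in Go:

```
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// Stat a file and render its modification time as floating point
	// seconds since the epoch, as the X-Object-Meta-Mtime convention
	// described above expects.
	info, err := os.Stat("/tmp/example.txt") // illustrative path only
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	secs := float64(info.ModTime().UnixNano()) / 1e9
	fmt.Println("X-Object-Meta-Mtime:", strconv.FormatFloat(secs, 'f', 9, 64))
}
```

The Amazon S3 section below uses the same encoding, just under the `X-Amz-Meta-Mtime` key.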

Amazon S3
---------

Paths are specified as remote:bucket

So to copy a local directory to an s3 container called backup

    rclone sync /home/source s3:backup

The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch.

Google drive
------------

Paths are specified as drive:path. Drive paths may be as deep as required.

The initial setup for drive involves getting a token from Google drive,
which you need to do in your browser. `rclone config` walks you
through it.

Here is an example of how to make a remote called `drv`

```
$ ./rclone config
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> drv
What type of source is it?
Choose a number from below
 1) swift
 2) s3
 3) local
 4) drive
type> 4
Google Application Client Id - leave blank to use rclone's.
client_id>
Google Application Client Secret - leave blank to use rclone's.
client_secret>
Remote config
Go to the following link in your browser
https://accounts.google.com/o/oauth2/auth?access_type=&approval_prompt=&client_id=XXXXXXXXXXXX.apps.googleusercontent.com&redirect_uri=urn%3XXXXX%3Awg%3Aoauth%3XX.0%3Aoob&response_type=code&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&state=state
Log in, then paste the token that is returned in the browser here
Enter verification code> X/XXXXXXXXXXXXXXXXXX-XXXXXXXXX.XXXXXXXXX-XXXXX_XXXXXXX_XXXXXXX
--------------------
[drv]
client_id =
client_secret =
token = {"AccessToken":"xxxx.xxxxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx","Expiry":"2014-03-16T13:57:58.955387075Z","Extra":null}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```

You can then use it like this

    rclone lsd drv:
    rclone ls drv:

To copy a local directory to a drive directory called backup

    rclone copy /home/source drv:backup

Google drive stores modification times accurate to 1 ms.
* https://rclone.org/

License
-------

This is free software under the terms of the MIT license (check the
COPYING file included in this package).

Bugs
----

* Doesn't sync individual files yet, only directories.
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
  * quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
  * eg purging a local directory with subdirectories doesn't work

Contact and support
-------------------

The project website is at:

* https://github.com/ncw/rclone

There you can file bug reports, ask for help or contribute patches.

Authors
-------

* Nick Craig-Wood <nick@craig-wood.com>

Contributors
------------

* Your name goes here!
RELEASE.md (new file, 35 lines)
@@ -0,0 +1,35 @@
Extra required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages

Making a release
* git status - make sure everything is checked in
* Check travis & appveyor builds are green
* make check
* make test
* make tag
* edit docs/content/changelog.md
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX"
* make retag
* # Set the GOPATH for a current stable go compiler
* make cross
* git push --tags origin master
* git push --tags origin master:stable # update the stable branch for packager.io
* # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
* make fetch_windows
* make upload
* make upload_website
* make upload_github
* make startdev
* # announce with forum post, twitter post, G+ post

Early in the next release cycle update the vendored dependencies
* make update
* git status
* git add new files
* carry forward any patches to vendor stuff
* git commit -a -v

Make the version number be just in a file?
amazonclouddrive/amazonclouddrive.go (new file, 1214 lines): diff suppressed because it is too large
amazonclouddrive/amazonclouddrive_test.go (new file, 73 lines)
@@ -0,0 +1,73 @@
// Test AmazonCloudDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package amazonclouddrive_test

import (
	"testing"

	"github.com/ncw/rclone/amazonclouddrive"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
	fstests.RemoteName = "TestAmazonCloudDrive:"
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
azureblob/azureblob.go (new file, 1116 lines): diff suppressed because it is too large
azureblob/azureblob_test.go (new file, 76 lines)
@@ -0,0 +1,76 @@
// Test AzureBlob filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests

// +build go1.7

package azureblob_test

import (
	"testing"

	"github.com/ncw/rclone/azureblob"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*azureblob.Object)(nil))
	fstests.RemoteName = "TestAzureBlob:"
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
azureblob/azureblob_unsupported.go (new file, 6 lines)
@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files"

// +build !go1.7

package azureblob
b2/api/types.go (new file, 301 lines)
@@ -0,0 +1,301 @@
package api

import (
	"fmt"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/ncw/rclone/fs"
)

// Error describes a B2 error response
type Error struct {
	Status  int    `json:"status"`  // The numeric HTTP status code. Always matches the status in the HTTP response.
	Code    string `json:"code"`    // A single-identifier code that identifies the error.
	Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}

// Error satisfies the error interface
func (e *Error) Error() string {
	return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}

// Fatal satisfies the Fatal interface
//
// It indicates which errors should be treated as fatal
func (e *Error) Fatal() bool {
	return e.Status == 403 // 403 errors shouldn't be retried
}

var _ fs.Fataler = (*Error)(nil)
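
// exampleRetry is an editor's illustrative sketch (not part of the
// original file): it shows one way a caller could use the fs.Fataler
// assertion above to stop retrying on errors marked fatal, eg a B2 403.
// The doOnce callback is hypothetical.
func exampleRetry(attempts int, doOnce func() error) (err error) {
	for i := 0; i < attempts; i++ {
		err = doOnce()
		if err == nil {
			return nil
		}
		if f, ok := err.(fs.Fataler); ok && f.Fatal() {
			return err // fatal errors are not retried
		}
	}
	return err
}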

// Account describes a B2 account
type Account struct {
	ID string `json:"accountId"` // The identifier for the account.
}

// Bucket describes a B2 bucket
type Bucket struct {
	ID        string `json:"bucketId"`
	AccountID string `json:"accountId"`
	Name      string `json:"bucketName"`
	Type      string `json:"bucketType"`
}

// Timestamp is a UTC time when this file was uploaded. It is a base
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
// fits in a 64 bit integer such as the type "long" in the programming
// language Java. It is intended to be compatible with Java's time
// long. For example, it can be passed directly into the java call
// Date.setTime(long time).
type Timestamp time.Time

// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
	timestamp := (*time.Time)(t).UTC().UnixNano()
	return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}

// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
	timestamp, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return err
	}
	*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
	return nil
}
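
// exampleTimestampRoundTrip is an editor's illustrative sketch (not part
// of the original file): B2 sends upload times as decimal milliseconds
// since the epoch, so the string "1452802803026" (the example value used
// for src_last_modified_millis further down) unmarshals to a UTC instant
// on 2016-01-14 and marshals back to exactly the same digits.
func exampleTimestampRoundTrip() {
	var ts Timestamp
	if err := ts.UnmarshalJSON([]byte("1452802803026")); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	out, _ := ts.MarshalJSON()
	fmt.Printf("%v -> %s\n", time.Time(ts).UTC(), out)
}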

const versionFormat = "-v2006-01-02-150405.000"

// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := (time.Time)(t).Format(versionFormat)
	// Replace the '.' with a '-'
	s = strings.Replace(s, ".", "-", -1)
	return base + s + ext
}

// RemoveVersion removes the timestamp from a filename as a version string.
//
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
	newRemote = remote
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	if len(base) < len(versionFormat) {
		return
	}
	versionStart := len(base) - len(versionFormat)
	// Check it ends in -xxx
	if base[len(base)-4] != '-' {
		return
	}
	// Replace with .xxx for parsing
	base = base[:len(base)-4] + "." + base[len(base)-3:]
	newT, err := time.Parse(versionFormat, base[versionStart:])
	if err != nil {
		return
	}
	return Timestamp(newT), base[:versionStart] + ext
}
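
// exampleVersioning is an editor's illustrative sketch (not part of the
// original file): with the versionFormat above, a file "data.txt"
// stamped 2017-06-07 08:09:10.123 UTC becomes
// "data-v2017-06-07-080910-123.txt", and RemoveVersion recovers both
// the original name and the timestamp.
func exampleVersioning() {
	when := Timestamp(time.Date(2017, 6, 7, 8, 9, 10, 123e6, time.UTC))
	versioned := when.AddVersion("data.txt")
	fmt.Println(versioned) // data-v2017-06-07-080910-123.txt
	ts, plain := RemoveVersion(versioned)
	fmt.Println(plain, time.Time(ts).UTC()) // data.txt 2017-06-07 08:09:10.123 +0000 UTC
}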

// IsZero returns true if the timestamp is uninitialised
func (t Timestamp) IsZero() bool {
	return (time.Time)(t).IsZero()
}

// Equal compares two timestamps
//
// If either is zero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
	if (time.Time)(t).IsZero() {
		return false
	}
	if (time.Time)(s).IsZero() {
		return false
	}
	return (time.Time)(t).Equal((time.Time)(s))
}

// File is info about a file
type File struct {
	ID              string            `json:"fileId"`          // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name            string            `json:"fileName"`        // The name of this file, which can be used with b2_download_file_by_name.
	Action          string            `json:"action"`          // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
	Size            int64             `json:"size"`            // The number of bytes in the file.
	UploadTimestamp Timestamp         `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
	SHA1            string            `json:"contentSha1"`     // The SHA1 of the bytes stored in the file.
	ContentType     string            `json:"contentType"`     // The MIME type of the file.
	Info            map[string]string `json:"fileInfo"`        // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}

// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
	AccountID          string `json:"accountId"`          // The identifier for the account.
	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
	APIURL             string `json:"apiUrl"`             // The base URL to use for all API calls except for uploading and downloading files.
	DownloadURL        string `json:"downloadUrl"`        // The base URL to use for downloading files.
}

// ListBucketsResponse is as returned from the b2_list_buckets call
type ListBucketsResponse struct {
	Buckets []Bucket `json:"buckets"`
}

// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
type ListFileNamesRequest struct {
	BucketID      string `json:"bucketId"`                // required - The bucket to look for file names in.
	StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this name will be returned.
	MaxFileCount  int    `json:"maxFileCount,omitempty"`  // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
	StartFileID   string `json:"startFileId,omitempty"`   // optional - What to pass in to startFileId for the next search to continue where this one left off.
	Prefix        string `json:"prefix,omitempty"`        // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files.
	Delimiter     string `json:"delimiter,omitempty"`     // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
}

// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
type ListFileNamesResponse struct {
	Files        []File  `json:"files"`        // An array of objects, each one describing one file.
	NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
	NextFileID   *string `json:"nextFileId"`   // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
}
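
// exampleListAll is an editor's illustrative sketch (not part of the
// original file) of how the Next* cursors above pair with the Start*
// request fields to page through b2_list_file_names; callPage stands in
// for the actual HTTP call and is hypothetical.
func exampleListAll(bucketID string, callPage func(ListFileNamesRequest) (ListFileNamesResponse, error)) ([]File, error) {
	var all []File
	req := ListFileNamesRequest{BucketID: bucketID, MaxFileCount: 1000}
	for {
		resp, err := callPage(req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Files...)
		if resp.NextFileName == nil {
			return all, nil // no more pages
		}
		// Continue the listing where this page left off.
		req.StartFileName = *resp.NextFileName
		if resp.NextFileID != nil {
			req.StartFileID = *resp.NextFileID
		}
	}
}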

// GetUploadURLRequest is passed to b2_get_upload_url
type GetUploadURLRequest struct {
	BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
}

// GetUploadURLResponse is received from b2_get_upload_url
type GetUploadURLResponse struct {
	BucketID           string `json:"bucketId"`           // The unique ID of the bucket.
	UploadURL          string `json:"uploadUrl"`          // The URL that can be used to upload files to this bucket, see b2_upload_file.
	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}

// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct {
	ID              string            `json:"fileId"`          // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name            string            `json:"fileName"`        // The name of this file, which can be used with b2_download_file_by_name.
	Action          string            `json:"action"`          // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
	AccountID       string            `json:"accountId"`       // Your account ID.
	BucketID        string            `json:"bucketId"`        // The bucket that the file is in.
	Size            int64             `json:"contentLength"`   // The number of bytes stored in the file.
	UploadTimestamp Timestamp         `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
	SHA1            string            `json:"contentSha1"`     // The SHA1 of the bytes stored in the file.
	ContentType     string            `json:"contentType"`     // The MIME type of the file.
	Info            map[string]string `json:"fileInfo"`        // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}

// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
	AccountID string `json:"accountId"`
	Name      string `json:"bucketName"`
	Type      string `json:"bucketType"`
}

// DeleteBucketRequest is used to delete a bucket
type DeleteBucketRequest struct {
	ID        string `json:"bucketId"`
	AccountID string `json:"accountId"`
}

// DeleteFileRequest is used to delete a file version
type DeleteFileRequest struct {
	ID   string `json:"fileId"`   // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
	Name string `json:"fileName"` // The name of this file.
}

// HideFileRequest is used to hide a file
type HideFileRequest struct {
	BucketID string `json:"bucketId"` // The bucket containing the file to hide.
	Name     string `json:"fileName"` // The name of the file to hide.
}

// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
type GetFileInfoRequest struct {
	ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
}

// StartLargeFileRequest (b2_start_large_file) prepares for uploading the parts of a large file.
//
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
// Java call Date.setTime(long time).
//
// If the caller knows the SHA1 of the entire large file being
// uploaded, Backblaze recommends using large_file_sha1 as the name,
// and a 40 byte hex string representing the SHA1.
//
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct {
	BucketID    string            `json:"bucketId"`    // The ID of the bucket that the file will go in.
	Name        string            `json:"fileName"`    // The name of the file. See Files for requirements on file names.
	ContentType string            `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
	Info        map[string]string `json:"fileInfo"`    // A JSON object holding the name/value pairs for the custom file info.
}

// StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct {
	ID              string            `json:"fileId"`          // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name            string            `json:"fileName"`        // The name of this file, which can be used with b2_download_file_by_name.
	AccountID       string            `json:"accountId"`       // The identifier for the account.
	BucketID        string            `json:"bucketId"`        // The unique ID of the bucket.
	ContentType     string            `json:"contentType"`     // The MIME type of the file.
	Info            map[string]string `json:"fileInfo"`        // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
	UploadTimestamp Timestamp         `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
}

// GetUploadPartURLRequest is passed to b2_get_upload_part_url
type GetUploadPartURLRequest struct {
	ID string `json:"fileId"` // The unique identifier of the file being uploaded.
||||
}
|
||||
|
||||
// GetUploadPartURLResponse is received from b2_get_upload_url
|
||||
type GetUploadPartURLResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_part.
|
||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part.
|
||||
}
|
||||
|
||||
// UploadPartResponse is the response to b2_upload_part
|
||||
type UploadPartResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
|
||||
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
}
|
||||
|
||||
// FinishLargeFileRequest is passed to b2_finish_large_file
|
||||
//
|
||||
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
|
||||
//
|
||||
// Large files do not have a SHA1 checksum. The value will always be "none".
|
||||
type FinishLargeFileRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of the part 1 is the first string in the array, at index 0.
|
||||
}
|
||||
|
||||
// CancelLargeFileRequest is passed to b2_finish_large_file
|
||||
//
|
||||
// The response is a CancelLargeFileResponse
|
||||
type CancelLargeFileRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
}
|
||||
|
||||
// CancelLargeFileResponse is the response to CancelLargeFileRequest
|
||||
type CancelLargeFileResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
Name string `json:"fileName"` // The name of this file.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
}
|
||||
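The NextFileName/NextFileID fields drive listing continuation: a caller repeats the request, feeding them back as startFileName/startFileId until both come back null. A minimal sketch of that loop, assuming a hypothetical listPage helper standing in for the actual b2_list_file_versions HTTP call:

// Hypothetical sketch only: page through a listing using
// ListFileNamesResponse. listPage wraps the real API call.
func listAll(listPage func(startName, startID string) (*ListFileNamesResponse, error)) ([]File, error) {
    var all []File
    startName, startID := "", ""
    for {
        page, err := listPage(startName, startID)
        if err != nil {
            return nil, err
        }
        all = append(all, page.Files...)
        if page.NextFileName == nil || page.NextFileID == nil {
            return all, nil // a null pointer means there are no more files
        }
        startName, startID = *page.NextFileName, *page.NextFileID
    }
}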
87
b2/api/types_test.go
Normal file
@@ -0,0 +1,87 @@
package api_test

import (
    "testing"
    "time"

    "github.com/ncw/rclone/b2/api"
    "github.com/ncw/rclone/fstest"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

var (
    emptyT api.Timestamp
    t0     = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
    t0r    = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
    t1     = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)

func TestTimestampMarshalJSON(t *testing.T) {
    resB, err := t0.MarshalJSON()
    res := string(resB)
    require.NoError(t, err)
    assert.Equal(t, "3661123", res)

    resB, err = t1.MarshalJSON()
    res = string(resB)
    require.NoError(t, err)
    assert.Equal(t, "981173106123", res)
}

func TestTimestampUnmarshalJSON(t *testing.T) {
    var tActual api.Timestamp
    err := tActual.UnmarshalJSON([]byte("981173106123"))
    require.NoError(t, err)
    assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}

func TestTimestampAddVersion(t *testing.T) {
    for _, test := range []struct {
        t        api.Timestamp
        in       string
        expected string
    }{
        {t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
        {t1, "potato", "potato-v2001-02-03-040506-123"},
        {t1, "", "-v2001-02-03-040506-123"},
    } {
        actual := test.t.AddVersion(test.in)
        assert.Equal(t, test.expected, actual, test.in)
    }
}

func TestTimestampRemoveVersion(t *testing.T) {
    for _, test := range []struct {
        in             string
        expectedT      api.Timestamp
        expectedRemote string
    }{
        {"potato.txt", emptyT, "potato.txt"},
        {"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
        {"potato-v2001-02-03-040506-123", t1, "potato"},
        {"-v2001-02-03-040506-123", t1, ""},
        {"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
        {"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
    } {
        actualT, actualRemote := api.RemoveVersion(test.in)
        assert.Equal(t, test.expectedT, actualT, test.in)
        assert.Equal(t, test.expectedRemote, actualRemote, test.in)
    }
}

func TestTimestampIsZero(t *testing.T) {
    assert.True(t, emptyT.IsZero())
    assert.False(t, t0.IsZero())
    assert.False(t, t1.IsZero())
}

func TestTimestampEqual(t *testing.T) {
    assert.False(t, emptyT.Equal(emptyT))
    assert.False(t, t0.Equal(emptyT))
    assert.False(t, emptyT.Equal(t0))
    assert.False(t, t0.Equal(t1))
    assert.False(t, t1.Equal(t0))
    assert.True(t, t0.Equal(t0))
    assert.True(t, t1.Equal(t1))
}
170
b2/b2_internal_test.go
Normal file
@@ -0,0 +1,170 @@
package b2

import (
    "testing"
    "time"

    "github.com/ncw/rclone/fstest"
)

// Test b2 string encoding
// https://www.backblaze.com/b2/docs/string_encoding.html

var encodeTest = []struct {
    fullyEncoded     string
    minimallyEncoded string
    plainText        string
}{
    {fullyEncoded: "%20", minimallyEncoded: "+", plainText: " "},
    {fullyEncoded: "%21", minimallyEncoded: "!", plainText: "!"},
    {fullyEncoded: "%22", minimallyEncoded: "%22", plainText: "\""},
    {fullyEncoded: "%23", minimallyEncoded: "%23", plainText: "#"},
    {fullyEncoded: "%24", minimallyEncoded: "$", plainText: "$"},
    {fullyEncoded: "%25", minimallyEncoded: "%25", plainText: "%"},
    {fullyEncoded: "%26", minimallyEncoded: "%26", plainText: "&"},
    {fullyEncoded: "%27", minimallyEncoded: "'", plainText: "'"},
    {fullyEncoded: "%28", minimallyEncoded: "(", plainText: "("},
    {fullyEncoded: "%29", minimallyEncoded: ")", plainText: ")"},
    {fullyEncoded: "%2A", minimallyEncoded: "*", plainText: "*"},
    {fullyEncoded: "%2B", minimallyEncoded: "%2B", plainText: "+"},
    {fullyEncoded: "%2C", minimallyEncoded: "%2C", plainText: ","},
    {fullyEncoded: "%2D", minimallyEncoded: "-", plainText: "-"},
    {fullyEncoded: "%2E", minimallyEncoded: ".", plainText: "."},
    {fullyEncoded: "%2F", minimallyEncoded: "/", plainText: "/"},
    {fullyEncoded: "%30", minimallyEncoded: "0", plainText: "0"},
    {fullyEncoded: "%31", minimallyEncoded: "1", plainText: "1"},
    {fullyEncoded: "%32", minimallyEncoded: "2", plainText: "2"},
    {fullyEncoded: "%33", minimallyEncoded: "3", plainText: "3"},
    {fullyEncoded: "%34", minimallyEncoded: "4", plainText: "4"},
    {fullyEncoded: "%35", minimallyEncoded: "5", plainText: "5"},
    {fullyEncoded: "%36", minimallyEncoded: "6", plainText: "6"},
    {fullyEncoded: "%37", minimallyEncoded: "7", plainText: "7"},
    {fullyEncoded: "%38", minimallyEncoded: "8", plainText: "8"},
    {fullyEncoded: "%39", minimallyEncoded: "9", plainText: "9"},
    {fullyEncoded: "%3A", minimallyEncoded: ":", plainText: ":"},
    {fullyEncoded: "%3B", minimallyEncoded: ";", plainText: ";"},
    {fullyEncoded: "%3C", minimallyEncoded: "%3C", plainText: "<"},
    {fullyEncoded: "%3D", minimallyEncoded: "=", plainText: "="},
    {fullyEncoded: "%3E", minimallyEncoded: "%3E", plainText: ">"},
    {fullyEncoded: "%3F", minimallyEncoded: "%3F", plainText: "?"},
    {fullyEncoded: "%40", minimallyEncoded: "@", plainText: "@"},
    {fullyEncoded: "%41", minimallyEncoded: "A", plainText: "A"},
    {fullyEncoded: "%42", minimallyEncoded: "B", plainText: "B"},
    {fullyEncoded: "%43", minimallyEncoded: "C", plainText: "C"},
    {fullyEncoded: "%44", minimallyEncoded: "D", plainText: "D"},
    {fullyEncoded: "%45", minimallyEncoded: "E", plainText: "E"},
    {fullyEncoded: "%46", minimallyEncoded: "F", plainText: "F"},
    {fullyEncoded: "%47", minimallyEncoded: "G", plainText: "G"},
    {fullyEncoded: "%48", minimallyEncoded: "H", plainText: "H"},
    {fullyEncoded: "%49", minimallyEncoded: "I", plainText: "I"},
    {fullyEncoded: "%4A", minimallyEncoded: "J", plainText: "J"},
    {fullyEncoded: "%4B", minimallyEncoded: "K", plainText: "K"},
    {fullyEncoded: "%4C", minimallyEncoded: "L", plainText: "L"},
    {fullyEncoded: "%4D", minimallyEncoded: "M", plainText: "M"},
    {fullyEncoded: "%4E", minimallyEncoded: "N", plainText: "N"},
    {fullyEncoded: "%4F", minimallyEncoded: "O", plainText: "O"},
    {fullyEncoded: "%50", minimallyEncoded: "P", plainText: "P"},
    {fullyEncoded: "%51", minimallyEncoded: "Q", plainText: "Q"},
    {fullyEncoded: "%52", minimallyEncoded: "R", plainText: "R"},
    {fullyEncoded: "%53", minimallyEncoded: "S", plainText: "S"},
    {fullyEncoded: "%54", minimallyEncoded: "T", plainText: "T"},
    {fullyEncoded: "%55", minimallyEncoded: "U", plainText: "U"},
    {fullyEncoded: "%56", minimallyEncoded: "V", plainText: "V"},
    {fullyEncoded: "%57", minimallyEncoded: "W", plainText: "W"},
    {fullyEncoded: "%58", minimallyEncoded: "X", plainText: "X"},
    {fullyEncoded: "%59", minimallyEncoded: "Y", plainText: "Y"},
    {fullyEncoded: "%5A", minimallyEncoded: "Z", plainText: "Z"},
    {fullyEncoded: "%5B", minimallyEncoded: "%5B", plainText: "["},
    {fullyEncoded: "%5C", minimallyEncoded: "%5C", plainText: "\\"},
    {fullyEncoded: "%5D", minimallyEncoded: "%5D", plainText: "]"},
    {fullyEncoded: "%5E", minimallyEncoded: "%5E", plainText: "^"},
    {fullyEncoded: "%5F", minimallyEncoded: "_", plainText: "_"},
    {fullyEncoded: "%60", minimallyEncoded: "%60", plainText: "`"},
    {fullyEncoded: "%61", minimallyEncoded: "a", plainText: "a"},
    {fullyEncoded: "%62", minimallyEncoded: "b", plainText: "b"},
    {fullyEncoded: "%63", minimallyEncoded: "c", plainText: "c"},
    {fullyEncoded: "%64", minimallyEncoded: "d", plainText: "d"},
    {fullyEncoded: "%65", minimallyEncoded: "e", plainText: "e"},
    {fullyEncoded: "%66", minimallyEncoded: "f", plainText: "f"},
    {fullyEncoded: "%67", minimallyEncoded: "g", plainText: "g"},
    {fullyEncoded: "%68", minimallyEncoded: "h", plainText: "h"},
    {fullyEncoded: "%69", minimallyEncoded: "i", plainText: "i"},
    {fullyEncoded: "%6A", minimallyEncoded: "j", plainText: "j"},
    {fullyEncoded: "%6B", minimallyEncoded: "k", plainText: "k"},
    {fullyEncoded: "%6C", minimallyEncoded: "l", plainText: "l"},
    {fullyEncoded: "%6D", minimallyEncoded: "m", plainText: "m"},
    {fullyEncoded: "%6E", minimallyEncoded: "n", plainText: "n"},
    {fullyEncoded: "%6F", minimallyEncoded: "o", plainText: "o"},
    {fullyEncoded: "%70", minimallyEncoded: "p", plainText: "p"},
    {fullyEncoded: "%71", minimallyEncoded: "q", plainText: "q"},
    {fullyEncoded: "%72", minimallyEncoded: "r", plainText: "r"},
    {fullyEncoded: "%73", minimallyEncoded: "s", plainText: "s"},
    {fullyEncoded: "%74", minimallyEncoded: "t", plainText: "t"},
    {fullyEncoded: "%75", minimallyEncoded: "u", plainText: "u"},
    {fullyEncoded: "%76", minimallyEncoded: "v", plainText: "v"},
    {fullyEncoded: "%77", minimallyEncoded: "w", plainText: "w"},
    {fullyEncoded: "%78", minimallyEncoded: "x", plainText: "x"},
    {fullyEncoded: "%79", minimallyEncoded: "y", plainText: "y"},
    {fullyEncoded: "%7A", minimallyEncoded: "z", plainText: "z"},
    {fullyEncoded: "%7B", minimallyEncoded: "%7B", plainText: "{"},
    {fullyEncoded: "%7C", minimallyEncoded: "%7C", plainText: "|"},
    {fullyEncoded: "%7D", minimallyEncoded: "%7D", plainText: "}"},
    {fullyEncoded: "%7E", minimallyEncoded: "~", plainText: "~"},
    {fullyEncoded: "%7F", minimallyEncoded: "%7F", plainText: "\u007f"},
    {fullyEncoded: "%E8%87%AA%E7%94%B1", minimallyEncoded: "%E8%87%AA%E7%94%B1", plainText: "自由"},
    {fullyEncoded: "%F0%90%90%80", minimallyEncoded: "%F0%90%90%80", plainText: "𐐀"},
}

func TestUrlEncode(t *testing.T) {
    for _, test := range encodeTest {
        got := urlEncode(test.plainText)
        if got != test.minimallyEncoded && got != test.fullyEncoded {
            t.Errorf("urlEncode(%q) got %q wanted %q or %q", test.plainText, got, test.minimallyEncoded, test.fullyEncoded)
        }
    }
}

func TestTimeString(t *testing.T) {
    for _, test := range []struct {
        in   time.Time
        want string
    }{
        {fstest.Time("1970-01-01T00:00:00.000000000Z"), "0"},
        {fstest.Time("2001-02-03T04:05:10.123123123Z"), "981173110123"},
        {fstest.Time("2001-02-03T05:05:10.123123123+01:00"), "981173110123"},
    } {
        got := timeString(test.in)
        if test.want != got {
            t.Logf("%v: want %v got %v", test.in, test.want, got)
        }
    }

}

func TestParseTimeString(t *testing.T) {
    for _, test := range []struct {
        in        string
        want      time.Time
        wantError string
    }{
        {"0", fstest.Time("1970-01-01T00:00:00.000000000Z"), ""},
        {"981173110123", fstest.Time("2001-02-03T04:05:10.123000000Z"), ""},
        {"", time.Time{}, ""},
        {"potato", time.Time{}, `strconv.ParseInt: parsing "potato": invalid syntax`},
    } {
        o := Object{}
        err := o.parseTimeString(test.in)
        got := o.modTime
        var gotError string
        if err != nil {
            gotError = err.Error()
        }
        if test.want != got {
            t.Logf("%v: want %v got %v", test.in, test.want, got)
        }
        if test.wantError != gotError {
            t.Logf("%v: want error %v got error %v", test.in, test.wantError, gotError)
        }
    }

}
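The table above encodes B2's rule that a file name byte may be passed through wherever it is URL-safe; only the reserved bytes must be percent-encoded, and a space may additionally become "+". The real urlEncode lives in b2.go, which is not part of this hunk; a rough standalone sketch consistent with the table (assuming "fmt" and "strings" are imported) might look like:

// Sketch only: percent-encode the bytes the table marks as reserved and
// pass the rest through. Multi-byte UTF-8 runes are encoded byte by byte,
// matching the 自由 and 𐐀 rows above. Space comes out as %20, which
// TestUrlEncode accepts as the fully encoded form.
func urlEncodeSketch(in string) string {
    const safe = "!$'()*-./0123456789:;=@ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~"
    var out []byte
    for i := 0; i < len(in); i++ {
        c := in[i]
        if strings.IndexByte(safe, c) >= 0 {
            out = append(out, c)
        } else {
            out = append(out, fmt.Sprintf("%%%02X", c)...)
        }
    }
    return string(out)
}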
73
b2/b2_test.go
Normal file
@@ -0,0 +1,73 @@
// Test B2 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package b2_test

import (
    "testing"

    "github.com/ncw/rclone/b2"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
    fstests.NilObject = fs.Object((*b2.Object)(nil))
    fstests.RemoteName = "TestB2:"
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
425
b2/upload.go
Normal file
@@ -0,0 +1,425 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/b2/docs/large_files.html

package b2

import (
    "bytes"
    "crypto/sha1"
    "encoding/hex"
    "fmt"
    "hash"
    "io"
    "strings"
    "sync"

    "github.com/ncw/rclone/b2/api"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/rest"
    "github.com/pkg/errors"
)

type hashAppendingReader struct {
    h         hash.Hash
    in        io.Reader
    hexSum    string
    hexReader io.Reader
}

// Read returns all bytes from the original reader, then the hex sum
// of what was read so far, then EOF.
func (har *hashAppendingReader) Read(b []byte) (int, error) {
    if har.hexReader == nil {
        n, err := har.in.Read(b)
        if err == io.EOF {
            har.in = nil // allow GC
            err = nil    // allow reading hexSum before EOF

            har.hexSum = hex.EncodeToString(har.h.Sum(nil))
            har.hexReader = strings.NewReader(har.hexSum)
        }
        return n, err
    }
    return har.hexReader.Read(b)
}

// AdditionalLength returns how many bytes the appended hex sum will take up.
func (har *hashAppendingReader) AdditionalLength() int {
    return hex.EncodedLen(har.h.Size())
}

// HexSum returns the hash sum as hex. It's only available after the original
// reader has EOF'd. It's an empty string before that.
func (har *hashAppendingReader) HexSum() string {
    return har.hexSum
}

// newHashAppendingReader takes a Reader and a Hash and will append the hex sum
// after the original reader reaches EOF. The increased size depends on the
// given hash, which may be queried through AdditionalLength()
func newHashAppendingReader(in io.Reader, h hash.Hash) *hashAppendingReader {
    withHash := io.TeeReader(in, h)
    return &hashAppendingReader{h: h, in: withHash}
}

// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
    f        *Fs                             // parent Fs
    o        *Object                         // object being uploaded
    in       io.Reader                       // read the data from here
    id       string                          // ID of the file being uploaded
    size     int64                           // total size
    parts    int64                           // calculated number of parts, if known
    sha1s    []string                        // slice of SHA1s for each part
    uploadMu sync.Mutex                      // lock for upload variable
    uploads  []*api.GetUploadPartURLResponse // result of get upload URL calls
}

// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
    remote := o.remote
    size := src.Size()
    parts := int64(0)
    sha1SliceSize := int64(maxParts)
    if size == -1 {
        fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
    } else {
        parts = size / int64(chunkSize)
        if size%int64(chunkSize) != 0 {
            parts++
        }
        if parts > maxParts {
            return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
        }
        sha1SliceSize = parts
    }

    modTime := src.ModTime()
    opts := rest.Opts{
        Method: "POST",
        Path:   "/b2_start_large_file",
    }
    bucketID, err := f.getBucketID()
    if err != nil {
        return nil, err
    }
    var request = api.StartLargeFileRequest{
        BucketID:    bucketID,
        Name:        o.fs.root + remote,
        ContentType: fs.MimeType(src),
        Info: map[string]string{
            timeKey: timeString(modTime),
        },
    }
    // Set the SHA1 if known
    if calculatedSha1, err := src.Hash(fs.HashSHA1); err == nil && calculatedSha1 != "" {
        request.Info[sha1Key] = calculatedSha1
    }
    var response api.StartLargeFileResponse
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(&opts, &request, &response)
        return f.shouldRetry(resp, err)
    })
    if err != nil {
        return nil, err
    }
    up = &largeUpload{
        f:     f,
        o:     o,
        in:    in,
        id:    response.ID,
        size:  size,
        parts: parts,
        sha1s: make([]string, sha1SliceSize),
    }
    return up, nil
}

// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
    up.uploadMu.Lock()
    defer up.uploadMu.Unlock()
    if len(up.uploads) == 0 {
        opts := rest.Opts{
            Method: "POST",
            Path:   "/b2_get_upload_part_url",
        }
        var request = api.GetUploadPartURLRequest{
            ID: up.id,
        }
        err := up.f.pacer.Call(func() (bool, error) {
            resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
            return up.f.shouldRetry(resp, err)
        })
        if err != nil {
            return nil, errors.Wrap(err, "failed to get upload URL")
        }
    } else {
        upload, up.uploads = up.uploads[0], up.uploads[1:]
    }
    return upload, nil
}

// returnUploadURL returns the UploadURL to the cache
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
    if upload == nil {
        return
    }
    up.uploadMu.Lock()
    up.uploads = append(up.uploads, upload)
    up.uploadMu.Unlock()
}

// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
    up.uploadMu.Lock()
    up.uploads = nil
    up.uploadMu.Unlock()
}

// Transfer a chunk
func (up *largeUpload) transferChunk(part int64, body []byte) error {
    err := up.f.pacer.Call(func() (bool, error) {
        fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))

        // Get upload URL
        upload, err := up.getUploadURL()
        if err != nil {
            return false, err
        }

        in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
        size := int64(len(body)) + int64(in.AdditionalLength())

        // Authorization
        //
        // An upload authorization token, from b2_get_upload_part_url.
        //
        // X-Bz-Part-Number
        //
        // A number from 1 to 10000. The parts uploaded for one file
        // must have contiguous numbers, starting with 1.
        //
        // Content-Length
        //
        // The number of bytes in the file being uploaded. Note that
        // this header is required; you cannot leave it out and just
        // use chunked encoding. The minimum size of every part but
        // the last one is 100MB.
        //
        // X-Bz-Content-Sha1
        //
        // The SHA1 checksum of this part of the file. B2 will
        // check this when the part is uploaded, to make sure that the
        // data arrived correctly. The same SHA1 checksum must be
        // passed to b2_finish_large_file.
        opts := rest.Opts{
            Method:  "POST",
            RootURL: upload.UploadURL,
            Body:    fs.AccountPart(up.o, in),
            ExtraHeaders: map[string]string{
                "Authorization":    upload.AuthorizationToken,
                "X-Bz-Part-Number": fmt.Sprintf("%d", part),
                sha1Header:         "hex_digits_at_end",
            },
            ContentLength: &size,
        }

        var response api.UploadPartResponse

        resp, err := up.f.srv.CallJSON(&opts, nil, &response)
        retry, err := up.f.shouldRetry(resp, err)
        // On retryable error clear PartUploadURL
        if retry {
            fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
            upload = nil
        }
        up.returnUploadURL(upload)
        up.sha1s[part-1] = in.HexSum()
        return retry, err
    })
    if err != nil {
        fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
    } else {
        fs.Debugf(up.o, "Done sending chunk %d", part)
    }
    return err
}

// finish closes off the large upload
func (up *largeUpload) finish() error {
    fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
    opts := rest.Opts{
        Method: "POST",
        Path:   "/b2_finish_large_file",
    }
    var request = api.FinishLargeFileRequest{
        ID:    up.id,
        SHA1s: up.sha1s,
    }
    var response api.FileInfo
    err := up.f.pacer.Call(func() (bool, error) {
        resp, err := up.f.srv.CallJSON(&opts, &request, &response)
        return up.f.shouldRetry(resp, err)
    })
    if err != nil {
        return err
    }
    return up.o.decodeMetaDataFileInfo(&response)
}

// cancel aborts the large upload
func (up *largeUpload) cancel() error {
    opts := rest.Opts{
        Method: "POST",
        Path:   "/b2_cancel_large_file",
    }
    var request = api.CancelLargeFileRequest{
        ID: up.id,
    }
    var response api.CancelLargeFileResponse
    err := up.f.pacer.Call(func() (bool, error) {
        resp, err := up.f.srv.CallJSON(&opts, &request, &response)
        return up.f.shouldRetry(resp, err)
    })
    return err
}

func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
    wg.Add(1)
    go func(part int64, buf []byte) {
        defer wg.Done()
        defer up.f.putUploadBlock(buf)
        err := up.transferChunk(part, buf)
        if err != nil {
            select {
            case errs <- err:
            default:
            }
        }
    }(part, buf)
}

func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
    if err == nil {
        select {
        case err = <-errs:
        default:
        }
    }
    if err != nil {
        fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
        cancelErr := up.cancel()
        if cancelErr != nil {
            fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
        }
        return err
    }
    return up.finish()
}

// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
    fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
    errs := make(chan error, 1)
    hasMoreParts := true
    var wg sync.WaitGroup
    fs.AccountByPart(up.o) // Cancel whole file accounting before reading

    // Transfer initial chunk
    up.size = int64(len(initialUploadBlock))
    up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)

outer:
    for part := int64(2); hasMoreParts; part++ {
        // Check any errors
        select {
        case err = <-errs:
            break outer
        default:
        }

        // Get a block of memory
        buf := up.f.getUploadBlock()

        // Read the chunk
        n, err := io.ReadFull(up.in, buf)
        if err == io.ErrUnexpectedEOF {
            fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
            buf = buf[:n]
            hasMoreParts = false
            err = nil
        } else if err == io.EOF {
            fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
            up.f.putUploadBlock(buf)
            hasMoreParts = false
            err = nil
            break outer
        } else if err != nil {
            // other kinds of errors indicate failure
            up.f.putUploadBlock(buf)
            break outer
        }

        // Keep stats up to date
        up.parts = part
        up.size += int64(n)
        if part > maxParts {
            err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
            break outer
        }

        // Transfer the chunk
        up.managedTransferChunk(&wg, errs, part, buf)
    }
    wg.Wait()
    up.sha1s = up.sha1s[:up.parts]

    return up.finishOrCancelOnError(err, errs)
}

// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
    fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
    remaining := up.size
    errs := make(chan error, 1)
    var wg sync.WaitGroup
    var err error
    fs.AccountByPart(up.o) // Cancel whole file accounting before reading
outer:
    for part := int64(1); part <= up.parts; part++ {
        // Check any errors
        select {
        case err = <-errs:
            break outer
        default:
        }

        reqSize := remaining
        if reqSize >= int64(chunkSize) {
            reqSize = int64(chunkSize)
        }

        // Get a block of memory
        buf := up.f.getUploadBlock()[:reqSize]

        // Read the chunk
        _, err = io.ReadFull(up.in, buf)
        if err != nil {
            up.f.putUploadBlock(buf)
            break outer
        }

        // Transfer the chunk
        up.managedTransferChunk(&wg, errs, part, buf)
        remaining -= reqSize
    }
    wg.Wait()

    return up.finishOrCancelOnError(err, errs)
}
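hashAppendingReader is what lets transferChunk send sha1Header: "hex_digits_at_end": the part body is streamed, and the 40 hex digits of its SHA1 are appended after the payload, so the checksum never has to be computed before the upload starts. A toy round trip of that framing, assuming only standard-library imports (bytes, crypto/sha1, fmt, io/ioutil, log):

// Toy demonstration of the hex_digits_at_end framing used by transferChunk:
// read a payload through newHashAppendingReader and split off the trailing SHA1.
func exampleHashAppendingReader() {
    payload := []byte("hello world")
    in := newHashAppendingReader(bytes.NewReader(payload), sha1.New())
    framed, err := ioutil.ReadAll(in) // payload followed by 40 hex digits
    if err != nil {
        log.Fatalf("read failed: %v", err)
    }
    body := framed[:len(framed)-in.AdditionalLength()]
    sum := string(framed[len(framed)-in.AdditionalLength():])
    fmt.Printf("body=%q sha1=%s matches HexSum=%v\n", body, sum, sum == in.HexSum())
}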
208
bin/cross-compile.go
Normal file
@@ -0,0 +1,208 @@
// +build ignore

// Cross compile rclone - in go because I hate bash ;-)

package main

import (
    "flag"
    "fmt"
    "io/ioutil"
    "log"
    "os"
    "os/exec"
    "path/filepath"
    "regexp"
    "runtime"
    "strings"
    "sync"
    "time"
)

var (
    // Flags
    debug       = flag.Bool("d", false, "Print commands instead of running them.")
    parallel    = flag.Int("parallel", runtime.NumCPU(), "Number of commands to run in parallel.")
    copyAs      = flag.String("release", "", "Make copies of the releases with this name")
    gitLog      = flag.String("git-log", "", "git log to include as well")
    include     = flag.String("include", "^.*$", "os/arch regexp to include")
    exclude     = flag.String("exclude", "^$", "os/arch regexp to exclude")
    cgo         = flag.Bool("cgo", false, "Use cgo for the build")
    noClean     = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
    tags        = flag.String("tags", "", "Space separated list of build tags")
    compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
)

// GOOS/GOARCH pairs we build for
var osarches = []string{
    "windows/386",
    "windows/amd64",
    "darwin/386",
    "darwin/amd64",
    "linux/386",
    "linux/amd64",
    "linux/arm",
    "linux/arm64",
    "linux/mips",
    "linux/mipsle",
    "freebsd/386",
    "freebsd/amd64",
    "freebsd/arm",
    "netbsd/386",
    "netbsd/amd64",
    "netbsd/arm",
    "openbsd/386",
    "openbsd/amd64",
    "plan9/386",
    "plan9/amd64",
    "solaris/amd64",
}

// Special environment flags for a given arch
var archFlags = map[string][]string{
    "386": {"GO386=387"},
}

// runEnv - run a shell command with env
func runEnv(args, env []string) {
    if *debug {
        args = append([]string{"echo"}, args...)
    }
    cmd := exec.Command(args[0], args[1:]...)
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if env != nil {
        cmd.Env = append(os.Environ(), env...)
    }
    if *debug {
        log.Printf("args = %v, env = %v\n", args, cmd.Env)
    }
    err := cmd.Run()
    if err != nil {
        log.Fatalf("Failed to run %v: %v", args, err)
    }
}

// run a shell command
func run(args ...string) {
    runEnv(args, nil)
}

// build the binary in dir
func compileArch(version, goos, goarch, dir string) {
    log.Printf("Compiling %s/%s", goos, goarch)
    output := filepath.Join(dir, "rclone")
    if goos == "windows" {
        output += ".exe"
    }
    err := os.MkdirAll(dir, 0777)
    if err != nil {
        log.Fatalf("Failed to mkdir: %v", err)
    }
    args := []string{
        "go", "build",
        "--ldflags", "-s -X github.com/ncw/rclone/fs.Version=" + version,
        "-i",
        "-o", output,
        "-tags", *tags,
        "..",
    }
    env := []string{
        "GOOS=" + goos,
        "GOARCH=" + goarch,
    }
    if !*cgo {
        env = append(env, "CGO_ENABLED=0")
    } else {
        env = append(env, "CGO_ENABLED=1")
    }
    if flags, ok := archFlags[goarch]; ok {
        env = append(env, flags...)
    }
    runEnv(args, env)
    if !*compileOnly {
        // Now build the zip
        run("cp", "-a", "../MANUAL.txt", filepath.Join(dir, "README.txt"))
        run("cp", "-a", "../MANUAL.html", filepath.Join(dir, "README.html"))
        run("cp", "-a", "../rclone.1", dir)
        if *gitLog != "" {
            run("cp", "-a", *gitLog, dir)
        }
        zip := dir + ".zip"
        run("zip", "-r9", zip, dir)
        if *copyAs != "" {
            copyAsZip := strings.Replace(zip, "-"+version, "-"+*copyAs, 1)
            run("ln", zip, copyAsZip)
        }
        run("rm", "-rf", dir)
    }
    log.Printf("Done compiling %s/%s", goos, goarch)
}

func compile(version string) {
    start := time.Now()
    wg := new(sync.WaitGroup)
    run := make(chan func(), *parallel)
    for i := 0; i < *parallel; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for f := range run {
                f()
            }
        }()
    }
    includeRe, err := regexp.Compile(*include)
    if err != nil {
        log.Fatalf("Bad -include regexp: %v", err)
    }
    excludeRe, err := regexp.Compile(*exclude)
    if err != nil {
        log.Fatalf("Bad -exclude regexp: %v", err)
    }
    compiled := 0
    for _, osarch := range osarches {
        if excludeRe.MatchString(osarch) || !includeRe.MatchString(osarch) {
            continue
        }
        parts := strings.Split(osarch, "/")
        if len(parts) != 2 {
            log.Fatalf("Bad osarch %q", osarch)
        }
        goos, goarch := parts[0], parts[1]
        userGoos := goos
        if goos == "darwin" {
            userGoos = "osx"
        }
        dir := filepath.Join("rclone-" + version + "-" + userGoos + "-" + goarch)
        run <- func() {
            compileArch(version, goos, goarch, dir)
        }
        compiled++
    }
    close(run)
    wg.Wait()
    log.Printf("Compiled %d arches in %v", compiled, time.Since(start))
}

func main() {
    flag.Parse()
    args := flag.Args()
    if len(args) != 1 {
        log.Fatalf("Syntax: %s <version>", os.Args[0])
    }
    version := args[0]
    if !*noClean {
        run("rm", "-rf", "build")
        run("mkdir", "build")
    }
    err := os.Chdir("build")
    if err != nil {
        log.Fatalf("Couldn't cd into build dir: %v", err)
    }
    err = ioutil.WriteFile("version.txt", []byte(fmt.Sprintf("rclone %s\n", version)), 0666)
    if err != nil {
        log.Fatalf("Couldn't write version.txt: %v", err)
    }
    compile(version)
}
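compile fans the per-arch builds out over a fixed number of goroutines by pushing closures down a channel; the workers drain it until close(run), and the WaitGroup makes the caller block until every queued build has finished. The same pattern in isolation, as a minimal sketch (assuming a "sync" import):

// Minimal sketch of the bounded worker pool compile() uses: N workers
// drain a channel of closures; close(ch) ends the pool.
func runPool(parallel int, jobs []func()) {
    ch := make(chan func(), parallel)
    var wg sync.WaitGroup
    for i := 0; i < parallel; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for job := range ch {
                job()
            }
        }()
    }
    for _, job := range jobs {
        ch <- job
    }
    close(ch)
    wg.Wait()
}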
59
bin/decrypt_names.py
Executable file
@@ -0,0 +1,59 @@
#!/usr/bin/python
"""
This is a tool to decrypt file names in rclone logs.

Pass two files in, the first should be a crypt mapping generated by

    rclone ls --crypt-show-mapping remote:path

The second should be a log file that you want the paths decrypted in.

Note that if the crypt mappings file is large it can take some time to
run.
"""

import re
import sys

# Crypt line
match_crypt = re.compile(r'NOTICE: (.*?): Encrypts to "(.*?)"$')

def read_crypt_map(mapping_file):
    """
    Read the crypt mapping file in, creating a dictionary of substitutions
    """
    mapping = {}
    with open(mapping_file) as fd:
        for line in fd:
            match = match_crypt.search(line)
            if match:
                plaintext, ciphertext = match.groups()
                plaintexts = plaintext.split("/")
                ciphertexts = ciphertext.split("/")
                for plain, cipher in zip(plaintexts, ciphertexts):
                    mapping[cipher] = plain
    return mapping

def map_log_file(crypt_map, log_file):
    """
    Substitute the crypt_map in the log file.

    This uses a straightforward O(N**2) algorithm. I tried using
    regexps to speed it up but it made it slower!
    """
    with open(log_file) as fd:
        for line in fd:
            for cipher, plain in crypt_map.iteritems():
                line = line.replace(cipher, plain)
            sys.stdout.write(line)

def main():
    if len(sys.argv) < 3:
        print "Syntax: %s <crypt-mapping-file> <log-file>" % sys.argv[0]
        raise SystemExit(1)
    mapping_file, log_file = sys.argv[1:]
    crypt_map = read_crypt_map(mapping_file)
    map_log_file(crypt_map, log_file)

if __name__ == "__main__":
    main()
149
bin/make_manual.py
Executable file
@@ -0,0 +1,149 @@
#!/usr/bin/python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""

import os
import re
from datetime import datetime

docpath = "docs/content"
outfile = "MANUAL.md"

# Order to add docs segments to make outfile
docs = [
    "about.md",
    "install.md",
    "docs.md",
    "remote_setup.md",
    "filtering.md",
    "overview.md",

    # Keep these alphabetical by full name
    "amazonclouddrive.md",
    "s3.md",
    "b2.md",
    "box.md",
    "cache.md",
    "crypt.md",
    "dropbox.md",
    "ftp.md",
    "googlecloudstorage.md",
    "drive.md",
    "http.md",
    "hubic.md",
    "azureblob.md",
    "onedrive.md",
    "qingstor.md",
    "swift.md",
    "pcloud.md",
    "sftp.md",
    "webdav.md",
    "yandex.md",

    "local.md",
    "changelog.md",
    "bugs.md",
    "faq.md",
    "licence.md",
    "authors.md",
    "contact.md",
]

# Order to put the commands in - any not on here will be in sorted order
commands_order = [
    "rclone_config.md",
    "rclone_copy.md",
    "rclone_sync.md",
    "rclone_move.md",
    "rclone_delete.md",
    "rclone_purge.md",
    "rclone_mkdir.md",
    "rclone_rmdir.md",
    "rclone_check.md",
    "rclone_ls.md",
    "rclone_lsd.md",
    "rclone_lsl.md",
    "rclone_md5sum.md",
    "rclone_sha1sum.md",
    "rclone_size.md",
    "rclone_version.md",
    "rclone_cleanup.md",
    "rclone_dedupe.md",
]

# Docs which aren't made into outfile
ignore_docs = [
    "downloads.md",
    "privacy.md",
    "donate.md",
]

def read_doc(doc):
    """Read file as a string"""
    path = os.path.join(docpath, doc)
    with open(path) as fd:
        contents = fd.read()
    parts = contents.split("---\n", 2)
    if len(parts) != 3:
        raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
    contents = parts[2].strip()+"\n\n"
    # Remove icons
    contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
    # Make [...](/links/) absolute
    contents = re.sub(r'\((\/.*?\/)\)', r"(https://rclone.org\1)", contents)
    # Interpret provider shortcode
    # {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
    contents = re.sub(r'\{\{<\s+provider.*?name="(.*?)".*?>\}\}', r"\1", contents)
    return contents

def check_docs(docpath):
    """Check all the docs are in docpath"""
    files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
    files -= set(ignore_docs)
    docs_set = set(docs)
    if files == docs_set:
        return
    print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
    print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
    raise ValueError("Missing files")

def read_command(command):
    doc = read_doc("commands/"+command)
    doc = re.sub(r"### Options inherited from parent commands.*$", "", doc, 0, re.S)
    doc = doc.strip()+"\n"
    return doc

def read_commands(docpath):
    """Reads the commands and makes them into a single page"""
    files = set(f for f in os.listdir(docpath + "/commands") if f.endswith(".md"))
    docs = []
    for command in commands_order:
        docs.append(read_command(command))
        files.remove(command)
    for command in sorted(files):
        if command != "rclone.md":
            docs.append(read_command(command))
    return "\n".join(docs)

def main():
    check_docs(docpath)
    command_docs = read_commands(docpath)
    with open(outfile, "w") as out:
        out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s

""" % datetime.now().strftime("%b %d, %Y"))
        for doc in docs:
            contents = read_doc(doc)
            # Substitute the commands into doc.md
            if doc == "docs.md":
                contents = re.sub(r"The main rclone commands.*?for the full list.", command_docs, contents, 0, re.S)
            out.write(contents)
    print "Written '%s'" % outfile

if __name__ == "__main__":
    main()
146
bin/make_test_files.go
Normal file
@@ -0,0 +1,146 @@
// +build ignore

// Build a directory structure with the required number of files in it
//
// Run with go run make_test_files.go [flag] <directory>
package main

import (
    cryptrand "crypto/rand"
    "flag"
    "io"
    "log"
    "math/rand"
    "os"
    "path/filepath"
)

var (
    // Flags
    numberOfFiles            = flag.Int("n", 1000, "Number of files to create")
    averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
    maxDepth                 = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
    minFileSize              = flag.Int64("min-size", 0, "Minimum size of file to create")
    maxFileSize              = flag.Int64("max-size", 100, "Maximum size of files to create")
    minFileNameLength        = flag.Int("min-name-length", 4, "Minimum length of file names to create")
    maxFileNameLength        = flag.Int("max-name-length", 12, "Maximum length of file names to create")

    directoriesToCreate int
    totalDirectories    int
    fileNames           = map[string]struct{}{} // keep a note of which file names we've used already
)

// randomString creates a random string for test purposes
func randomString(n int) string {
    const (
        vowel     = "aeiou"
        consonant = "bcdfghjklmnpqrstvwxyz"
        digit     = "0123456789"
    )
    pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
    out := make([]byte, n)
    p := 0
    for i := range out {
        source := pattern[p]
        p = (p + 1) % len(pattern)
        out[i] = source[rand.Intn(len(source))]
    }
    return string(out)
}

// fileName creates a unique random file or directory name
func fileName() (name string) {
    for {
        length := rand.Intn(*maxFileNameLength-*minFileNameLength) + *minFileNameLength
        name = randomString(length)
        if _, found := fileNames[name]; !found {
            break
        }
    }
    fileNames[name] = struct{}{}
    return name
}

// dir is a directory in the directory hierarchy being built up
type dir struct {
    name     string
    depth    int
    children []*dir
    parent   *dir
}

// Create a random directory hierarchy under d
func (d *dir) createDirectories() {
    for totalDirectories < directoriesToCreate {
        newDir := &dir{
            name:   fileName(),
            depth:  d.depth + 1,
            parent: d,
        }
        d.children = append(d.children, newDir)
        totalDirectories++
        switch rand.Intn(4) {
        case 0:
            if d.depth < *maxDepth {
                newDir.createDirectories()
            }
        case 1:
            return
        }
    }
    return
}

// list the directory hierarchy
func (d *dir) list(path string, output []string) []string {
    dirPath := path + "/" + d.name
    output = append(output, dirPath)
    for _, subDir := range d.children {
        output = subDir.list(dirPath, output)
    }
    return output
}

// writeFile writes a random file at dir/name
func writeFile(dir, name string) {
    err := os.MkdirAll(dir, 0777)
    if err != nil {
        log.Fatalf("Failed to make directory %q: %v", dir, err)
    }
    path := filepath.Join(dir, name)
    fd, err := os.Create(path)
    if err != nil {
        log.Fatalf("Failed to open file %q: %v", path, err)
    }
    size := rand.Int63n(*maxFileSize-*minFileSize) + *minFileSize
    _, err = io.CopyN(fd, cryptrand.Reader, size)
    if err != nil {
        log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
    }
    err = fd.Close()
    if err != nil {
        log.Fatalf("Failed to close file %q: %v", path, err)
    }
}

func main() {
    flag.Parse()
    args := flag.Args()
    if len(args) != 1 {
        log.Fatalf("Require 1 directory argument")
    }
    outputDirectory := args[0]
    log.Printf("Output dir %q", outputDirectory)

    directoriesToCreate = *numberOfFiles / *averageFilesPerDirectory
    log.Printf("directoriesToCreate %v", directoriesToCreate)
    root := &dir{name: outputDirectory, depth: 1}
    for totalDirectories < directoriesToCreate {
        root.createDirectories()
    }
    dirs := root.list("", []string{})
    for i := 0; i < *numberOfFiles; i++ {
        dir := dirs[rand.Intn(len(dirs))]
        writeFile(dir, fileName())
    }
}
4
bin/travis.rclone.conf
Normal file
@@ -0,0 +1,4 @@
# Encrypted rclone configuration File

RCLONE_ENCRYPT_V0:
XIkAr3p+y+zai82cHFH8UoW1y1XTe6dpTzo/g4uSwqI2pfsnSSJ4JbAsRZ9nGVpx3NzROKEewlusVHNokiA4/nD4NbT+2DJrpMLg/OtLREICfuRk3tVWPKLGsmA+TLKU+IfQMO4LfrrCe2DF/lW0qA5Xu16E0Vn++jNhbwW2oB+JTkaGka8Ae3CyisM/3NUGnCOG/yb5wLH7ybUstNYPHsNFCiU1brFXQ4DNIbUFMmca+5S44vrOWvhp9QijQXlG7/JjwrkqbB/LK2gMJPTuhY2OW+4tRw1IoCXbWmwJXv5xmhPqanW92A==
46
bin/update-authors.py
Executable file
@@ -0,0 +1,46 @@
#!/usr/bin/env python
"""
Update the authors.md file with the authors from the git log
"""

import re
import subprocess

AUTHORS = "docs/content/authors.md"
IGNORE = [ "nick@craig-wood.com" ]

def load():
    """
    returns a set of emails already in authors.md
    """
    with open(AUTHORS) as fd:
        authors = fd.read()
    emails = set(re.findall(r"<(.*?)>", authors))
    emails.update(IGNORE)
    return emails

def add_email(name, email):
    """
    adds the email passed in to the end of authors.md
    """
    print "Adding %s <%s>" % (name, email)
    with open(AUTHORS, "a+") as fd:
        print >>fd, " * %s <%s>" % (name, email)
    subprocess.check_call(["git", "commit", "-m", "Add %s to contributors" % name, AUTHORS])

def main():
    out = subprocess.check_output(["git", "log", '--reverse', '--format=%an|%ae', "master"])

    previous = load()
    for line in out.split("\n"):
        line = line.strip()
        if line == "":
            continue
        name, email = line.split("|")
        if email in previous:
            continue
        previous.add(email)
        add_email(name, email)

if __name__ == "__main__":
    main()
50
bin/upload-github
Executable file
@@ -0,0 +1,50 @@
#!/bin/bash
#
# Upload a release
#
# Needs github-release from https://github.com/aktau/github-release

set -e

REPO="rclone"

if [ "$1" == "" ]; then
    echo "Syntax: $0 Version"
    exit 1
fi
VERSION="$1"
if [ "$GITHUB_USER" == "" ]; then
    echo 1>&2 "Need GITHUB_USER environment variable"
    exit 1
fi
if [ "$GITHUB_TOKEN" == "" ]; then
    echo 1>&2 "Need GITHUB_TOKEN environment variable"
    exit 1
fi

echo "Making release ${VERSION}"
github-release release \
    --repo ${REPO} \
    --tag ${VERSION} \
    --name "rclone" \
    --description "Rclone - rsync for cloud storage. Sync files to and from many cloud storage providers."

for build in `ls build | grep -v current`; do
    echo "Uploading ${build}"
    base="${build%.*}"
    parts=(${base//-/ })
    os=${parts[3]}
    arch=${parts[4]}

    github-release upload \
        --repo ${REPO} \
        --tag ${VERSION} \
        --name "${build}" \
        --file build/${build}
done

github-release info \
    --repo ${REPO} \
    --tag ${VERSION}

echo "Done"
bin/win-build.bat (new file, 5 lines)
@@ -0,0 +1,5 @@
@echo off
echo Setting environment variables for mingw+WinFsp compile
set GOPATH=X:\go
set PATH=C:\Program Files\mingw-w64\i686-7.1.0-win32-dwarf-rt_v5-rev0\mingw32\bin;%PATH%
set CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse
box/api/types.go (new file, 192 lines)
@@ -0,0 +1,192 @@
// Package api has type definitions for box
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
	"encoding/json"
	"fmt"
	"time"
)

const (
	// 2017-05-03T07:26:10-07:00
	timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the
// box API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
	timeString := (*time.Time)(t).Format(timeFormat)
	return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}
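
Because timeFormat bakes the surrounding quotes into the layout string, the wrapper round-trips cleanly through encoding/json. A minimal standalone sketch (the type is copied locally for illustration; nothing below is imported from rclone):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Time is a local copy of the wrapper above, for illustration only.
type Time time.Time

const timeFormat = `"` + time.RFC3339 + `"`

// MarshalJSON emits the time as a quoted RFC 3339 string.
func (t *Time) MarshalJSON() ([]byte, error) {
	return []byte((*time.Time)(t).Format(timeFormat)), nil
}

// UnmarshalJSON parses a quoted RFC 3339 string back into a Time.
func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

func main() {
	in := Time(time.Date(2017, 5, 3, 7, 26, 10, 0, time.UTC))
	out, _ := json.Marshal(&in)
	fmt.Println(string(out)) // "2017-05-03T07:26:10Z"

	var back Time
	_ = json.Unmarshal(out, &back)
	fmt.Println(time.Time(back).Equal(time.Time(in))) // true
}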

// Error is returned from box when things go wrong
type Error struct {
	Type        string `json:"type"`
	Status      int    `json:"status"`
	Code        string `json:"code"`
	ContextInfo json.RawMessage
	HelpURL     string `json:"help_url"`
	Message     string `json:"message"`
	RequestID   string `json:"request_id"`
}

// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
	out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
	if e.Message != "" {
		out += ": " + e.Message
	}
	if e.ContextInfo != nil {
		out += fmt.Sprintf(" (%+v)", e.ContextInfo)
	}
	return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"

// Types of things in Item
const (
	ItemTypeFolder    = "folder"
	ItemTypeFile      = "file"
	ItemStatusActive  = "active"
	ItemStatusTrashed = "trashed"
	ItemStatusDeleted = "deleted"
)

// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
	Type              string `json:"type"`
	ID                string `json:"id"`
	SequenceID        string `json:"sequence_id"`
	Etag              string `json:"etag"`
	SHA1              string `json:"sha1"`
	Name              string `json:"name"`
	Size              int64  `json:"size"`
	CreatedAt         Time   `json:"created_at"`
	ModifiedAt        Time   `json:"modified_at"`
	ContentCreatedAt  Time   `json:"content_created_at"`
	ContentModifiedAt Time   `json:"content_modified_at"`
	ItemStatus        string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
}

// ModTime returns the modification time of the item
func (i *Item) ModTime() (t time.Time) {
	t = time.Time(i.ContentModifiedAt)
	if t.IsZero() {
		t = time.Time(i.ModifiedAt)
	}
	return t
}

// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
	TotalCount int    `json:"total_count"`
	Entries    []Item `json:"entries"`
	Offset     int    `json:"offset"`
	Limit      int    `json:"limit"`
	Order      []struct {
		By        string `json:"by"`
		Direction string `json:"direction"`
	} `json:"order"`
}

// Parent defines the ID of the parent directory
type Parent struct {
	ID string `json:"id"`
}

// CreateFolder is the request for Create Folder
type CreateFolder struct {
	Name   string `json:"name"`
	Parent Parent `json:"parent"`
}

// UploadFile is the request for Upload File
type UploadFile struct {
	Name              string `json:"name"`
	Parent            Parent `json:"parent"`
	ContentCreatedAt  Time   `json:"content_created_at"`
	ContentModifiedAt Time   `json:"content_modified_at"`
}

// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
	ContentModifiedAt Time `json:"content_modified_at"`
}

// UpdateFileMove is the request for Upload File to change name and parent
type UpdateFileMove struct {
	Name   string `json:"name"`
	Parent Parent `json:"parent"`
}

// CopyFile is the request for Copy File
type CopyFile struct {
	Name   string `json:"name"`
	Parent Parent `json:"parent"`
}

// UploadSessionRequest is used in Create Upload Session
type UploadSessionRequest struct {
	FolderID string `json:"folder_id,omitempty"` // don't pass for update
	FileSize int64  `json:"file_size"`
	FileName string `json:"file_name,omitempty"` // optional for update
}

// UploadSessionResponse is returned from Create Upload Session
type UploadSessionResponse struct {
	TotalParts       int   `json:"total_parts"`
	PartSize         int64 `json:"part_size"`
	SessionEndpoints struct {
		ListParts  string `json:"list_parts"`
		Commit     string `json:"commit"`
		UploadPart string `json:"upload_part"`
		Status     string `json:"status"`
		Abort      string `json:"abort"`
	} `json:"session_endpoints"`
	SessionExpiresAt  Time   `json:"session_expires_at"`
	ID                string `json:"id"`
	Type              string `json:"type"`
	NumPartsProcessed int    `json:"num_parts_processed"`
}

// Part defines the return from the upload part call which is passed to commit upload also
type Part struct {
	PartID string `json:"part_id"`
	Offset int    `json:"offset"`
	Size   int    `json:"size"`
	Sha1   string `json:"sha1"`
}

// UploadPartResponse is returned from the upload part call
type UploadPartResponse struct {
	Part Part `json:"part"`
}

// CommitUpload is used in the Commit Upload call
type CommitUpload struct {
	Parts      []Part `json:"parts"`
	Attributes struct {
		ContentCreatedAt  Time `json:"content_created_at"`
		ContentModifiedAt Time `json:"content_modified_at"`
	} `json:"attributes"`
}
box/box.go (new file, 1059 lines)
Diff suppressed because it is too large.
box/box_test.go (new file, 73 lines)
@@ -0,0 +1,73 @@
// Test Box filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package box_test

import (
	"testing"

	"github.com/ncw/rclone/box"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*box.Object)(nil))
	fstests.RemoteName = "TestBox:"
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
box/upload.go (new file, 270 lines)
@@ -0,0 +1,270 @@
// multipart upload for box

package box

import (
	"bytes"
	"crypto/sha1"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"sync"
	"time"

	"github.com/ncw/rclone/box/api"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/rest"
	"github.com/pkg/errors"
)

// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
	opts := rest.Opts{
		Method:  "POST",
		Path:    "/files/upload_sessions",
		RootURL: uploadURL,
	}
	request := api.UploadSessionRequest{
		FileSize: size,
	}
	// If object has an ID then it is existing so create a new version
	if o.id != "" {
		opts.Path = "/files/" + o.id + "/upload_sessions"
	} else {
		opts.Path = "/files/upload_sessions"
		request.FolderID = directoryID
		request.FileName = replaceReservedChars(leaf)
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
		return shouldRetry(resp, err)
	})
	return
}

// sha1Digest produces a digest using sha1 as per RFC3230
func sha1Digest(digest []byte) string {
	return "sha=" + base64.StdEncoding.EncodeToString(digest)
}
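
The header value is just the base64 of the raw SHA-1 bytes behind a "sha=" prefix. A standalone check of the encoding:

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// Compute the RFC 3230 style Digest header value for a sample body.
	sum := sha1.Sum([]byte("hello"))
	fmt.Println("Digest: sha=" + base64.StdEncoding.EncodeToString(sum[:]))
	// Output: Digest: sha=qvTGHdzF6KLavt4PO0gs2a6pQ00=
}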

// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte) (response *api.UploadPartResponse, err error) {
	chunkSize := int64(len(chunk))
	in := bytes.NewReader(chunk)
	sha1sum := sha1.Sum(chunk)
	opts := rest.Opts{
		Method:        "PUT",
		Path:          "/files/upload_sessions/" + SessionID,
		RootURL:       uploadURL,
		ContentType:   "application/octet-stream",
		ContentLength: &chunkSize,
		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
		ExtraHeaders: map[string]string{
			"Digest": sha1Digest(sha1sum[:]),
		},
		Body: in,
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		_, _ = in.Seek(0, 0)
		resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	return response, nil
}
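
Note the Content-Range byte range is inclusive at both ends, hence the chunkSize-1. A quick standalone check of the arithmetic (the sizes are chosen arbitrarily):

package main

import "fmt"

func main() {
	// A 2 MiB chunk starting at offset 8 MiB of a 10 MiB upload:
	var offset, chunkSize, totalSize int64 = 8 << 20, 2 << 20, 10 << 20
	fmt.Printf("bytes %d-%d/%d\n", offset, offset+chunkSize-1, totalSize)
	// Output: bytes 8388608-10485759/10485760
}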

// commitUpload finishes an upload session
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
	opts := rest.Opts{
		Method:  "POST",
		Path:    "/files/upload_sessions/" + SessionID + "/commit",
		RootURL: uploadURL,
		ExtraHeaders: map[string]string{
			"Digest": sha1Digest(sha1sum),
		},
	}
	request := api.CommitUpload{
		Parts: parts,
	}
	request.Attributes.ContentModifiedAt = api.Time(modTime)
	request.Attributes.ContentCreatedAt = api.Time(modTime)
	var body []byte
	var resp *http.Response
	maxTries := fs.Config.LowLevelRetries
	const defaultDelay = 10
	var tries int
outer:
	for tries = 0; tries < maxTries; tries++ {
		err = o.fs.pacer.Call(func() (bool, error) {
			resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
			if err != nil {
				return shouldRetry(resp, err)
			}
			body, err = rest.ReadBody(resp)
			return shouldRetry(resp, err)
		})
		delay := defaultDelay
		why := "unknown"
		if err != nil {
			// Sometimes we get 400 Error with
			// parts_mismatch immediately after uploading
			// the last part. Ignore this error and wait.
			if boxErr, ok := err.(*api.Error); ok && boxErr.Code == "parts_mismatch" {
				why = err.Error()
			} else {
				return nil, err
			}
		} else {
			switch resp.StatusCode {
			case http.StatusOK, http.StatusCreated:
				break outer
			case http.StatusAccepted:
				why = "not ready yet"
				delayString := resp.Header.Get("Retry-After")
				if delayString != "" {
					delay, err = strconv.Atoi(delayString)
					if err != nil {
						fs.Debugf(o, "Couldn't decode Retry-After header %q: %v", delayString, err)
						delay = defaultDelay
					}
				}
			default:
				return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
			}
		}
		fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
		time.Sleep(time.Duration(delay) * time.Second)
	}
	if tries >= maxTries {
		return nil, errors.New("too many tries to commit multipart upload - increase --low-level-retries")
	}
	err = json.Unmarshal(body, &result)
	if err != nil {
		return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
	}
	return result, nil
}
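
The loop above interleaves two concerns: tolerating a transient parts_mismatch error and polling while the server answers 202 Accepted, honouring Retry-After. The polling half in isolation, against a hypothetical endpoint (the URL and helper name are illustrative, not rclone API):

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// waitForCommit posts to url until the server stops answering 202 Accepted,
// sleeping for whatever Retry-After asks (10s fallback), up to maxTries.
func waitForCommit(url string, maxTries int) (*http.Response, error) {
	for try := 0; try < maxTries; try++ {
		resp, err := http.Post(url, "application/json", nil)
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusAccepted {
			return resp, nil // done (caller inspects the status)
		}
		delay := 10 // seconds, fallback when the header is absent or malformed
		if s := resp.Header.Get("Retry-After"); s != "" {
			if d, err := strconv.Atoi(s); err == nil {
				delay = d
			}
		}
		_ = resp.Body.Close()
		time.Sleep(time.Duration(delay) * time.Second)
	}
	return nil, fmt.Errorf("commit not ready after %d tries", maxTries)
}

func main() {
	resp, err := waitForCommit("https://example.com/upload_sessions/123/commit", 3)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(resp.Status)
}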

// abortUpload cancels an upload session
func (o *Object) abortUpload(SessionID string) (err error) {
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/files/upload_sessions/" + SessionID,
		RootURL:    uploadURL,
		NoResponse: true,
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	return err
}

// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
	// Create upload session
	session, err := o.createUploadSession(leaf, directoryID, size)
	if err != nil {
		return errors.Wrap(err, "multipart upload create session failed")
	}
	chunkSize := session.PartSize
	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))

	// Cancel the session if something went wrong
	defer func() {
		if err != nil {
			fs.Debugf(o, "Cancelling multipart upload: %v", err)
			cancelErr := o.abortUpload(session.ID)
			if cancelErr != nil {
				fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
			}
		}
	}()

	// Upload the chunks
	remaining := size
	position := int64(0)
	parts := make([]api.Part, session.TotalParts)
	hash := sha1.New()
	errs := make(chan error, 1)
	var wg sync.WaitGroup
outer:
	for part := 0; part < session.TotalParts; part++ {
		// Check any errors
		select {
		case err = <-errs:
			break outer
		default:
		}

		reqSize := remaining
		if reqSize >= int64(chunkSize) {
			reqSize = int64(chunkSize)
		}

		// Make a block of memory
		buf := make([]byte, reqSize)

		// Read the chunk
		_, err = io.ReadFull(in, buf)
		if err != nil {
			err = errors.Wrap(err, "multipart upload failed to read source")
			break outer
		}

		// Make the global hash (must be done sequentially)
		_, _ = hash.Write(buf)

		// Transfer the chunk
		wg.Add(1)
		o.fs.uploadToken.Get()
		go func(part int, position int64) {
			defer wg.Done()
			defer o.fs.uploadToken.Put()
			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
			partResponse, err := o.uploadPart(session.ID, position, size, buf)
			if err != nil {
				err = errors.Wrap(err, "multipart upload failed to upload part")
				select {
				case errs <- err:
				default:
				}
				return
			}
			parts[part] = partResponse.Part
		}(part, position)

		// ready for next block
		remaining -= chunkSize
		position += chunkSize
	}
	wg.Wait()
	if err == nil {
		select {
		case err = <-errs:
		default:
		}
	}
	if err != nil {
		return err
	}

	// Finalise the upload session
	result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
	if err != nil {
		return errors.Wrap(err, "multipart upload failed to finalize")
	}

	if result.TotalCount != 1 || len(result.Entries) != 1 {
		return errors.Errorf("multipart upload failed %v - not sure why", o)
	}
	return o.setMetaData(&result.Entries[0])
}
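
The wg.Add/uploadToken.Get pairing in uploadMultipart caps how many part uploads run at once. A sketch of that pattern, on the assumption that rclone's uploadToken behaves like a counting semaphore (modelled here with a buffered channel; none of this is rclone API):

package main

import (
	"fmt"
	"sync"
)

// tokens is a counting semaphore: Get blocks once n goroutines hold a token.
type tokens chan struct{}

func newTokens(n int) tokens { return make(tokens, n) }
func (t tokens) Get()        { t <- struct{}{} }
func (t tokens) Put()        { <-t }

func main() {
	tok := newTokens(2) // at most 2 concurrent part uploads
	var wg sync.WaitGroup
	for part := 0; part < 5; part++ {
		wg.Add(1)
		tok.Get() // acquired before spawning, exactly as in uploadMultipart
		go func(part int) {
			defer wg.Done()
			defer tok.Put()
			fmt.Println("uploading part", part)
		}(part)
	}
	wg.Wait()
}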
cache/cache.go (new file, vendored, 1017 lines)
Diff suppressed because it is too large.
cache/cache_internal_test.go (new file, vendored, 758 lines)
@@ -0,0 +1,758 @@
// +build !plan9,go1.7

package cache_test

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"testing"
	"time"

	//"os"
	"os/exec"
	//"strings"

	"github.com/ncw/rclone/cache"
	//"github.com/ncw/rclone/cmd/mount"
	//_ "github.com/ncw/rclone/cmd/cmount"
	//"github.com/ncw/rclone/cmd/mountlib"
	_ "github.com/ncw/rclone/drive"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest"
	"github.com/ncw/rclone/local"
	flag "github.com/spf13/pflag"
	"github.com/stretchr/testify/require"
)

var (
	infoAge    = time.Second * 10
	chunkClean = time.Second
	okDiff     = time.Second * 9 // really big diff here but the build machines seem to be slow. need a different way for this
	workers    = 2
)

func TestInternalListRootAndInnerRemotes(t *testing.T) {
	rootFs, boltDb := newLocalCacheFs(t, "tilrair-local", "tilrair-cache", nil)
	defer cleanupFs(t, rootFs, boltDb)

	// Instantiate inner fs
	innerFolder := "inner"
	err := rootFs.Mkdir(innerFolder)
	require.NoError(t, err)
	innerFs, err := fs.NewFs("tilrair-cache:" + innerFolder)
	require.NoError(t, err)

	obj := writeObjectString(t, innerFs, "one", "content")

	listRoot, err := rootFs.List("")
	require.NoError(t, err)
	listRootInner, err := rootFs.List(innerFolder)
	require.NoError(t, err)
	listInner, err := innerFs.List("")
	require.NoError(t, err)

	require.Lenf(t, listRoot, 1, "remote %v should have 1 entry", rootFs.Root())
	require.Lenf(t, listRootInner, 1, "remote %v should have 1 entry in %v", rootFs.Root(), innerFolder)
	require.Lenf(t, listInner, 1, "remote %v should have 1 entry", innerFs.Root())

	err = obj.Remove()
	require.NoError(t, err)
}

func TestInternalObjWrapFsFound(t *testing.T) {
	rootFs, boltDb := newLocalCacheFs(t, "tiowff-local", "tiowff-cache", nil)
	defer cleanupFs(t, rootFs, boltDb)

	cfs, err := getCacheFs(rootFs)
	require.NoError(t, err)
	wrappedFs := cfs.UnWrap()
	data := "content"
	writeObjectString(t, wrappedFs, "second", data)

	listRoot, err := rootFs.List("")
	require.NoError(t, err)
	require.Lenf(t, listRoot, 1, "remote %v should have 1 entry", rootFs.Root())

	co, err := rootFs.NewObject("second")
	require.NoError(t, err)
	r, err := co.Open()
	require.NoError(t, err)
	cachedData, err := ioutil.ReadAll(r)
	require.NoError(t, err)
	err = r.Close()
	require.NoError(t, err)

	strCached := string(cachedData)
	require.Equal(t, data, strCached)

	err = co.Remove()
	require.NoError(t, err)

	listRoot, err = wrappedFs.List("")
	require.NoError(t, err)
	require.Lenf(t, listRoot, 0, "remote %v should have 0 entries: %v", wrappedFs.Root(), listRoot)
}

func TestInternalObjNotFound(t *testing.T) {
	rootFs, boltDb := newLocalCacheFs(t, "tionf-local", "tionf-cache", nil)
	defer cleanupFs(t, rootFs, boltDb)

	obj, err := rootFs.NewObject("404")
	require.Error(t, err)
	require.Nil(t, obj)
}

func TestInternalCachedWrittenContentMatches(t *testing.T) {
	rootFs, boltDb := newLocalCacheFs(t, "ticwcm-local", "ticwcm-cache", nil)
	defer cleanupFs(t, rootFs, boltDb)

	cfs, err := getCacheFs(rootFs)
	require.NoError(t, err)
	chunkSize := cfs.ChunkSize()

	// create some rand test data
	testData := make([]byte, (chunkSize*4 + chunkSize/2))
	testSize, err := rand.Read(testData)
	require.Equal(t, len(testData), testSize, "data size doesn't match")
	require.NoError(t, err)

	// write the object
	o := writeObjectBytes(t, rootFs, "data.bin", testData)
	require.Equal(t, o.Size(), int64(testSize))

	// check sample of data from in-file
	sampleStart := chunkSize / 2
	sampleEnd := chunkSize
	testSample := testData[sampleStart:sampleEnd]
	checkSample := readDataFromObj(t, o, sampleStart, sampleEnd, false)
	require.Equal(t, int64(len(checkSample)), sampleEnd-sampleStart)
	require.Equal(t, checkSample, testSample)
}

func TestInternalCachedUpdatedContentMatches(t *testing.T) {
	rootFs, boltDb := newLocalCacheFs(t, "ticucm-local", "ticucm-cache", nil)
	defer cleanupFs(t, rootFs, boltDb)

	// create some rand test data
	testData1 := []byte(fstest.RandomString(100))
	testData2 := []byte(fstest.RandomString(200))

	// write the object
	o := updateObjectBytes(t, rootFs, "data.bin", testData1, testData2)
	require.Equal(t, o.Size(), int64(len(testData2)))

	// check data from in-file
	reader, err := o.Open()
	require.NoError(t, err)
	checkSample, err := ioutil.ReadAll(reader)
	_ = reader.Close()
	require.NoError(t, err)
	require.Equal(t, checkSample, testData2)
}

func TestInternalWrappedWrittenContentMatches(t *testing.T) {
	rootFs, boltDb := newLocalCacheFs(t, "tiwwcm-local", "tiwwcm-cache", nil)
	defer cleanupFs(t, rootFs, boltDb)

	cfs, err := getCacheFs(rootFs)
	require.NoError(t, err)
	chunkSize := cfs.ChunkSize()

	// create some rand test data
	testData := make([]byte, (chunkSize*4 + chunkSize/2))
	testSize, err := rand.Read(testData)
	require.Equal(t, len(testData), testSize)
	require.NoError(t, err)

	// write the object
	o := writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
	require.Equal(t, o.Size(), int64(testSize))

	o2, err := rootFs.NewObject("data.bin")
	require.NoError(t, err)
	require.Equal(t, o2.Size(), o.Size())

	// check sample of data from in-file
	sampleStart := chunkSize / 2
	sampleEnd := chunkSize
	testSample := testData[sampleStart:sampleEnd]
	checkSample := readDataFromObj(t, o2, sampleStart, sampleEnd, false)
	require.Equal(t, len(checkSample), len(testSample))

	for i := 0; i < len(checkSample); i++ {
		require.Equal(t, testSample[i], checkSample[i])
	}
}

func TestInternalLargeWrittenContentMatches(t *testing.T) {
	t.Skip("FIXME disabled because it is unreliable")
	rootFs, boltDb := newLocalCacheFs(t, "tilwcm-local", "tilwcm-cache", nil)
	defer cleanupFs(t, rootFs, boltDb)

	cfs, err := getCacheFs(rootFs)
	require.NoError(t, err)
	chunkSize := cfs.ChunkSize()

	// create some rand test data
	testData := make([]byte, (chunkSize*10 + chunkSize/2))
	testSize, err := rand.Read(testData)
	require.Equal(t, len(testData), testSize)
	require.NoError(t, err)

	// write the object
	o := writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
	require.Equal(t, o.Size(), int64(testSize))

	o2, err := rootFs.NewObject("data.bin")
	require.NoError(t, err)
	require.Equal(t, o2.Size(), o.Size())

	// check data from in-file
	checkSample := readDataFromObj(t, o2, int64(0), int64(testSize), false)
	require.Equal(t, len(checkSample), len(testData))

	for i := 0; i < len(checkSample); i++ {
		require.Equal(t, testData[i], checkSample[i], "byte: %d (%d), chunk: %d", int64(i)%chunkSize, i, int64(i)/chunkSize)
	}
}

func TestInternalLargeWrittenContentMatches2(t *testing.T) {
	cryptFs, boltDb := newLocalCacheCryptFs(t, "tilwcm2-local", "tilwcm2-cache", "tilwcm2-crypt", true, nil)
	defer cleanupFs(t, cryptFs, boltDb)

	cfs, err := getCacheFs(cryptFs)
	require.NoError(t, err)
	chunkSize := cfs.ChunkSize()
	fileSize := 87197196
	readOffset := 87195648

	// create some rand test data
	testData := make([]byte, fileSize)
	testSize, err := rand.Read(testData)
	require.Equal(t, len(testData), testSize)
	require.NoError(t, err)

	// write the object
	o := writeObjectBytes(t, cryptFs, "data.bin", testData)
	require.Equal(t, o.Size(), int64(testSize))

	o2, err := cryptFs.NewObject("data.bin")
	require.NoError(t, err)
	require.Equal(t, o2.Size(), o.Size())

	// check data from in-file
	reader, err := o2.Open(&fs.SeekOption{Offset: int64(readOffset)})
	require.NoError(t, err)
	rs, ok := reader.(io.Seeker)
	require.True(t, ok)
	checkOffset, err := rs.Seek(int64(readOffset), 0)
	require.NoError(t, err)
	require.Equal(t, checkOffset, int64(readOffset))
	checkSample, err := ioutil.ReadAll(reader)
	require.NoError(t, err)
	_ = reader.Close()

	require.Equal(t, len(checkSample), fileSize-readOffset)
	for i := 0; i < fileSize-readOffset; i++ {
		require.Equal(t, testData[readOffset+i], checkSample[i], "byte: %d (%d), chunk: %d", int64(i)%chunkSize, i, int64(i)/chunkSize)
	}
}

func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
	rootFs, boltDb := newLocalCacheFs(t, "tiwfcns-local", "tiwfcns-cache", nil)
	defer cleanupFs(t, rootFs, boltDb)

	cfs, err := getCacheFs(rootFs)
	require.NoError(t, err)
	chunkSize := cfs.ChunkSize()

	// create some rand test data
	co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))

	// update in the wrapped fs
	o, err := cfs.UnWrap().NewObject(co.Remote())
	require.NoError(t, err)
	err = o.SetModTime(co.ModTime().Truncate(time.Hour))
	require.NoError(t, err)

	// get a new instance from the cache
	co2, err := rootFs.NewObject(o.Remote())
	require.NoError(t, err)

	require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
	require.NotEqual(t, o.ModTime().String(), co2.ModTime().String())
	require.Equal(t, co.ModTime().String(), co2.ModTime().String())
}

func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
	rootFs, boltDb := newLocalCacheFs(t, "ticsadcf-local", "ticsadcf-cache", nil)
	defer cleanupFs(t, rootFs, boltDb)

	cfs, err := getCacheFs(rootFs)
	require.NoError(t, err)
	chunkSize := cfs.ChunkSize()

	// create some rand test data
	co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))

	// update in the wrapped fs
	o, err := cfs.UnWrap().NewObject(co.Remote())
	require.NoError(t, err)
	err = o.SetModTime(co.ModTime().Add(-1 * time.Hour))
	require.NoError(t, err)

	// get a new instance from the cache
	co2, err := rootFs.NewObject(o.Remote())
	require.NoError(t, err)

	require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
	require.NotEqual(t, o.ModTime().String(), co2.ModTime().String())
	require.Equal(t, co.ModTime().String(), co2.ModTime().String())

	cfs.DirCacheFlush() // flush the cache

	l, err := cfs.UnWrap().List("")
	require.NoError(t, err)
	require.Len(t, l, 1)
	o2 := l[0]

	// get a new instance from the cache
	co, err = rootFs.NewObject(o.Remote())
	require.NoError(t, err)
	require.Equal(t, o2.ModTime().String(), co.ModTime().String())
}

func TestInternalCacheWrites(t *testing.T) {
	rootFs, boltDb := newLocalCacheFs(t, "ticw-local", "ticw-cache", map[string]string{"cache-writes": "true"})
	defer cleanupFs(t, rootFs, boltDb)

	cfs, err := getCacheFs(rootFs)
	require.NoError(t, err)
	chunkSize := cfs.ChunkSize()

	// create some rand test data
	co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
	expectedTs := time.Now()
	ts, err := boltDb.GetChunkTs(path.Join(rootFs.Root(), co.Remote()), 0)
	require.NoError(t, err)
	require.WithinDuration(t, expectedTs, ts, okDiff)
}

func TestInternalMaxChunkSizeRespected(t *testing.T) {
	rootFs, boltDb := newLocalCacheFs(t, "timcsr-local", "timcsr-cache", map[string]string{"cache-workers": "1"})
	defer cleanupFs(t, rootFs, boltDb)

	cfs, err := getCacheFs(rootFs)
	require.NoError(t, err)
	chunkSize := cfs.ChunkSize()
	totalChunks := 20

	// create some rand test data
	obj := writeObjectRandomBytes(t, cfs, (int64(totalChunks-1)*chunkSize + chunkSize/2))
	o, err := rootFs.NewObject(obj.Remote())
	require.NoError(t, err)
	co, ok := o.(*cache.Object)
	require.True(t, ok)

	for i := 0; i < 4; i++ { // read first 4
		_ = readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
	}
	cfs.CleanUpCache(true)
	// the last 2 **must** be in the cache
	require.True(t, boltDb.HasChunk(co, chunkSize*2))
	require.True(t, boltDb.HasChunk(co, chunkSize*3))

	for i := 4; i < 6; i++ { // read next 2
		_ = readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
	}
	cfs.CleanUpCache(true)
	// the last 2 **must** be in the cache
	require.True(t, boltDb.HasChunk(co, chunkSize*4))
	require.True(t, boltDb.HasChunk(co, chunkSize*5))
}

func TestInternalExpiredEntriesRemoved(t *testing.T) {
	rootFs, boltDb := newLocalCacheFs(t, "tieer-local", "tieer-cache", map[string]string{"info_age": "5s"})
	defer cleanupFs(t, rootFs, boltDb)

	cfs, err := getCacheFs(rootFs)
	require.NoError(t, err)

	// create some rand test data
	_ = writeObjectString(t, cfs, "one", "one content")
	err = cfs.Mkdir("test")
	require.NoError(t, err)
	_ = writeObjectString(t, cfs, "test/second", "second content")

	l, err := cfs.List("test")
	require.NoError(t, err)
	require.Len(t, l, 1)

	err = cfs.UnWrap().Mkdir("test/test2")
	require.NoError(t, err)

	l, err = cfs.List("test")
	require.NoError(t, err)
	require.Len(t, l, 1)

	waitTime := time.Second * 5
	t.Logf("Waiting %v for cache to expire\n", waitTime)
	time.Sleep(waitTime)

	l, err = cfs.List("test")
	require.NoError(t, err)
	require.Len(t, l, 2)
}

// FIXME, enable this when mount is sorted out
//func TestInternalFilesMissingInMount1904(t *testing.T) {
//	t.Skip("Not yet")
//	if runtime.GOOS == "windows" {
//		t.Skip("Not yet")
//	}
//	id := "tifm1904"
//	rootFs, _ := newLocalCacheCryptFs(t, "test-local", "test-cache", "test-crypt", false,
//		map[string]string{"chunk_size": "5M", "info_age": "1m", "chunk_total_size": "500M", "cache-writes": "true"})
//	mntPoint := path.Join("/tmp", "tifm1904-mnt")
//	testPoint := path.Join(mntPoint, id)
//	checkOutput := "1 10 100 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 26 27 28 29 3 30 31 32 33 34 35 36 37 38 39 4 40 41 42 43 44 45 46 47 48 49 5 50 51 52 53 54 55 56 57 58 59 6 60 61 62 63 64 65 66 67 68 69 7 70 71 72 73 74 75 76 77 78 79 8 80 81 82 83 84 85 86 87 88 89 9 90 91 92 93 94 95 96 97 98 99 "
//
//	_ = os.MkdirAll(mntPoint, os.ModePerm)
//
//	list, err := rootFs.List("")
//	require.NoError(t, err)
//	found := false
//	list.ForDir(func(d fs.Directory) {
//		if strings.Contains(d.Remote(), id) {
//			found = true
//		}
//	})
//
//	if !found {
//		t.Skipf("Test folder '%v' doesn't exist", id)
//	}
//
//	mountFs(t, rootFs, mntPoint)
//	defer unmountFs(t, mntPoint)
//
//	for i := 1; i <= 2; i++ {
//		out, err := exec.Command("ls", testPoint).Output()
//		require.NoError(t, err)
//		require.Equal(t, checkOutput, strings.Replace(string(out), "\n", " ", -1))
//		t.Logf("root path has all files")
//		_ = writeObjectString(t, rootFs, path.Join(id, strconv.Itoa(i), strconv.Itoa(i), "one_file"), "one content")
//
//		for j := 1; j <= 100; j++ {
//			out, err := exec.Command("ls", path.Join(testPoint, strconv.Itoa(j))).Output()
//			require.NoError(t, err)
//			require.Equal(t, checkOutput, strings.Replace(string(out), "\n", " ", -1), "'%v' doesn't match", j)
//		}
//		obj, err := rootFs.NewObject(path.Join(id, strconv.Itoa(i), strconv.Itoa(i), "one_file"))
//		require.NoError(t, err)
//		err = obj.Remove()
//		require.NoError(t, err)
//		t.Logf("folders contain all the files")
//
//		out, err = exec.Command("date").Output()
//		require.NoError(t, err)
//		t.Logf("check #%v date: '%v'", i, strings.Replace(string(out), "\n", " ", -1))
//
//		if i < 2 {
//			time.Sleep(time.Second * 60)
//		}
//	}
//}

func writeObjectRandomBytes(t *testing.T, f fs.Fs, size int64) fs.Object {
	remote := strconv.Itoa(rand.Int()) + ".bin"
	// create some rand test data
	testData := make([]byte, size)
	testSize, err := rand.Read(testData)
	require.Equal(t, size, int64(len(testData)))
	require.Equal(t, size, int64(testSize))
	require.NoError(t, err)

	o := writeObjectBytes(t, f, remote, testData)
	require.Equal(t, size, o.Size())

	return o
}

func writeObjectString(t *testing.T, f fs.Fs, remote, content string) fs.Object {
	return writeObjectBytes(t, f, remote, []byte(content))
}

func writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
	in := bytes.NewReader(data)
	modTime := time.Now()
	objInfo := fs.NewStaticObjectInfo(remote, modTime, int64(len(data)), true, nil, f)

	obj, err := f.Put(in, objInfo)
	require.NoError(t, err)

	return obj
}

func updateObjectBytes(t *testing.T, f fs.Fs, remote string, data1 []byte, data2 []byte) fs.Object {
	in1 := bytes.NewReader(data1)
	in2 := bytes.NewReader(data2)
	objInfo1 := fs.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
	objInfo2 := fs.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)

	obj, err := f.Put(in1, objInfo1)
	require.NoError(t, err)
	obj, err = f.NewObject(remote)
	require.NoError(t, err)
	err = obj.Update(in2, objInfo2)
	require.NoError(t, err)

	return obj
}

func readDataFromObj(t *testing.T, co fs.Object, offset, end int64, useSeek bool) []byte {
	var reader io.ReadCloser
	var err error
	size := end - offset
	checkSample := make([]byte, size)

	reader, err = co.Open(&fs.SeekOption{Offset: offset})
	require.NoError(t, err)

	totalRead, err := io.ReadFull(reader, checkSample)
	require.NoError(t, err)
	_ = reader.Close()
	require.Equal(t, int64(totalRead), size, "wrong data read size from file")

	return checkSample
}

func cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
	err := f.Features().Purge()
	require.NoError(t, err)
	b.Close()
}

func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote string, purge bool, cfg map[string]string) (fs.Fs, *cache.Persistent) {
	fstest.Initialise()
	dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
	chunkPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote)
	boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
	require.NoError(t, err)

	localExists := false
	cacheExists := false
	cryptExists := false
	for _, s := range fs.ConfigFileSections() {
		if s == localRemote {
			localExists = true
		}
		if s == cacheRemote {
			cacheExists = true
		}
		if s == cryptRemote {
			cryptExists = true
		}
	}

	localRemoteWrap := ""
	if !localExists {
		localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
		fs.ConfigFileSet(localRemote, "type", "local")
		fs.ConfigFileSet(localRemote, "nounc", "true")
	}

	if !cacheExists {
		fs.ConfigFileSet(cacheRemote, "type", "cache")
		fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
	}
	if c, ok := cfg["chunk_size"]; ok {
		fs.ConfigFileSet(cacheRemote, "chunk_size", c)
	} else {
		fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
	}
	if c, ok := cfg["chunk_total_size"]; ok {
		fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
	} else {
		fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
	}
	if c, ok := cfg["info_age"]; ok {
		fs.ConfigFileSet(cacheRemote, "info_age", c)
	} else {
		fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
	}

	if !cryptExists {
		t.Skipf("Skipping due to missing crypt remote: %v", cryptRemote)
	}

	if c, ok := cfg["cache-chunk-no-memory"]; ok {
		_ = flag.Set("cache-chunk-no-memory", c)
	} else {
		_ = flag.Set("cache-chunk-no-memory", "true")
	}
	if c, ok := cfg["cache-workers"]; ok {
		_ = flag.Set("cache-workers", c)
	} else {
		_ = flag.Set("cache-workers", strconv.Itoa(workers))
	}
	if c, ok := cfg["cache-chunk-clean-interval"]; ok {
		_ = flag.Set("cache-chunk-clean-interval", c)
	} else {
		_ = flag.Set("cache-chunk-clean-interval", chunkClean.String())
	}
	if c, ok := cfg["cache-writes"]; ok {
		_ = flag.Set("cache-writes", c)
	} else {
		_ = flag.Set("cache-writes", strconv.FormatBool(cache.DefCacheWrites))
	}

	// Instantiate root
	f, err := fs.NewFs(cryptRemote + ":")
	require.NoError(t, err)
	if purge {
		_ = f.Features().Purge()
		require.NoError(t, err)
	}
	err = f.Mkdir("")
	require.NoError(t, err)

	return f, boltDb
}

func newLocalCacheFs(t *testing.T, localRemote, cacheRemote string, cfg map[string]string) (fs.Fs, *cache.Persistent) {
	fstest.Initialise()
	dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
	chunkPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote)
	boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
	require.NoError(t, err)

	localExists := false
	cacheExists := false
	for _, s := range fs.ConfigFileSections() {
		if s == localRemote {
			localExists = true
		}
		if s == cacheRemote {
			cacheExists = true
		}
	}

	localRemoteWrap := ""
	if !localExists {
		localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
		fs.ConfigFileSet(localRemote, "type", "local")
		fs.ConfigFileSet(localRemote, "nounc", "true")
	}

	if !cacheExists {
		fs.ConfigFileSet(cacheRemote, "type", "cache")
		fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
	}
	if c, ok := cfg["chunk_size"]; ok {
		fs.ConfigFileSet(cacheRemote, "chunk_size", c)
	} else {
		fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
	}
	if c, ok := cfg["chunk_total_size"]; ok {
		fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
	} else {
		fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
	}
	if c, ok := cfg["info_age"]; ok {
		fs.ConfigFileSet(cacheRemote, "info_age", c)
	} else {
		fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
	}

	if c, ok := cfg["cache-chunk-no-memory"]; ok {
		_ = flag.Set("cache-chunk-no-memory", c)
	} else {
		_ = flag.Set("cache-chunk-no-memory", "true")
	}
	if c, ok := cfg["cache-workers"]; ok {
		_ = flag.Set("cache-workers", c)
	} else {
		_ = flag.Set("cache-workers", strconv.Itoa(workers))
	}
	if c, ok := cfg["cache-chunk-clean-interval"]; ok {
		_ = flag.Set("cache-chunk-clean-interval", c)
	} else {
		_ = flag.Set("cache-chunk-clean-interval", chunkClean.String())
	}
	if c, ok := cfg["cache-writes"]; ok {
		_ = flag.Set("cache-writes", c)
	} else {
		_ = flag.Set("cache-writes", strconv.FormatBool(cache.DefCacheWrites))
	}

	// Instantiate root
	f, err := fs.NewFs(cacheRemote + ":")
	require.NoError(t, err)
	_ = f.Features().Purge()
	require.NoError(t, err)
	err = f.Mkdir("")
	require.NoError(t, err)

	return f, boltDb
}

//func mountFs(t *testing.T, f fs.Fs, mntPoint string) {
//	if runtime.GOOS == "windows" {
//		t.Skip("Skipping test on Windows")
//		return
//	}
//
//	_ = flag.Set("debug-fuse", "false")
//
//	go func() {
//		mountlib.DebugFUSE = false
//		mountlib.AllowOther = true
//		mount.Mount(f, mntPoint)
//	}()
//
//	time.Sleep(time.Second * 3)
//}

func unmountFs(t *testing.T, mntPoint string) {
	var out []byte
	var err error

	if runtime.GOOS == "windows" {
		t.Skip("Skipping test on Windows")
		return
	} else if runtime.GOOS == "linux" {
		out, err = exec.Command("fusermount", "-u", mntPoint).Output()
	} else if runtime.GOOS == "darwin" {
		out, err = exec.Command("diskutil", "unmount", mntPoint).Output()
	}

	t.Logf("Unmount output: %v", string(out))
	require.NoError(t, err)
}

func getCacheFs(f fs.Fs) (*cache.Fs, error) {
	cfs, ok := f.(*cache.Fs)
	if ok {
		return cfs, nil
	} else {
		if f.Features().UnWrap != nil {
			cfs, ok := f.Features().UnWrap().(*cache.Fs)
			if ok {
				return cfs, nil
			}
		}
	}

	return nil, fmt.Errorf("didn't find a cache fs")
}

var (
	_ fs.Fs = (*cache.Fs)(nil)
	_ fs.Fs = (*local.Fs)(nil)
)
cache/cache_test.go (new file, vendored, 77 lines)
@@ -0,0 +1,77 @@
// Test Cache filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests

// +build !plan9,go1.7

package cache_test

import (
	"testing"

	"github.com/ncw/rclone/cache"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
	_ "github.com/ncw/rclone/local"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*cache.Object)(nil))
	fstests.RemoteName = "TestCache:"
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
cache/cache_unsupported.go (new file, vendored, 6 lines)
@@ -0,0 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files"

// +build plan9 !go1.7

package cache
cache/directory.go (new file, vendored, 138 lines)
@@ -0,0 +1,138 @@
// +build !plan9,go1.7

package cache

import (
	"path"
	"time"

	"github.com/ncw/rclone/fs"
)

// Directory is a generic dir that stores basic information about itself
type Directory struct {
	fs.Directory `json:"-"`

	CacheFs      *Fs    `json:"-"`       // cache fs
	Name         string `json:"name"`    // name of the directory
	Dir          string `json:"dir"`     // abs path of the directory
	CacheModTime int64  `json:"modTime"` // modification or creation time - IsZero for unknown
	CacheSize    int64  `json:"size"`    // size of directory and contents or -1 if unknown

	CacheItems int64      `json:"items"` // number of objects or -1 for unknown
	CacheType  string     `json:"cacheType"` // object type
	CacheTs    *time.Time `json:",omitempty"`
}

// NewDirectory builds an empty dir which will be used to unmarshal data into
func NewDirectory(f *Fs, remote string) *Directory {
	cd := ShallowDirectory(f, remote)
	t := time.Now()
	cd.CacheTs = &t

	return cd
}

// ShallowDirectory builds an empty dir which will be used to unmarshal data into
func ShallowDirectory(f *Fs, remote string) *Directory {
	var cd *Directory
	fullRemote := cleanPath(path.Join(f.Root(), remote))

	// build a new one
	dir := cleanPath(path.Dir(fullRemote))
	name := cleanPath(path.Base(fullRemote))
	cd = &Directory{
		CacheFs:      f,
		Name:         name,
		Dir:          dir,
		CacheModTime: time.Now().UnixNano(),
		CacheSize:    0,
		CacheItems:   0,
		CacheType:    "Directory",
	}

	return cd
}

// DirectoryFromOriginal builds one from a generic fs.Directory
func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
	var cd *Directory
	fullRemote := path.Join(f.Root(), d.Remote())

	dir := cleanPath(path.Dir(fullRemote))
	name := cleanPath(path.Base(fullRemote))
	t := time.Now()
	cd = &Directory{
		Directory:    d,
		CacheFs:      f,
		Name:         name,
		Dir:          dir,
		CacheModTime: d.ModTime().UnixNano(),
		CacheSize:    d.Size(),
		CacheItems:   d.Items(),
		CacheType:    "Directory",
		CacheTs:      &t,
	}

	return cd
}

// Fs returns its FS info
func (d *Directory) Fs() fs.Info {
	return d.CacheFs
}

// String returns a human friendly name for this object
func (d *Directory) String() string {
	if d == nil {
		return "<nil>"
	}
	return d.Remote()
}

// Remote returns the remote path
func (d *Directory) Remote() string {
	p := cleanPath(path.Join(d.Dir, d.Name))
	if d.CacheFs.Root() != "" {
		p = p[len(d.CacheFs.Root()):] // trim out root
		if len(p) > 0 { // remove first separator
			p = p[1:]
		}
	}

	return p
}
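
Remote turns the absolute cached path back into a path relative to the Fs root by slicing off the root prefix and the separator after it. The same trimming as a standalone sketch (cleanPath is rclone-internal, so plain path.Join stands in for it here, and the sample names are made up):

package main

import (
	"fmt"
	"path"
)

// remote joins dir and name, then strips the root prefix plus one separator.
func remote(root, dir, name string) string {
	p := path.Join(dir, name)
	if root != "" {
		p = p[len(root):]
		if len(p) > 0 {
			p = p[1:]
		}
	}
	return p
}

func main() {
	fmt.Println(remote("bucket", "bucket/photos", "2017")) // photos/2017
}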
|
||||
// abs returns the absolute path to the dir
|
||||
func (d *Directory) abs() string {
|
||||
return cleanPath(path.Join(d.Dir, d.Name))
|
||||
}
|
||||
|
||||
// parentRemote returns the absolute path parent remote
|
||||
func (d *Directory) parentRemote() string {
|
||||
absPath := d.abs()
|
||||
if absPath == "" {
|
||||
return ""
|
||||
}
|
||||
return cleanPath(path.Dir(absPath))
|
||||
}
|
||||
|
||||
// ModTime returns the cached ModTime
|
||||
func (d *Directory) ModTime() time.Time {
|
||||
return time.Unix(0, d.CacheModTime)
|
||||
}
|
||||
|
||||
// Size returns the cached Size
|
||||
func (d *Directory) Size() int64 {
|
||||
return d.CacheSize
|
||||
}
|
||||
|
||||
// Items returns the cached Items
|
||||
func (d *Directory) Items() int64 {
|
||||
return d.CacheItems
|
||||
}
|
||||
|
||||
var (
|
||||
_ fs.Directory = (*Directory)(nil)
|
||||
)
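Because the embedded fs.Directory and the CacheFs pointer are tagged json:"-", only the flat cache fields take part in marshalling, and the *time.Time CacheTs with omitempty simply disappears from the JSON when unknown. A minimal standalone round-trip sketch, using a trimmed copy of the struct (the field set is reduced for illustration):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// dir mirrors the JSON-visible fields of cache.Directory
type dir struct {
	Name         string     `json:"name"`
	Dir          string     `json:"dir"`
	CacheModTime int64      `json:"modTime"`
	CacheTs      *time.Time `json:",omitempty"`
}

func main() {
	t := time.Now()
	d := dir{Name: "movies", Dir: "/media", CacheModTime: t.UnixNano(), CacheTs: &t}
	b, _ := json.Marshal(d)

	var d2 dir
	_ = json.Unmarshal(b, &d2)
	fmt.Println(d2.Name, d2.CacheTs.Equal(t)) // movies true
}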
cache/handle.go (vendored, new file)
@@ -0,0 +1,506 @@
// +build !plan9,go1.7

package cache

import (
	"fmt"
	"io"
	"os"
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

// Handle manages the read/write/seek operations on an open handle
type Handle struct {
	cachedObject   *Object
	memory         ChunkStorage
	preloadQueue   chan int64
	preloadOffset  int64
	offset         int64
	seenOffsets    map[int64]bool
	mu             sync.Mutex
	confirmReading chan bool

	UseMemory bool
	workers   []*worker
	closed    bool
	reading   bool
}

// NewObjectHandle returns a new Handle for an existing Object
func NewObjectHandle(o *Object) *Handle {
	r := &Handle{
		cachedObject:  o,
		offset:        0,
		preloadOffset: -1, // -1 to trigger the first preload

		UseMemory: o.CacheFs.chunkMemory,
		reading:   false,
	}
	r.seenOffsets = make(map[int64]bool)
	r.memory = NewMemory(-1)

	// create a larger buffer to queue up requests
	r.preloadQueue = make(chan int64, o.CacheFs.totalWorkers*10)
	r.confirmReading = make(chan bool)
	r.startReadWorkers()
	return r
}

// cacheFs is a convenience method to get the parent cache FS of the object's manager
func (r *Handle) cacheFs() *Fs {
	return r.cachedObject.CacheFs
}

// storage is a convenience method to get the persistent storage of the object's manager
func (r *Handle) storage() Storage {
	return r.cacheFs().cache
}

// String returns a representation of this reader
func (r *Handle) String() string {
	return r.cachedObject.abs()
}

// startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() {
	if r.hasAtLeastOneWorker() {
		return
	}
	totalWorkers := r.cacheFs().totalWorkers

	if r.cacheFs().plexConnector.isConfigured() {
		if !r.cacheFs().plexConnector.isConnected() {
			err := r.cacheFs().plexConnector.authenticate()
			if err != nil {
				fs.Infof(r, "failed to authenticate to Plex: %v", err)
			}
		}
		if r.cacheFs().plexConnector.isConnected() {
			totalWorkers = 1
		}
	}

	r.scaleWorkers(totalWorkers)
}

// scaleWorkers will scale the worker pool up or down to the desired count
func (r *Handle) scaleWorkers(desired int) {
	current := len(r.workers)
	if current == desired {
		return
	}
	if current > desired {
		// scale in gracefully
		for i := 0; i < current-desired; i++ {
			r.preloadQueue <- -1
		}
	} else {
		// scale out
		for i := 0; i < desired-current; i++ {
			w := &worker{
				r:  r,
				ch: r.preloadQueue,
				id: current + i,
			}
			go w.run()

			r.workers = append(r.workers, w)
		}
	}
	// ignore the first scale out from 0
	if current != 0 {
		fs.Infof(r, "scale workers to %v", desired)
	}
}

func (r *Handle) requestExternalConfirmation() {
	// if we already have the max number of workers
	// or there's no external confirmation available
	// then we skip this step
	if len(r.workers) >= r.cacheFs().totalMaxWorkers ||
		!r.cacheFs().plexConnector.isConnected() {
		return
	}
	go r.cacheFs().plexConnector.isPlayingAsync(r.cachedObject, r.confirmReading)
}

func (r *Handle) confirmExternalReading() {
	// if we already have the max number of workers
	// or there's no external confirmation available
	// then we skip this step
	if len(r.workers) >= r.cacheFs().totalMaxWorkers ||
		!r.cacheFs().plexConnector.isConnected() {
		return
	}

	select {
	case confirmed := <-r.confirmReading:
		if !confirmed {
			return
		}
	default:
		return
	}

	fs.Infof(r, "confirmed reading by external reader")
	r.scaleWorkers(r.cacheFs().totalMaxWorkers)
}

// queueOffset will send an offset to the workers if it's different from the last one
func (r *Handle) queueOffset(offset int64) {
	if offset != r.preloadOffset {
		// clean past in-memory chunks
		if r.UseMemory {
			go r.memory.CleanChunksByNeed(offset)
		}
		go r.cacheFs().CleanUpCache(false)
		r.confirmExternalReading()
		r.preloadOffset = offset

		// clear the past seen chunks
		// they will remain in our persistent storage but will be removed from transient
		// so they need to be picked up by a worker
		for k := range r.seenOffsets {
			if k < offset {
				r.seenOffsets[k] = false
			}
		}

		for i := 0; i < len(r.workers); i++ {
			o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
			if o < 0 || o >= r.cachedObject.Size() {
				continue
			}
			if v, ok := r.seenOffsets[o]; ok && v {
				continue
			}

			r.seenOffsets[o] = true
			r.preloadQueue <- o
		}

		r.requestExternalConfirmation()
	}
}

func (r *Handle) hasAtLeastOneWorker() bool {
	oneWorker := false
	for i := 0; i < len(r.workers); i++ {
		if r.workers[i].isRunning() {
			oneWorker = true
		}
	}
	return oneWorker
}

// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
	var data []byte
	var err error

	// we calculate the modulus of the requested offset with the size of a chunk
	offset := chunkStart % r.cacheFs().chunkSize

	// we align the start offset of the first chunk to a likely chunk in the storage
	chunkStart = chunkStart - offset
	r.queueOffset(chunkStart)
	found := false

	if r.UseMemory {
		data, err = r.memory.GetChunk(r.cachedObject, chunkStart)
		if err == nil {
			found = true
		}
	}

	if !found {
		// give the workers a chance to pick up the chunk
		// and retry a couple of times
		for i := 0; i < r.cacheFs().readRetries*2; i++ {
			data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
			if err == nil {
				found = true
				break
			}

			fs.Debugf(r, "%v: chunk retry storage: %v", chunkStart, i)
			time.Sleep(time.Millisecond * 500)
		}
	}

	// not found in RAM, or the worker didn't manage to download the chunk
	// in time, so we abort and close the stream
	if err != nil || len(data) == 0 || !found {
		if !r.hasAtLeastOneWorker() {
			fs.Errorf(r, "out of workers")
			return nil, io.ErrUnexpectedEOF
		}

		return nil, errors.Errorf("chunk not found %v", chunkStart)
	}

	// the first chunk will be aligned with the start
	if offset > 0 {
		if offset >= int64(len(data)) {
			fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
				r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
			return nil, io.ErrUnexpectedEOF
		}
		data = data[int(offset):]
	}

	return data, nil
}

// Read copies up to len(p) bytes of the chunk at the current offset into p
func (r *Handle) Read(p []byte) (n int, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	var buf []byte

	// first reading
	if !r.reading {
		r.reading = true
		r.requestExternalConfirmation()
	}
	// reached EOF
	if r.offset >= r.cachedObject.Size() {
		return 0, io.EOF
	}
	currentOffset := r.offset
	buf, err = r.getChunk(currentOffset)
	if err != nil && len(buf) == 0 {
		fs.Errorf(r, "(%v/%v) error (%v) response", currentOffset, r.cachedObject.Size(), err)
		return 0, io.EOF
	}
	readSize := copy(p, buf)
	newOffset := currentOffset + int64(readSize)
	r.offset = newOffset

	return readSize, err
}

// Close will tell the workers to stop
func (r *Handle) Close() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.closed {
		return errors.New("file already closed")
	}

	close(r.preloadQueue)
	r.closed = true
	// wait for workers to complete their jobs before returning
	waitCount := 3
	for i := 0; i < len(r.workers); i++ {
		waitIdx := 0
		for r.workers[i].isRunning() && waitIdx < waitCount {
			time.Sleep(time.Second)
			waitIdx++
		}
	}

	go r.cacheFs().CleanUpCache(false)
	fs.Debugf(r, "cache reader closed %v", r.offset)
	return nil
}

// Seek will move the current offset based on whence and instruct the workers to move there too
func (r *Handle) Seek(offset int64, whence int) (int64, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	var err error
	switch whence {
	case os.SEEK_SET:
		fs.Debugf(r, "moving offset set from %v to %v", r.offset, offset)
		r.offset = offset
	case os.SEEK_CUR:
		fs.Debugf(r, "moving offset cur from %v to %v", r.offset, r.offset+offset)
		r.offset += offset
	case os.SEEK_END:
		fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
		r.offset = r.cachedObject.Size() + offset
	default:
		err = errors.Errorf("cache: unimplemented seek whence %v", whence)
	}

	chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
	if chunkStart >= r.cacheFs().chunkSize {
		chunkStart = chunkStart - r.cacheFs().chunkSize
	}
	r.queueOffset(chunkStart)

	return r.offset, err
}

type worker struct {
	r       *Handle
	ch      <-chan int64
	rc      io.ReadCloser
	id      int
	running bool
	mu      sync.Mutex
}

// String returns a representation of this worker
func (w *worker) String() string {
	return fmt.Sprintf("worker-%v <%v>", w.id, w.r.cachedObject.Name)
}

// reader will return a reader depending on the capabilities of the source reader:
//   - if it supports seeking it will seek to the desired offset and return the same reader
//   - if it doesn't support seeking it will close a possible existing one and open at the desired offset
//   - if there's no reader associated with this worker, it will create one
func (w *worker) reader(offset, end int64) (io.ReadCloser, error) {
	var err error
	r := w.rc
	if w.rc == nil {
		r, err = w.r.cacheFs().OpenRateLimited(func() (io.ReadCloser, error) {
			return w.r.cachedObject.Object.Open(&fs.SeekOption{Offset: offset}, &fs.RangeOption{Start: offset, End: end})
		})
		if err != nil {
			return nil, err
		}
		return r, nil
	}

	seekerObj, ok := r.(io.Seeker)
	if ok {
		_, err = seekerObj.Seek(offset, os.SEEK_SET)
		return r, err
	}

	_ = w.rc.Close()
	return w.r.cacheFs().OpenRateLimited(func() (io.ReadCloser, error) {
		r, err = w.r.cachedObject.Object.Open(&fs.SeekOption{Offset: offset}, &fs.RangeOption{Start: offset, End: end})
		if err != nil {
			return nil, err
		}
		return r, nil
	})
}

func (w *worker) isRunning() bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.running
}

func (w *worker) setRunning(f bool) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.running = f
}

// run is the main loop for the worker which receives offsets to preload
func (w *worker) run() {
	var err error
	var data []byte
	defer w.setRunning(false)
	defer func() {
		if w.rc != nil {
			_ = w.rc.Close()
			w.setRunning(false)
		}
	}()

	for {
		chunkStart, open := <-w.ch
		w.setRunning(true)
		if chunkStart < 0 || !open {
			break
		}

		// skip if it exists
		if w.r.UseMemory {
			if w.r.memory.HasChunk(w.r.cachedObject, chunkStart) {
				continue
			}

			// add it to RAM if it's in the persistent storage
			data, err = w.r.storage().GetChunk(w.r.cachedObject, chunkStart)
			if err == nil {
				err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
				if err != nil {
					fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
				} else {
					continue
				}
			}
			err = nil
		} else {
			if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
				continue
			}
		}

		chunkEnd := chunkStart + w.r.cacheFs().chunkSize
		// TODO: Remove this comment if it proves to be reliable for #1896
		//if chunkEnd > w.r.cachedObject.Size() {
		//	chunkEnd = w.r.cachedObject.Size()
		//}

		w.download(chunkStart, chunkEnd, 0)
	}
}

func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
	var err error
	var data []byte

	// stop retries
	if retry >= w.r.cacheFs().readRetries {
		return
	}
	// back off between retries
	if retry > 0 {
		time.Sleep(time.Second * time.Duration(retry))
	}

	w.rc, err = w.reader(chunkStart, chunkEnd)
	// retry if we failed to open the source object
	if err != nil {
		fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
		w.download(chunkStart, chunkEnd, retry+1)
		return
	}

	data = make([]byte, chunkEnd-chunkStart)
	sourceRead := 0
	sourceRead, err = io.ReadFull(w.rc, data)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
		w.download(chunkStart, chunkEnd, retry+1)
		return
	}
	data = data[:sourceRead] // reslice to remove extra garbage
	if err == io.ErrUnexpectedEOF {
		fs.Debugf(w, "partially downloaded chunk %v", fs.SizeSuffix(chunkStart))
	} else {
		fs.Debugf(w, "downloaded chunk %v", fs.SizeSuffix(chunkStart))
	}

	if w.r.UseMemory {
		err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
		if err != nil {
			fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
		}
	}

	err = w.r.storage().AddChunk(w.r.cachedObject.abs(), data, chunkStart)
	if err != nil {
		fs.Errorf(w, "failed caching chunk in storage %v: %v", chunkStart, err)
	}
}

// Check the interfaces are satisfied
var (
	_ io.ReadCloser = (*Handle)(nil)
	_ io.Seeker     = (*Handle)(nil)
)
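getChunk above splits any requested offset into a chunk-aligned start (a multiple of chunkSize) plus the remainder inside that chunk, and queueOffset fans consecutive chunk starts out to the workers. A standalone sketch of that arithmetic; the 5 MiB chunk size is only an assumed example value:

package main

import "fmt"

// alignChunk splits a requested offset into the chunk start (aligned
// down to a multiple of chunkSize) and the position within that chunk.
func alignChunk(offset, chunkSize int64) (chunkStart, within int64) {
	within = offset % chunkSize
	chunkStart = offset - within
	return chunkStart, within
}

func main() {
	const chunkSize = 5 * 1024 * 1024 // assumed 5 MiB chunk size
	chunkStart, within := alignChunk(13*1024*1024, chunkSize)
	fmt.Println(chunkStart, within) // 10485760 3145728
}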
cache/object.go (vendored, new file)
@@ -0,0 +1,315 @@
// +build !plan9,go1.7

package cache

import (
	"encoding/json"
	"io"
	"os"
	"path"
	"strconv"
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
)

// Object is a generic file like object that stores basic information about it
type Object struct {
	fs.Object `json:"-"`

	CacheFs       *Fs       `json:"-"`        // cache fs
	Name          string    `json:"name"`     // name of the object
	Dir           string    `json:"dir"`      // abs path of the object
	CacheModTime  int64     `json:"modTime"`  // modification or creation time - IsZero for unknown
	CacheSize     int64     `json:"size"`     // size of the object or -1 if unknown
	CacheStorable bool      `json:"storable"` // says whether this object can be stored
	CacheType     string    `json:"cacheType"`
	CacheTs       time.Time `json:"cacheTs"`
	cacheHashes   map[fs.HashType]string // all supported hashes cached

	refreshMutex sync.Mutex
}

// NewObject builds an empty one from a remote path which will be used to unmarshal data in it
func NewObject(f *Fs, remote string) *Object {
	fullRemote := path.Join(f.Root(), remote)
	dir, name := path.Split(fullRemote)

	co := &Object{
		CacheFs:       f,
		Name:          cleanPath(name),
		Dir:           cleanPath(dir),
		CacheModTime:  time.Now().UnixNano(),
		CacheSize:     0,
		CacheStorable: false,
		CacheType:     "Object",
		CacheTs:       time.Now(),
	}
	return co
}

// MarshalJSON is needed to override the hashes map (needed to support older versions of Go)
func (o *Object) MarshalJSON() ([]byte, error) {
	hashes := make(map[string]string)
	for k, v := range o.cacheHashes {
		hashes[strconv.Itoa(int(k))] = v
	}

	type Alias Object
	return json.Marshal(&struct {
		Hashes map[string]string `json:"hashes"`
		*Alias
	}{
		Alias:  (*Alias)(o),
		Hashes: hashes,
	})
}

// UnmarshalJSON is needed to override the CacheHashes map (needed to support older versions of Go)
func (o *Object) UnmarshalJSON(b []byte) error {
	type Alias Object
	aux := &struct {
		Hashes map[string]string `json:"hashes"`
		*Alias
	}{
		Alias: (*Alias)(o),
	}
	if err := json.Unmarshal(b, &aux); err != nil {
		return err
	}

	o.cacheHashes = make(map[fs.HashType]string)
	for k, v := range aux.Hashes {
		ht, _ := strconv.Atoi(k)
		o.cacheHashes[fs.HashType(ht)] = v
	}

	return nil
}

// ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
	var co *Object
	fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))

	dir, name := path.Split(fullRemote)
	co = &Object{
		CacheFs:   f,
		Name:      cleanPath(name),
		Dir:       cleanPath(dir),
		CacheType: "Object",
		CacheTs:   time.Now(),
	}
	co.updateData(o)
	return co
}

func (o *Object) updateData(source fs.Object) {
	o.Object = source
	o.CacheModTime = source.ModTime().UnixNano()
	o.CacheSize = source.Size()
	o.CacheStorable = source.Storable()
	o.CacheTs = time.Now()
	o.cacheHashes = make(map[fs.HashType]string)
}

// Fs returns its FS info
func (o *Object) Fs() fs.Info {
	return o.CacheFs
}

// String returns a human friendly name for this object
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	p := path.Join(o.Dir, o.Name)
	if o.CacheFs.Root() != "" {
		p = p[len(o.CacheFs.Root()):] // trim out root
		if len(p) > 0 {               // remove first separator
			p = p[1:]
		}
	}

	return p
}

// abs returns the absolute path to the object
func (o *Object) abs() string {
	return path.Join(o.Dir, o.Name)
}

// parentRemote returns the absolute path of the parent remote
func (o *Object) parentRemote() string {
	absPath := o.abs()
	return cleanPath(path.Dir(absPath))
}

// parentDir returns the parent directory as a cached Directory
func (o *Object) parentDir() *Directory {
	return NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
}

// ModTime returns the cached ModTime
func (o *Object) ModTime() time.Time {
	return time.Unix(0, o.CacheModTime)
}

// Size returns the cached Size
func (o *Object) Size() int64 {
	return o.CacheSize
}

// Storable returns the cached Storable
func (o *Object) Storable() bool {
	return o.CacheStorable
}

// refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource() error {
	o.refreshMutex.Lock()
	defer o.refreshMutex.Unlock()

	if o.Object != nil {
		return nil
	}

	liveObject, err := o.CacheFs.Fs.NewObject(o.Remote())
	if err != nil {
		fs.Errorf(o, "error refreshing object: %v", err)
		return err
	}
	o.updateData(liveObject)
	o.persist()

	return nil
}

// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(t time.Time) error {
	if err := o.refreshFromSource(); err != nil {
		return err
	}

	err := o.Object.SetModTime(t)
	if err != nil {
		return err
	}

	o.CacheModTime = t.UnixNano()
	o.persist()
	fs.Debugf(o.Fs(), "updated ModTime %v: %v", o, t)

	return nil
}

// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
	if err := o.refreshFromSource(); err != nil {
		return nil, err
	}

	var err error
	cacheReader := NewObjectHandle(o)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			_, err = cacheReader.Seek(x.Offset, os.SEEK_SET)
		case *fs.RangeOption:
			_, err = cacheReader.Seek(x.Start, os.SEEK_SET)
		}
		if err != nil {
			return cacheReader, err
		}
	}

	return cacheReader, nil
}

// Update will change the object data
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if err := o.refreshFromSource(); err != nil {
		return err
	}
	fs.Infof(o, "updating object contents with size %v", src.Size())

	// delete cached chunks and info so they can be replaced with new ones
	_ = o.CacheFs.cache.RemoveObject(o.abs())

	err := o.Object.Update(in, src, options...)
	if err != nil {
		fs.Errorf(o, "error updating source: %v", err)
		return err
	}

	o.CacheModTime = src.ModTime().UnixNano()
	o.CacheSize = src.Size()
	o.cacheHashes = make(map[fs.HashType]string)
	o.persist()

	return nil
}

// Remove deletes the object from both the cache and the source
func (o *Object) Remove() error {
	if err := o.refreshFromSource(); err != nil {
		return err
	}
	err := o.Object.Remove()
	if err != nil {
		return err
	}
	fs.Infof(o, "removing object")

	_ = o.CacheFs.cache.RemoveObject(o.abs())
	return err
}

// Hash requests a hash of the object and stores it in the cache;
// since it might or might not be called, this is lazily loaded
func (o *Object) Hash(ht fs.HashType) (string, error) {
	if o.cacheHashes == nil {
		o.cacheHashes = make(map[fs.HashType]string)
	}

	cachedHash, found := o.cacheHashes[ht]
	if found {
		return cachedHash, nil
	}

	if err := o.refreshFromSource(); err != nil {
		return "", err
	}

	liveHash, err := o.Object.Hash(ht)
	if err != nil {
		return "", err
	}

	o.cacheHashes[ht] = liveHash

	o.persist()
	fs.Debugf(o, "object hash cached: %v", liveHash)

	return liveHash, nil
}

// persist adds this object to the persistent cache
func (o *Object) persist() *Object {
	err := o.CacheFs.cache.AddObject(o)
	if err != nil {
		fs.Errorf(o, "failed to cache object: %v", err)
	}
	return o
}

// Check the interface is satisfied
var (
	_ fs.Object = (*Object)(nil)
)
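The MarshalJSON/UnmarshalJSON pair above leans on a local type alias: converting *Object to the alias type sheds Object's methods, so the inner json.Marshal call uses the default struct encoding instead of recursing into MarshalJSON again. A standalone sketch of the same pattern on a made-up type (Item and its fields are illustrative, not part of the cache package):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

type Item struct {
	Name   string `json:"name"`
	hashes map[int]string // unexported, so it needs manual handling
}

// MarshalJSON re-keys the unexported map and avoids recursion via an alias type
func (i *Item) MarshalJSON() ([]byte, error) {
	hashes := make(map[string]string)
	for k, v := range i.hashes {
		hashes[strconv.Itoa(k)] = v
	}
	type Alias Item // Alias has Item's fields but none of its methods
	return json.Marshal(&struct {
		Hashes map[string]string `json:"hashes"`
		*Alias
	}{Hashes: hashes, Alias: (*Alias)(i)})
}

func main() {
	b, _ := json.Marshal(&Item{Name: "x", hashes: map[int]string{1: "abc"}})
	fmt.Println(string(b)) // {"hashes":{"1":"abc"},"name":"x"}
}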
cache/plex.go (vendored, new file)
@@ -0,0 +1,244 @@
// +build !plan9,go1.7

package cache

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
)

const (
	// defPlexLoginURL is the default URL for Plex login
	defPlexLoginURL = "https://plex.tv/users/sign_in.json"
)

// plexConnector manages the cache integration with Plex
type plexConnector struct {
	url      *url.URL
	username string
	password string
	token    string
	f        *Fs
	mu       sync.Mutex
}

// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
	if err != nil {
		return nil, err
	}

	pc := &plexConnector{
		f:        f,
		url:      u,
		username: username,
		password: password,
		token:    "",
	}

	return pc, nil
}

// newPlexConnectorWithToken connects to a Plex server using a pre-existing token
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
	if err != nil {
		return nil, err
	}

	pc := &plexConnector{
		f:     f,
		url:   u,
		token: token,
	}

	return pc, nil
}

// fillDefaultHeaders will add common headers to requests
func (p *plexConnector) fillDefaultHeaders(req *http.Request) {
	req.Header.Add("X-Plex-Client-Identifier", fmt.Sprintf("rclone (%v)", p.f.String()))
	req.Header.Add("X-Plex-Product", fmt.Sprintf("rclone (%v)", p.f.Name()))
	req.Header.Add("X-Plex-Version", fs.Version)
	req.Header.Add("Accept", "application/json")
	if p.token != "" {
		req.Header.Add("X-Plex-Token", p.token)
	}
}

// authenticate will generate a token based on a username/password
func (p *plexConnector) authenticate() error {
	p.mu.Lock()
	defer p.mu.Unlock()

	form := url.Values{}
	form.Set("user[login]", p.username)
	form.Add("user[password]", p.password)
	req, err := http.NewRequest("POST", defPlexLoginURL, strings.NewReader(form.Encode()))
	if err != nil {
		return err
	}
	p.fillDefaultHeaders(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer func() { _ = resp.Body.Close() }() // release the response body
	var data map[string]interface{}
	err = json.NewDecoder(resp.Body).Decode(&data)
	if err != nil {
		return fmt.Errorf("failed to obtain token: %v", err)
	}
	tokenGen, ok := get(data, "user", "authToken")
	if !ok {
		return fmt.Errorf("failed to obtain token: %v", data)
	}
	token, ok := tokenGen.(string)
	if !ok {
		return fmt.Errorf("failed to obtain token: %v", data)
	}
	p.token = token
	if p.token != "" {
		fs.ConfigFileSet(p.f.Name(), "plex_token", p.token)
		fs.SaveConfig()
		fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
	}

	return nil
}

// isConnected checks if this rclone is authenticated to Plex
func (p *plexConnector) isConnected() bool {
	return p.token != ""
}

// isConfigured checks if this rclone is configured to use a Plex server
func (p *plexConnector) isConfigured() bool {
	return p.url != nil
}

func (p *plexConnector) isPlaying(co *Object) bool {
	isPlaying := false
	req, err := http.NewRequest("GET", fmt.Sprintf("%s/status/sessions", p.url.String()), nil)
	if err != nil {
		return false
	}
	p.fillDefaultHeaders(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false
	}
	var data map[string]interface{}
	err = json.NewDecoder(resp.Body).Decode(&data)
	_ = resp.Body.Close() // release the response body
	if err != nil {
		return false
	}
	sizeGen, ok := get(data, "MediaContainer", "size")
	if !ok {
		return false
	}
	size, ok := sizeGen.(float64)
	if !ok || size < float64(1) {
		return false
	}
	videosGen, ok := get(data, "MediaContainer", "Video")
	if !ok {
		fs.Errorf("plex", "empty videos: %v", data)
		return false
	}
	videos, ok := videosGen.([]interface{})
	if !ok || len(videos) < 1 {
		fs.Errorf("plex", "empty videos: %v", data)
		return false
	}
	for _, v := range videos {
		keyGen, ok := get(v, "key")
		if !ok {
			fs.Errorf("plex", "failed to find: key")
			continue
		}
		key, ok := keyGen.(string)
		if !ok {
			fs.Errorf("plex", "failed to understand: key")
			continue
		}
		req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), key), nil)
		if err != nil {
			return false
		}
		p.fillDefaultHeaders(req)
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return false
		}
		var data map[string]interface{}
		err = json.NewDecoder(resp.Body).Decode(&data)
		_ = resp.Body.Close() // release the response body
		if err != nil {
			return false
		}

		remote := co.Remote()
		if cr, yes := co.CacheFs.isWrappedByCrypt(); yes {
			remote, err = cr.DecryptFileName(co.Remote())
			if err != nil {
				fs.Errorf("plex", "cannot decrypt wrapped file: %v", err)
				continue
			}
		}
		fpGen, ok := get(data, "MediaContainer", "Metadata", 0, "Media", 0, "Part", 0, "file")
		if !ok {
			fs.Errorf("plex", "failed to understand: %v", data)
			continue
		}
		fp, ok := fpGen.(string)
		if !ok {
			fs.Errorf("plex", "failed to understand: %v", fp)
			continue
		}
		if strings.Contains(fp, remote) {
			isPlaying = true
			break
		}
	}

	return isPlaying
}

func (p *plexConnector) isPlayingAsync(co *Object, response chan bool) {
	time.Sleep(time.Second) // FIXME random guess here
	res := p.isPlaying(co)
	response <- res
}

// get walks a decoded JSON structure following the given path of map keys
// (string) and slice indices (int)
// adapted from: https://stackoverflow.com/a/28878037 (credit)
func get(m interface{}, path ...interface{}) (interface{}, bool) {
	for _, p := range path {
		switch idx := p.(type) {
		case string:
			if mm, ok := m.(map[string]interface{}); ok {
				if val, found := mm[idx]; found {
					m = val
					continue
				}
			}
			return nil, false
		case int:
			if mm, ok := m.([]interface{}); ok {
				if len(mm) > idx {
					m = mm[idx]
					continue
				}
			}
			return nil, false
		}
	}
	return m, true
}
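Since get is unexported, a usage sketch fits naturally as a test in the same package; the JSON below mimics the shape of a Plex sessions response (the payload is made up for illustration):

package cache

import (
	"encoding/json"
	"testing"
)

// TestGetWalksMixedPath checks that get() follows map keys (string)
// and slice indices (int) through a decoded JSON tree.
func TestGetWalksMixedPath(t *testing.T) {
	var data map[string]interface{}
	payload := `{"MediaContainer":{"Video":[{"key":"/library/metadata/1"}]}}`
	if err := json.Unmarshal([]byte(payload), &data); err != nil {
		t.Fatal(err)
	}
	v, ok := get(data, "MediaContainer", "Video", 0, "key")
	if !ok || v.(string) != "/library/metadata/1" {
		t.Fatalf("unexpected result: %v %v", v, ok)
	}
}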
cache/storage_memory.go (vendored, new file)
@@ -0,0 +1,100 @@
// +build !plan9,go1.7

package cache

import (
	"strconv"
	"strings"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/patrickmn/go-cache"
	"github.com/pkg/errors"
)

// Memory is a wrapper of transient storage for a go-cache store
type Memory struct {
	ChunkStorage

	db *cache.Cache
}

// NewMemory builds this cache storage
// defaultExpiration will set the expiry time of chunks in this storage
func NewMemory(defaultExpiration time.Duration) *Memory {
	mem := &Memory{}
	err := mem.Connect(defaultExpiration)
	if err != nil {
		fs.Errorf("cache", "can't open ram connection: %v", err)
	}

	return mem
}

// Connect will create a connection for the storage
func (m *Memory) Connect(defaultExpiration time.Duration) error {
	m.db = cache.New(defaultExpiration, -1)
	return nil
}

// HasChunk confirms the existence of a single chunk of an object
func (m *Memory) HasChunk(cachedObject *Object, offset int64) bool {
	key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)
	_, found := m.db.Get(key)
	return found
}

// GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it
func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
	key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)

	if x, found := m.db.Get(key); found {
		return x.([]byte), nil
	}

	return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
}

// AddChunk adds a new chunk of a cached object
func (m *Memory) AddChunk(fp string, data []byte, offset int64) error {
	return m.AddChunkAhead(fp, data, offset, time.Second)
}

// AddChunkAhead adds a new chunk of a cached object
// note: the store's default expiration is used; the t parameter is currently ignored
func (m *Memory) AddChunkAhead(fp string, data []byte, offset int64, t time.Duration) error {
	key := fp + "-" + strconv.FormatInt(offset, 10)
	m.db.Set(key, data, cache.DefaultExpiration)

	return nil
}

// CleanChunksByAge will clean up expired chunks; meant to run on a cron-like schedule
func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
	m.db.DeleteExpired()
}

// CleanChunksByNeed will clean up chunks once the FS has read past a given offset
func (m *Memory) CleanChunksByNeed(offset int64) {
	items := m.db.Items()
	for key := range items {
		sepIdx := strings.LastIndex(key, "-")
		keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
		if err != nil {
			fs.Errorf("cache", "couldn't parse offset entry %v", key)
			continue
		}

		if keyOffset < offset {
			m.db.Delete(key)
		}
	}
}

// CleanChunksBySize will clean up chunks after the total size passes a certain point
func (m *Memory) CleanChunksBySize(maxSize int64) {
	// NOOP
}
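Both HasChunk and GetChunk above build keys as "<absolute path>-<offset>", and CleanChunksByNeed recovers the offset by splitting on the last "-", which keeps paths that themselves contain dashes unambiguous. A standalone sketch of that round trip:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// chunkKey encodes an object path and chunk offset into one map key
func chunkKey(abs string, offset int64) string {
	return abs + "-" + strconv.FormatInt(offset, 10)
}

// keyOffset recovers the offset by splitting on the last "-"
func keyOffset(key string) (int64, error) {
	sepIdx := strings.LastIndex(key, "-")
	return strconv.ParseInt(key[sepIdx+1:], 10, 64)
}

func main() {
	key := chunkKey("/my-remote/file.bin", 10485760)
	off, _ := keyOffset(key)
	fmt.Println(key, off) // /my-remote/file.bin-10485760 10485760
}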
cache/storage_persistent.go (vendored, new file)
@@ -0,0 +1,721 @@
// +build !plan9,go1.7

package cache

import (
	"bytes"
	"encoding/binary"
	"encoding/json"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	bolt "github.com/coreos/bbolt"
	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

// Constants
const (
	RootBucket   = "root"
	RootTsBucket = "rootTs"
	DataTsBucket = "dataTs"
)

// Features flags for this storage type
type Features struct {
	PurgeDb bool // purge the db before starting
}

var boltMap = make(map[string]*Persistent)
var boltMapMx sync.Mutex

// GetPersistent returns a single instance for the specific store
func GetPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) {
	// write lock to create one
	boltMapMx.Lock()
	defer boltMapMx.Unlock()
	if b, ok := boltMap[dbPath]; ok {
		return b, nil
	}

	bb, err := newPersistent(dbPath, chunkPath, f)
	if err != nil {
		return nil, err
	}
	boltMap[dbPath] = bb
	return boltMap[dbPath], nil
}

type chunkInfo struct {
	Path   string
	Offset int64
	Size   int64
}

// Persistent is a wrapper of persistent storage for a bolt.DB file
type Persistent struct {
	Storage

	dbPath     string
	dataPath   string
	db         *bolt.DB
	cleanupMux sync.Mutex
	features   *Features
}

// newPersistent builds a new wrapper and connects to the bolt.DB file
func newPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) {
	b := &Persistent{
		dbPath:   dbPath,
		dataPath: chunkPath,
		features: f,
	}

	err := b.Connect()
	if err != nil {
		fs.Errorf(dbPath, "Error opening storage cache. Is there another rclone running on the same remote? %v", err)
		return nil, err
	}

	return b, nil
}

// String will return a human friendly string for this DB (currently the dbPath)
func (b *Persistent) String() string {
	return "<Cache DB> " + b.dbPath
}

// Connect creates a connection to the configured file;
// if the PurgeDb feature flag is set, the existing file and chunk data are
// removed first so that an empty DB is created
func (b *Persistent) Connect() error {
	var db *bolt.DB
	var err error

	if b.features.PurgeDb {
		err := os.Remove(b.dbPath)
		if err != nil {
			fs.Errorf(b, "failed to remove cache file: %v", err)
		}
		err = os.RemoveAll(b.dataPath)
		if err != nil {
			fs.Errorf(b, "failed to remove cache data: %v", err)
		}
	}

	err = os.MkdirAll(b.dataPath, os.ModePerm)
	if err != nil {
		return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
	}
	db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
	}

	_ = db.Update(func(tx *bolt.Tx) error {
		_, _ = tx.CreateBucketIfNotExists([]byte(RootBucket))
		_, _ = tx.CreateBucketIfNotExists([]byte(RootTsBucket))
		_, _ = tx.CreateBucketIfNotExists([]byte(DataTsBucket))

		return nil
	})

	b.db = db
	return nil
}

// getBucket cleans a specific path of the form: /var/tmp and will iterate
// through each path component to get to the nested bucket of the final part
// (in this example: tmp)
func (b *Persistent) getBucket(dir string, createIfMissing bool, tx *bolt.Tx) *bolt.Bucket {
	dir = cleanPath(dir)

	entries := strings.FieldsFunc(dir, func(c rune) bool {
		return os.PathSeparator == c
	})
	bucket := tx.Bucket([]byte(RootBucket))

	for _, entry := range entries {
		if createIfMissing {
			bucket, _ = bucket.CreateBucketIfNotExists([]byte(entry))
		} else {
			bucket = bucket.Bucket([]byte(entry))
		}

		if bucket == nil {
			return nil
		}
	}

	return bucket
}

// AddDir will update a CachedDirectory metadata and all its entries
func (b *Persistent) AddDir(cachedDir *Directory) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cachedDir.abs(), true, tx)
		if bucket == nil {
			return errors.Errorf("couldn't open bucket (%v)", cachedDir)
		}

		encoded, err := json.Marshal(cachedDir)
		if err != nil {
			return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
		}
		return bucket.Put([]byte("."), encoded)
	})
}

// GetDirEntries will return a CachedDirectory, its list of dir entries and/or an error if it encountered issues
func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error) {
	var dirEntries fs.DirEntries

	err := b.db.View(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cachedDir.abs(), false, tx)
		if bucket == nil {
			return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
		}

		val := bucket.Get([]byte("."))
		if val != nil {
			err := json.Unmarshal(val, cachedDir)
			if err != nil {
				return errors.Errorf("error during unmarshalling obj: %v", err)
			}
		} else {
			return errors.Errorf("missing cached dir: %v", cachedDir)
		}

		c := bucket.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			// ignore the metadata key: .
			if bytes.Equal(k, []byte(".")) {
				continue
			}
			entryPath := path.Join(cachedDir.Remote(), string(k))

			if v == nil { // directory
				// we try to find a cached meta for the dir
				currentBucket := c.Bucket().Bucket(k)
				if currentBucket == nil {
					return errors.Errorf("couldn't open bucket (%v)", string(k))
				}

				metaKey := currentBucket.Get([]byte("."))
				d := NewDirectory(cachedDir.CacheFs, entryPath)
				if metaKey != nil { // if we don't find it, we create an empty dir
					err := json.Unmarshal(metaKey, d)
					if err != nil { // if even this fails, we fall back to an empty dir
						fs.Debugf(string(k), "error during unmarshalling obj: %v", err)
					}
				}

				dirEntries = append(dirEntries, d)
			} else { // object
				o := NewObject(cachedDir.CacheFs, entryPath)
				err := json.Unmarshal(v, o)
				if err != nil {
					fs.Debugf(string(k), "error during unmarshalling obj: %v", err)
					continue
				}

				dirEntries = append(dirEntries, o)
			}
		}

		return nil
	})

	return dirEntries, err
}

// RemoveDir will delete a CachedDirectory, all its objects and all the chunks stored for it
func (b *Persistent) RemoveDir(fp string) error {
	var err error
	parentDir, dirName := path.Split(fp)
	if fp == "" {
		err = b.db.Update(func(tx *bolt.Tx) error {
			err := tx.DeleteBucket([]byte(RootBucket))
			if err != nil {
				fs.Debugf(fp, "couldn't delete from cache: %v", err)
				return err
			}
			_, _ = tx.CreateBucketIfNotExists([]byte(RootBucket))
			return nil
		})
	} else {
		err = b.db.Update(func(tx *bolt.Tx) error {
			bucket := b.getBucket(cleanPath(parentDir), false, tx)
			if bucket == nil {
				return errors.Errorf("couldn't open bucket (%v)", fp)
			}
			// delete the cached dir
			err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
			if err != nil {
				fs.Debugf(fp, "couldn't delete from cache: %v", err)
			}
			return nil
		})
	}

	// delete chunks on disk
	// safe to ignore as the files might not have been open
	if err == nil {
		_ = os.RemoveAll(path.Join(b.dataPath, fp))
		_ = os.MkdirAll(b.dataPath, os.ModePerm)
	}

	return err
}

// ExpireDir will mark a CachedDirectory and all of its parent directories as
// expired; cached chunks remain as they are
func (b *Persistent) ExpireDir(cd *Directory) error {
	t := time.Now().Add(cd.CacheFs.fileAge * -1)
	cd.CacheTs = &t

	// expire all the parents
	return b.db.Update(func(tx *bolt.Tx) error {
		currentDir := cd.abs()
		for { // until we get to the root
			bucket := b.getBucket(currentDir, false, tx)
			if bucket != nil {
				val := bucket.Get([]byte("."))
				if val != nil {
					cd2 := &Directory{CacheFs: cd.CacheFs}
					err := json.Unmarshal(val, cd2)
					if err == nil {
						fs.Debugf(cd, "cache: expired %v", currentDir)
						cd2.CacheTs = &t
						enc2, _ := json.Marshal(cd2)
						_ = bucket.Put([]byte("."), enc2)
					}
				}
			}
			if currentDir == "" {
				break
			}
			currentDir = cleanPath(path.Dir(currentDir))
		}
		return nil
	})
}

// GetObject will return a CachedObject from its parent directory or an error if it doesn't find it
func (b *Persistent) GetObject(cachedObject *Object) (err error) {
	return b.db.View(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cachedObject.Dir, false, tx)
		if bucket == nil {
			return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
		}
		val := bucket.Get([]byte(cachedObject.Name))
		if val != nil {
			return json.Unmarshal(val, cachedObject)
		}
		return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
	})
}

// AddObject will create a cached object in its parent directory
func (b *Persistent) AddObject(cachedObject *Object) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cachedObject.Dir, true, tx)
		if bucket == nil {
			return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
		}
		// cache Object Info
		encoded, err := json.Marshal(cachedObject)
		if err != nil {
			return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
		}
		err = bucket.Put([]byte(cachedObject.Name), encoded)
		if err != nil {
			return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
		}
		return nil
	})
}

// RemoveObject will delete a single cached object and all the chunks which belong to it
func (b *Persistent) RemoveObject(fp string) error {
	parentDir, objName := path.Split(fp)
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cleanPath(parentDir), false, tx)
		if bucket == nil {
			return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
		}
		err := bucket.Delete([]byte(cleanPath(objName)))
		if err != nil {
			fs.Debugf(fp, "couldn't delete obj from storage: %v", err)
		}
		// delete chunks on disk
		// safe to ignore as the file might not have been open
		_ = os.RemoveAll(path.Join(b.dataPath, fp))
		return nil
	})
}

// HasEntry confirms the existence of a single entry (dir or object)
func (b *Persistent) HasEntry(remote string) bool {
	dir, name := path.Split(remote)
	dir = cleanPath(dir)
	name = cleanPath(name)

	err := b.db.View(func(tx *bolt.Tx) error {
		bucket := b.getBucket(dir, false, tx)
		if bucket == nil {
			return errors.Errorf("couldn't open parent bucket for %v", remote)
		}
		if f := bucket.Bucket([]byte(name)); f != nil {
			return nil
		}
		if f := bucket.Get([]byte(name)); f != nil {
			return nil
		}

		return errors.Errorf("couldn't find object (%v)", remote)
	})
	return err == nil
}

// HasChunk confirms the existence of a single chunk of an object
func (b *Persistent) HasChunk(cachedObject *Object, offset int64) bool {
	fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
	if _, err := os.Stat(fp); !os.IsNotExist(err) {
		return true
	}
	return false
}

// GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it
func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
	fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
	data, err := ioutil.ReadFile(fp)
	if err != nil {
		return nil, err
	}

	return data, err
}

// AddChunk adds a new chunk of a cached object
func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
	_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)

	filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
	err := ioutil.WriteFile(filePath, data, os.ModePerm)
	if err != nil {
		return err
	}

	return b.db.Update(func(tx *bolt.Tx) error {
		tsBucket := tx.Bucket([]byte(DataTsBucket))
		ts := time.Now()
		found := false

		// delete (older) timestamps for the same object
		c := tsBucket.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			var ci chunkInfo
			err = json.Unmarshal(v, &ci)
			if err != nil {
				continue
			}
			if ci.Path == fp && ci.Offset == offset {
				if tsInCache := time.Unix(0, btoi(k)); tsInCache.After(ts) && !found {
					found = true
					continue
				}
				err := c.Delete()
				if err != nil {
					fs.Debugf(fp, "failed to clean chunk: %v", err)
				}
			}
		}
		// don't overwrite if a newer one is already there
		if found {
			return nil
		}
		enc, err := json.Marshal(chunkInfo{Path: fp, Offset: offset, Size: int64(len(data))})
		if err != nil {
			fs.Debugf(fp, "failed to timestamp chunk: %v", err)
		}
		err = tsBucket.Put(itob(ts.UnixNano()), enc)
		if err != nil {
			fs.Debugf(fp, "failed to timestamp chunk: %v", err)
		}
		return nil
	})
}

// CleanChunksByAge is a noop for this implementation; chunks are cleaned by size instead
func (b *Persistent) CleanChunksByAge(chunkAge time.Duration) {
	// NOOP
}

// CleanChunksByNeed is a noop for this implementation
func (b *Persistent) CleanChunksByNeed(offset int64) {
	// noop: we want to clean a Bolt DB by time only
}

// CleanChunksBySize will clean up chunks after the total size passes a certain point
func (b *Persistent) CleanChunksBySize(maxSize int64) {
	b.cleanupMux.Lock()
	defer b.cleanupMux.Unlock()
	var cntChunks int

	err := b.db.Update(func(tx *bolt.Tx) error {
		dataTsBucket := tx.Bucket([]byte(DataTsBucket))
		if dataTsBucket == nil {
			return errors.Errorf("couldn't open (%v) bucket", DataTsBucket)
		}
		// iterate through the timestamps and sum up the total size
		c := dataTsBucket.Cursor()
		totalSize := int64(0)
		for k, v := c.First(); k != nil; k, v = c.Next() {
			var ci chunkInfo
			err := json.Unmarshal(v, &ci)
			if err != nil {
				continue
			}

			totalSize += ci.Size
		}

		if totalSize > maxSize {
			needToClean := totalSize - maxSize
			// keys are big endian timestamps, so this deletes the oldest chunks first
			for k, v := c.First(); k != nil; k, v = c.Next() {
				var ci chunkInfo
				err := json.Unmarshal(v, &ci)
				if err != nil {
					continue
				}
				// delete this ts entry
				err = c.Delete()
				if err != nil {
					fs.Errorf(ci.Path, "failed deleting chunk ts during cleanup (%v): %v", ci.Offset, err)
					continue
				}
				err = os.Remove(path.Join(b.dataPath, ci.Path, strconv.FormatInt(ci.Offset, 10)))
				if err == nil {
					cntChunks++
					needToClean -= ci.Size
					if needToClean <= 0 {
						break
					}
				}
			}
		}
		fs.Infof("cache", "deleted (%v) chunks", cntChunks)
		return nil
	})

	if err != nil {
		if err == bolt.ErrDatabaseNotOpen {
			// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
			return
		}
		fs.Errorf("cache", "cleanup failed: %v", err)
	}
}

// Stats returns a go map with the stats key values
func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
	r := make(map[string]map[string]interface{})
	r["data"] = make(map[string]interface{})
	r["data"]["oldest-ts"] = time.Now()
	r["data"]["oldest-file"] = ""
	r["data"]["newest-ts"] = time.Now()
	r["data"]["newest-file"] = ""
	r["data"]["total-chunks"] = 0
	r["data"]["total-size"] = int64(0)
	r["files"] = make(map[string]interface{})
	r["files"]["oldest-ts"] = time.Now()
	r["files"]["oldest-name"] = ""
	r["files"]["newest-ts"] = time.Now()
	r["files"]["newest-name"] = ""
	r["files"]["total-files"] = 0

	_ = b.db.View(func(tx *bolt.Tx) error {
		dataTsBucket := tx.Bucket([]byte(DataTsBucket))
		rootTsBucket := tx.Bucket([]byte(RootTsBucket))

		var totalDirs int
		var totalFiles int
		_ = b.iterateBuckets(tx.Bucket([]byte(RootBucket)), func(name string) {
			totalDirs++
		}, func(key string, val []byte) {
			totalFiles++
		})
		r["files"]["total-dir"] = totalDirs
		r["files"]["total-files"] = totalFiles

		c := dataTsBucket.Cursor()

		totalChunks := 0
		totalSize := int64(0)
		for k, v := c.First(); k != nil; k, v = c.Next() {
			var ci chunkInfo
			err := json.Unmarshal(v, &ci)
			if err != nil {
				continue
			}
			totalChunks++
			totalSize += ci.Size
		}
		r["data"]["total-chunks"] = totalChunks
		r["data"]["total-size"] = totalSize

		if k, v := c.First(); k != nil {
			var ci chunkInfo
			_ = json.Unmarshal(v, &ci)
			r["data"]["oldest-ts"] = time.Unix(0, btoi(k))
			r["data"]["oldest-file"] = ci.Path
		}
		if k, v := c.Last(); k != nil {
			var ci chunkInfo
			_ = json.Unmarshal(v, &ci)
			r["data"]["newest-ts"] = time.Unix(0, btoi(k))
			r["data"]["newest-file"] = ci.Path
		}

		c = rootTsBucket.Cursor()
		if k, v := c.First(); k != nil {
			// keys are timestamps, values are the absolute file paths
			r["files"]["oldest-ts"] = time.Unix(0, btoi(k))
			r["files"]["oldest-name"] = string(v)
		}
		if k, v := c.Last(); k != nil {
			r["files"]["newest-ts"] = time.Unix(0, btoi(k))
			r["files"]["newest-name"] = string(v)
		}

		return nil
	})

	return r, nil
}

// Purge will flush the entire cache
func (b *Persistent) Purge() {
	b.cleanupMux.Lock()
	defer b.cleanupMux.Unlock()

	_ = b.db.Update(func(tx *bolt.Tx) error {
		_ = tx.DeleteBucket([]byte(RootBucket))
		_ = tx.DeleteBucket([]byte(RootTsBucket))
		_ = tx.DeleteBucket([]byte(DataTsBucket))

		_, _ = tx.CreateBucketIfNotExists([]byte(RootBucket))
		_, _ = tx.CreateBucketIfNotExists([]byte(RootTsBucket))
		_, _ = tx.CreateBucketIfNotExists([]byte(DataTsBucket))

		return nil
	})

	err := os.RemoveAll(b.dataPath)
	if err != nil {
		fs.Errorf(b, "issue removing data folder: %v", err)
	}
	err = os.MkdirAll(b.dataPath, os.ModePerm)
	if err != nil {
		fs.Errorf(b, "issue recreating data folder: %v", err)
	}
}

// GetChunkTs retrieves the current timestamp of this chunk
func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
	var t time.Time

	err := b.db.View(func(tx *bolt.Tx) error {
		tsBucket := tx.Bucket([]byte(DataTsBucket))
		c := tsBucket.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			var ci chunkInfo
			err := json.Unmarshal(v, &ci)
			if err != nil {
				continue
			}
			if ci.Path == path && ci.Offset == offset {
				t = time.Unix(0, btoi(k))
				return nil
			}
		}
		return errors.Errorf("not found %v-%v", path, offset)
	})

	return t, err
}

// iterateBuckets walks all nested buckets and key/value pairs under buk
// (or the whole transaction when buk is nil), calling bucketFn for each
// bucket and kvFn for each key/value pair
func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string), kvFn func(key string, val []byte)) error {
	err := b.db.View(func(tx *bolt.Tx) error {
		var c *bolt.Cursor
		if buk == nil {
			c = tx.Cursor()
		} else {
			c = buk.Cursor()
		}
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if v == nil {
				var buk2 *bolt.Bucket
				if buk == nil {
					buk2 = tx.Bucket(k)
				} else {
					buk2 = buk.Bucket(k)
				}

				bucketFn(string(k))
				_ = b.iterateBuckets(buk2, bucketFn, kvFn)
			} else {
				kvFn(string(k), v)
			}
		}
		return nil
	})

	return err
}

// Close should be called when the program ends gracefully
func (b *Persistent) Close() {
	b.cleanupMux.Lock()
	defer b.cleanupMux.Unlock()

	err := b.db.Close()
	if err != nil {
		fs.Errorf(b, "closing handle: %v", err)
	}
}

// itob returns an 8-byte big endian representation of v
func itob(v int64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(v))
	return b
}

// btoi converts an 8-byte big endian representation back to an int64
func btoi(d []byte) int64 {
	return int64(binary.BigEndian.Uint64(d))
}

// cloneBytes returns a copy of a given slice
func cloneBytes(v []byte) []byte {
	var clone = make([]byte, len(v))
	copy(clone, v)
	return clone
}
|
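// Note on the encoding above: bolt orders keys byte-wise, so the 8-byte
// big-endian form produced by itob makes cursor order equal chronological
// order - that is why c.First()/c.Last() in Stats yield the oldest/newest
// entries. A minimal sketch of the round trip (hypothetical value):
//
//	key := itob(time.Now().UnixNano()) // byte order == numeric order
//	ts := time.Unix(0, btoi(key))      // recovers the original time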
49
cmd/all/all.go
Normal file
@@ -0,0 +1,49 @@
// Package all imports all the commands
package all

import (
	// Active commands
	_ "github.com/ncw/rclone/cmd"
	_ "github.com/ncw/rclone/cmd/authorize"
	_ "github.com/ncw/rclone/cmd/cachestats"
	_ "github.com/ncw/rclone/cmd/cat"
	_ "github.com/ncw/rclone/cmd/check"
	_ "github.com/ncw/rclone/cmd/cleanup"
	_ "github.com/ncw/rclone/cmd/cmount"
	_ "github.com/ncw/rclone/cmd/config"
	_ "github.com/ncw/rclone/cmd/copy"
	_ "github.com/ncw/rclone/cmd/copyto"
	_ "github.com/ncw/rclone/cmd/cryptcheck"
	_ "github.com/ncw/rclone/cmd/cryptdecode"
	_ "github.com/ncw/rclone/cmd/dbhashsum"
	_ "github.com/ncw/rclone/cmd/dedupe"
	_ "github.com/ncw/rclone/cmd/delete"
	_ "github.com/ncw/rclone/cmd/genautocomplete"
	_ "github.com/ncw/rclone/cmd/gendocs"
	_ "github.com/ncw/rclone/cmd/info"
	_ "github.com/ncw/rclone/cmd/listremotes"
	_ "github.com/ncw/rclone/cmd/ls"
	_ "github.com/ncw/rclone/cmd/ls2"
	_ "github.com/ncw/rclone/cmd/lsd"
	_ "github.com/ncw/rclone/cmd/lsjson"
	_ "github.com/ncw/rclone/cmd/lsl"
	_ "github.com/ncw/rclone/cmd/md5sum"
	_ "github.com/ncw/rclone/cmd/memtest"
	_ "github.com/ncw/rclone/cmd/mkdir"
	_ "github.com/ncw/rclone/cmd/mount"
	_ "github.com/ncw/rclone/cmd/move"
	_ "github.com/ncw/rclone/cmd/moveto"
	_ "github.com/ncw/rclone/cmd/ncdu"
	_ "github.com/ncw/rclone/cmd/obscure"
	_ "github.com/ncw/rclone/cmd/purge"
	_ "github.com/ncw/rclone/cmd/rcat"
	_ "github.com/ncw/rclone/cmd/rmdir"
	_ "github.com/ncw/rclone/cmd/rmdirs"
	_ "github.com/ncw/rclone/cmd/serve"
	_ "github.com/ncw/rclone/cmd/sha1sum"
	_ "github.com/ncw/rclone/cmd/size"
	_ "github.com/ncw/rclone/cmd/sync"
	_ "github.com/ncw/rclone/cmd/touch"
	_ "github.com/ncw/rclone/cmd/tree"
	_ "github.com/ncw/rclone/cmd/version"
)
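// The blank imports above are purely for side effects: each command package
// registers itself with the root command from its own init function, so
// importing it is enough to enable the command. A sketch of the pattern a
// subcommand follows (hypothetical "example" command, not part of this commit):
//
//	package example
//
//	import (
//		"github.com/ncw/rclone/cmd"
//		"github.com/spf13/cobra"
//	)
//
//	func init() {
//		cmd.Root.AddCommand(&cobra.Command{Use: "example"})
//	}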
45
cmd/atexit.go
Normal file
@@ -0,0 +1,45 @@
package cmd

// Atexit handling

import (
	"os"
	"os/signal"
	"sync"

	"github.com/ncw/rclone/fs"
)

var (
	atExitFns          []func()
	atExitOnce         sync.Once
	atExitRegisterOnce sync.Once
)

// AtExit registers a function to be run on exit
func AtExit(fn func()) {
	atExitFns = append(atExitFns, fn)
	// Run AtExit handlers on SIGINT or SIGTERM so everything gets
	// tidied up properly
	atExitRegisterOnce.Do(func() {
		go func() {
			ch := make(chan os.Signal, 1)
			signal.Notify(ch, os.Interrupt) // syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT
			sig := <-ch
			fs.Infof(nil, "Signal received: %s", sig)
			runAtExitFunctions()
			fs.Infof(nil, "Exiting...")
			os.Exit(0)
		}()
	})
}

// runAtExitFunctions runs all the AtExit functions if they haven't
// been run already
func runAtExitFunctions() {
	atExitOnce.Do(func() {
		for _, fn := range atExitFns {
			fn()
		}
	})
}
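// A sketch of how callers use AtExit (hypothetical cleanup body); the
// handler runs exactly once, either from the signal goroutine above or
// from the root command's PersistentPostRun:
//
//	cmd.AtExit(func() {
//		// flush profiles, close cache handles, etc.
//	})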
24
cmd/authorize/authorize.go
Normal file
@@ -0,0 +1,24 @@
package authorize

import (
	"github.com/ncw/rclone/cmd"
	"github.com/ncw/rclone/fs"
	"github.com/spf13/cobra"
)

func init() {
	cmd.Root.AddCommand(commandDefintion)
}

var commandDefintion = &cobra.Command{
	Use:   "authorize",
	Short: `Remote authorization.`,
	Long: `
Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 3, command, args)
		fs.Authorize(args)
	},
}
64
cmd/cachestats/cachestats.go
Normal file
@@ -0,0 +1,64 @@
// +build !plan9,go1.7

package cachestats

import (
	"encoding/json"
	"fmt"

	"github.com/ncw/rclone/cache"
	"github.com/ncw/rclone/cmd"
	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
	Use:   "cachestats source:",
	Short: `Print cache stats for a remote`,
	Long: `
Print cache stats for a remote in JSON format
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)

		_, configName, _, err := fs.ParseRemote(args[0])
		if err != nil {
			fs.Errorf("cachestats", "%s", err.Error())
			return
		}

		if !fs.ConfigFileGetBool(configName, "read_only", false) {
			fs.ConfigFileSet(configName, "read_only", "true")
			defer fs.ConfigFileDeleteKey(configName, "read_only")
		}

		fsrc := cmd.NewFsSrc(args)
		cmd.Run(false, false, command, func() error {
			var fsCache *cache.Fs
			fsCache, ok := fsrc.(*cache.Fs)
			if !ok {
				unwrap := fsrc.Features().UnWrap
				if unwrap != nil {
					fsCache, ok = unwrap().(*cache.Fs)
				}
				if !ok {
					return errors.Errorf("%s: is not a cache remote", fsrc.Name())
				}
			}
			m, err := fsCache.Stats()
			if err != nil {
				return err
			}

			raw, err := json.MarshalIndent(m, "", " ")
			if err != nil {
				return err
			}

			fmt.Printf("%s\n", string(raw))
			return nil
		})
	},
}
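// The type assertion in the Run function above relies on the optional
// UnWrap feature: if the remote on the command line is not itself a
// *cache.Fs (for example a crypt remote layered over a cache), one level
// of wrapping is peeled off and the assertion retried - note that only a
// single layer is unwrapped, so a doubly wrapped cache would still not be
// found. Sketch of the pattern (same names as above):
//
//	fsCache, ok := fsrc.(*cache.Fs)
//	if !ok {
//		if unwrap := fsrc.Features().UnWrap; unwrap != nil {
//			fsCache, ok = unwrap().(*cache.Fs)
//		}
//	}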
6
cmd/cachestats/cachestats_unsupported.go
Normal file
@@ -0,0 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 !go1.7

package cachestats
80
cmd/cat/cat.go
Normal file
@@ -0,0 +1,80 @@
package cat

import (
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/ncw/rclone/cmd"
	"github.com/ncw/rclone/fs"
	"github.com/spf13/cobra"
)

// Globals
var (
	head    = int64(0)
	tail    = int64(0)
	offset  = int64(0)
	count   = int64(-1)
	discard = false
)

func init() {
	cmd.Root.AddCommand(commandDefintion)
	commandDefintion.Flags().Int64VarP(&head, "head", "", head, "Only print the first N characters.")
	commandDefintion.Flags().Int64VarP(&tail, "tail", "", tail, "Only print the last N characters.")
	commandDefintion.Flags().Int64VarP(&offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).")
	commandDefintion.Flags().Int64VarP(&count, "count", "", count, "Only print N characters.")
	commandDefintion.Flags().BoolVarP(&discard, "discard", "", discard, "Discard the output instead of printing.")
}

var commandDefintion = &cobra.Command{
	Use:   "cat remote:path",
	Short: `Concatenates any files and sends them to stdout.`,
	Long: `
rclone cat sends any files to standard output.

You can use it like this to output a single file

    rclone cat remote:path/to/file

Or like this to output any file in dir or its subdirectories.

    rclone cat remote:path/to/dir

Or like this to output any .txt files in dir or its subdirectories.

    rclone --include "*.txt" cat remote:path/to/dir

Use the --head flag to print characters only at the start, --tail for
the end and --offset and --count to print a section in the middle.
Note that if offset is negative it will count from the end, so
--offset -1 --count 1 is equivalent to --tail 1.
`,
	Run: func(command *cobra.Command, args []string) {
		usedOffset := offset != 0 || count >= 0
		usedHead := head > 0
		usedTail := tail > 0
		if usedHead && usedTail || usedHead && usedOffset || usedTail && usedOffset {
			log.Fatalf("Can only use one of --head, --tail or --offset with --count")
		}
		if head > 0 {
			offset = 0
			count = head
		}
		if tail > 0 {
			offset = -tail
			count = -1
		}
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		var w io.Writer = os.Stdout
		if discard {
			w = ioutil.Discard
		}
		cmd.Run(false, false, command, func() error {
			return fs.Cat(fsrc, w, offset, count)
		})
	},
}
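// The Run function above normalizes all four flags onto the (offset, count)
// pair consumed by fs.Cat, so these invocations are equivalent (worked
// example, no extra behaviour):
//
//	--head 16              => offset=0,   count=16
//	--tail 16              => offset=-16, count=-1 (read to the end)
//	--offset -1 --count 1  == --tail 1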
45
cmd/check/check.go
Normal file
@@ -0,0 +1,45 @@
package check

import (
	"github.com/ncw/rclone/cmd"
	"github.com/ncw/rclone/fs"
	"github.com/spf13/cobra"
)

// Globals
var (
	download = false
)

func init() {
	cmd.Root.AddCommand(commandDefintion)
	commandDefintion.Flags().BoolVarP(&download, "download", "", download, "Check by downloading rather than with hash.")
}

var commandDefintion = &cobra.Command{
	Use:   "check source:path dest:path",
	Short: `Checks the files in the source and destination match.`,
	Long: `
Checks the files in the source and destination match. It compares
sizes and hashes (MD5 or SHA1) and logs a report of files which don't
match. It doesn't alter the source or destination.

If you supply the --size-only flag, it will only compare the sizes not
the hashes as well. Use this for a quick check.

If you supply the --download flag, it will download the data from
both remotes and check them against each other on the fly. This can
be useful for remotes that don't support hashes or if you really want
to check all the data.
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(2, 2, command, args)
		fsrc, fdst := cmd.NewFsSrcDst(args)
		cmd.Run(false, false, command, func() error {
			if download {
				return fs.CheckDownload(fdst, fsrc)
			}
			return fs.Check(fdst, fsrc)
		})
	},
}
27
cmd/cleanup/cleanup.go
Normal file
@@ -0,0 +1,27 @@
package cleanup

import (
	"github.com/ncw/rclone/cmd"
	"github.com/ncw/rclone/fs"
	"github.com/spf13/cobra"
)

func init() {
	cmd.Root.AddCommand(commandDefintion)
}

var commandDefintion = &cobra.Command{
	Use:   "cleanup remote:path",
	Short: `Clean up the remote if possible`,
	Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		cmd.Run(true, false, command, func() error {
			return fs.CleanUp(fsrc)
		})
	},
}
435
cmd/cmd.go
Normal file
@@ -0,0 +1,435 @@
// Package cmd implements the rclone command
//
// It is in a sub package so its internals can be re-used elsewhere
package cmd

// FIXME only attach the remote flags when using a remote???
// would probably mean bringing all the flags in to here? Or define some flagsets in fs...

import (
	"fmt"
	"log"
	"os"
	"path"
	"regexp"
	"runtime"
	"runtime/pprof"
	"time"

	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"

	"github.com/ncw/rclone/fs"
)

// Globals
var (
	// Flags
	cpuProfile    = fs.StringP("cpuprofile", "", "", "Write cpu profile to file")
	memProfile    = fs.StringP("memprofile", "", "", "Write memory profile to file")
	statsInterval = fs.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable)")
	dataRateUnit  = fs.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
	version       bool
	retries       = fs.IntP("retries", "", 3, "Retry operations this many times if they fail")
	// Errors
	errorCommandNotFound    = errors.New("command not found")
	errorUncategorized      = errors.New("uncategorized error")
	errorNotEnoughArguments = errors.New("not enough arguments")
	errorTooManyArguents    = errors.New("too many arguments")
	errorUsageError         = errors.New("usage error")
)

const (
	exitCodeSuccess = iota
	exitCodeUsageError
	exitCodeUncategorizedError
	exitCodeDirNotFound
	exitCodeFileNotFound
	exitCodeRetryError
	exitCodeNoRetryError
	exitCodeFatalError
)

// Root is the main rclone command
var Root = &cobra.Command{
	Use:   "rclone",
	Short: "Sync files and directories to and from local and remote object stores - " + fs.Version,
	Long: `
Rclone is a command line program to sync files and directories to and
from various cloud storage systems and using file transfer services, such as:

* Amazon Drive
* Amazon S3
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* The local filesystem

Features

* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts

See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.

* https://rclone.org/
`,
	PersistentPostRun: func(cmd *cobra.Command, args []string) {
		fs.Debugf("rclone", "Version %q finishing with parameters %q", fs.Version, os.Args)
		runAtExitFunctions()
	},
}

// runRoot implements the main rclone command with no subcommands
func runRoot(cmd *cobra.Command, args []string) {
	if version {
		ShowVersion()
		resolveExitCode(nil)
	} else {
		_ = Root.Usage()
		fmt.Fprintf(os.Stderr, "Command not found.\n")
		resolveExitCode(errorCommandNotFound)
	}
}

func init() {
	Root.Run = runRoot
	Root.Flags().BoolVarP(&version, "version", "V", false, "Print the version number")
	cobra.OnInitialize(initConfig)
}

// ShowVersion prints the version to stdout
func ShowVersion() {
	fmt.Printf("rclone %s\n", fs.Version)
	fmt.Printf("- os/arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
	fmt.Printf("- go version: %s\n", runtime.Version())
}

// newFsFile creates a dst Fs from a name but may point to a file.
//
// It returns a string with the file name if it points to a file
func newFsFile(remote string) (fs.Fs, string) {
	fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
	if err != nil {
		fs.Stats.Error(err)
		log.Fatalf("Failed to create file system for %q: %v", remote, err)
	}
	f, err := fsInfo.NewFs(configName, fsPath)
	switch err {
	case fs.ErrorIsFile:
		return f, path.Base(fsPath)
	case nil:
		return f, ""
	default:
		fs.Stats.Error(err)
		log.Fatalf("Failed to create file system for %q: %v", remote, err)
	}
	return nil, ""
}

// newFsSrc creates a src Fs from a name
//
// It returns a string with the file name if limiting to one file
//
// This can point to a file
func newFsSrc(remote string) (fs.Fs, string) {
	f, fileName := newFsFile(remote)
	if fileName != "" {
		if !fs.Config.Filter.InActive() {
			err := errors.Errorf("Can't limit to single files when using filters: %v", remote)
			fs.Stats.Error(err)
			log.Fatalf(err.Error())
		}
		// Limit transfers to this file
		err := fs.Config.Filter.AddFile(fileName)
		if err != nil {
			fs.Stats.Error(err)
			log.Fatalf("Failed to limit to single file %q: %v", remote, err)
		}
		// Set --no-traverse as only one file
		fs.Config.NoTraverse = true
	}
	return f, fileName
}

// newFsDst creates a dst Fs from a name
//
// This must point to a directory
func newFsDst(remote string) fs.Fs {
	f, err := fs.NewFs(remote)
	if err != nil {
		fs.Stats.Error(err)
		log.Fatalf("Failed to create file system for %q: %v", remote, err)
	}
	return f
}

// NewFsSrcDst creates a new src and dst fs from the arguments
func NewFsSrcDst(args []string) (fs.Fs, fs.Fs) {
	fsrc, _ := newFsSrc(args[0])
	fdst := newFsDst(args[1])
	fs.CalculateModifyWindow(fdst, fsrc)
	return fsrc, fdst
}

// NewFsSrcDstFiles creates a new src and dst fs from the arguments
// If src is a file then srcFileName and dstFileName will be non-empty
func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs, dstFileName string) {
	fsrc, srcFileName = newFsSrc(args[0])
	// If copying a file...
	dstRemote := args[1]
	// If file exists then srcFileName != "", however if the file
	// doesn't exist then we assume it is a directory...
	if srcFileName != "" {
		dstRemote, dstFileName = fs.RemoteSplit(dstRemote)
		if dstRemote == "" {
			dstRemote = "."
		}
		if dstFileName == "" {
			log.Fatalf("%q is a directory", args[1])
		}
	}
	fdst, err := fs.NewFs(dstRemote)
	switch err {
	case fs.ErrorIsFile:
		fs.Stats.Error(err)
		log.Fatalf("Source doesn't exist or is a directory and destination is a file")
	case nil:
	default:
		fs.Stats.Error(err)
		log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
	}
	fs.CalculateModifyWindow(fdst, fsrc)
	return
}

// NewFsSrc creates a new src fs from the arguments
func NewFsSrc(args []string) fs.Fs {
	fsrc, _ := newFsSrc(args[0])
	fs.CalculateModifyWindow(fsrc)
	return fsrc
}

// NewFsDst creates a new dst fs from the arguments
//
// Dst fs-es can't point to single files
func NewFsDst(args []string) fs.Fs {
	fdst := newFsDst(args[0])
	fs.CalculateModifyWindow(fdst)
	return fdst
}

// NewFsDstFile creates a new dst fs with a destination file name from the arguments
func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) {
	dstRemote, dstFileName := fs.RemoteSplit(args[0])
	if dstRemote == "" {
		dstRemote = "."
	}
	if dstFileName == "" {
		log.Fatalf("%q is a directory", args[0])
	}
	fdst = newFsDst(dstRemote)
	fs.CalculateModifyWindow(fdst)
	return
}

// ShowStats returns true if the user added a `--stats` flag to the command line.
//
// This is called by Run to override the default value of the
// showStats passed in.
func ShowStats() bool {
	statsIntervalFlag := pflag.Lookup("stats")
	return statsIntervalFlag != nil && statsIntervalFlag.Changed
}

// Run the function with stats and retries if required
func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
	var err error
	var stopStats chan struct{}
	if !showStats && ShowStats() {
		showStats = true
	}
	if showStats {
		stopStats = StartStats()
	}
	for try := 1; try <= *retries; try++ {
		err = f()
		if !Retry || (err == nil && !fs.Stats.Errored()) {
			if try > 1 {
				fs.Errorf(nil, "Attempt %d/%d succeeded", try, *retries)
			}
			break
		}
		if fs.IsFatalError(err) {
			fs.Errorf(nil, "Fatal error received - not attempting retries")
			break
		}
		if fs.IsNoRetryError(err) {
			fs.Errorf(nil, "Can't retry this error - not attempting retries")
			break
		}
		if err != nil {
			fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
		} else {
			fs.Errorf(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
		}
		if try < *retries {
			fs.Stats.ResetErrors()
		}
	}
	if showStats {
		close(stopStats)
	}
	if err != nil {
		log.Printf("Failed to %s: %v", cmd.Name(), err)
		resolveExitCode(err)
	}
	if showStats && (fs.Stats.Errored() || *statsInterval > 0) {
		fs.Stats.Log()
	}
	fs.Debugf(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
	if fs.Stats.Errored() {
		resolveExitCode(fs.Stats.GetLastError())
	}
}
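// A sketch of how subcommands drive Run (mirroring the calls in the command
// files in this commit): with Retry true the closure is re-invoked up to
// --retries times while the error (or the accumulated stats errors) looks
// retryable:
//
//	cmd.Run(true, true, command, func() error {
//		return fs.Sync(fdst, fsrc) // hypothetical retryable operation
//	})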

// CheckArgs checks there are enough arguments and prints a message if not
func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
	if len(args) < MinArgs {
		_ = cmd.Usage()
		fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum\n", cmd.Name(), MinArgs)
		// os.Exit(1)
		resolveExitCode(errorNotEnoughArguments)
	} else if len(args) > MaxArgs {
		_ = cmd.Usage()
		fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
		// os.Exit(1)
		resolveExitCode(errorTooManyArguents)
	}
}

// StartStats prints the stats every statsInterval
//
// It returns a channel which should be closed to stop the stats.
func StartStats() chan struct{} {
	stopStats := make(chan struct{})
	if *statsInterval > 0 {
		go func() {
			ticker := time.NewTicker(*statsInterval)
			for {
				select {
				case <-ticker.C:
					fs.Stats.Log()
				case <-stopStats:
					ticker.Stop()
					return
				}
			}
		}()
	}
	return stopStats
}

// initConfig is run by cobra after initialising the flags
func initConfig() {
	// Start the logger
	fs.InitLogging()

	// Load the rest of the config now we have started the logger
	fs.LoadConfig()

	// Write the args for debug purposes
	fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)

	// Setup CPU profiling if desired
	if *cpuProfile != "" {
		fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
		f, err := os.Create(*cpuProfile)
		if err != nil {
			fs.Stats.Error(err)
			log.Fatal(err)
		}
		err = pprof.StartCPUProfile(f)
		if err != nil {
			fs.Stats.Error(err)
			log.Fatal(err)
		}
		AtExit(func() {
			pprof.StopCPUProfile()
		})
	}

	// Setup memory profiling if desired
	if *memProfile != "" {
		AtExit(func() {
			fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
			f, err := os.Create(*memProfile)
			if err != nil {
				fs.Stats.Error(err)
				log.Fatal(err)
			}
			err = pprof.WriteHeapProfile(f)
			if err != nil {
				fs.Stats.Error(err)
				log.Fatal(err)
			}
			err = f.Close()
			if err != nil {
				fs.Stats.Error(err)
				log.Fatal(err)
			}
		})
	}

	if m, _ := regexp.MatchString("^(bits|bytes)$", *dataRateUnit); !m {
		fs.Errorf(nil, "Invalid unit passed to --stats-unit. Defaulting to bytes.")
		fs.Config.DataRateUnit = "bytes"
	} else {
		fs.Config.DataRateUnit = *dataRateUnit
	}
}

func resolveExitCode(err error) {
	if err == nil {
		os.Exit(exitCodeSuccess)
	}

	err = errors.Cause(err)

	switch {
	case err == fs.ErrorDirNotFound:
		os.Exit(exitCodeDirNotFound)
	case err == fs.ErrorObjectNotFound:
		os.Exit(exitCodeFileNotFound)
	case err == errorUncategorized:
		os.Exit(exitCodeUncategorizedError)
	case fs.ShouldRetry(err):
		os.Exit(exitCodeRetryError)
	case fs.IsNoRetryError(err):
		os.Exit(exitCodeNoRetryError)
	case fs.IsFatalError(err):
		os.Exit(exitCodeFatalError)
	default:
		os.Exit(exitCodeUsageError)
	}
}
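// resolveExitCode above unwraps with errors.Cause before switching, so an
// error wrapped on its way up still maps to the right exit code. A sketch
// of why that matters (hypothetical wrap site):
//
//	err := errors.Wrap(fs.ErrorDirNotFound, "listing failed")
//	// errors.Cause(err) == fs.ErrorDirNotFound, so rclone exits with
//	// exitCodeDirNotFound rather than the generic usage error.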
571
cmd/cmount/fs.go
Normal file
@@ -0,0 +1,571 @@
// +build cmount
// +build cgo
// +build linux darwin freebsd windows

package cmount

import (
	"io"
	"os"
	"path"
	"runtime"
	"sync"
	"time"

	"github.com/billziss-gh/cgofuse/fuse"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/vfs"
	"github.com/ncw/rclone/vfs/vfsflags"
	"github.com/pkg/errors"
)

const fhUnset = ^uint64(0)

// FS represents the top level filing system
type FS struct {
	VFS     *vfs.VFS
	f       fs.Fs
	ready   chan (struct{})
	mu      sync.Mutex // to protect the below
	handles []vfs.Handle
}

// NewFS makes a new FS
func NewFS(f fs.Fs) *FS {
	fsys := &FS{
		VFS:   vfs.New(f, &vfsflags.Opt),
		f:     f,
		ready: make(chan (struct{})),
	}
	return fsys
}

// Open a handle returning an integer file handle
func (fsys *FS) openHandle(handle vfs.Handle) (fh uint64) {
	fsys.mu.Lock()
	defer fsys.mu.Unlock()
	var i int
	var oldHandle vfs.Handle
	for i, oldHandle = range fsys.handles {
		if oldHandle == nil {
			fsys.handles[i] = handle
			goto found
		}
	}
	fsys.handles = append(fsys.handles, handle)
	i = len(fsys.handles) - 1
found:
	return uint64(i)
}

// get the handle for fh, call with the lock held
func (fsys *FS) _getHandle(fh uint64) (i int, handle vfs.Handle, errc int) {
	if fh >= uint64(len(fsys.handles)) {
		fs.Debugf(nil, "Bad file handle: too big: 0x%X", fh)
		return i, nil, -fuse.EBADF
	}
	i = int(fh)
	handle = fsys.handles[i]
	if handle == nil {
		fs.Debugf(nil, "Bad file handle: nil handle: 0x%X", fh)
		return i, nil, -fuse.EBADF
	}
	return i, handle, 0
}

// Get the handle for the file handle
func (fsys *FS) getHandle(fh uint64) (handle vfs.Handle, errc int) {
	fsys.mu.Lock()
	_, handle, errc = fsys._getHandle(fh)
	fsys.mu.Unlock()
	return
}

// Close the handle
func (fsys *FS) closeHandle(fh uint64) (errc int) {
	fsys.mu.Lock()
	i, _, errc := fsys._getHandle(fh)
	if errc == 0 {
		fsys.handles[i] = nil
	}
	fsys.mu.Unlock()
	return
}
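// The three helpers above form a small handle table: openHandle reuses the
// first nil slot (or appends) so the integer handles handed to FUSE stay
// dense, and closeHandle nils the slot for reuse. Lifecycle sketch
// (hypothetical handle h):
//
//	fh := fsys.openHandle(h)           // lowest free index
//	handle, errc := fsys.getHandle(fh) // errc == 0 while the slot is live
//	_ = fsys.closeHandle(fh)           // slot freed for the next open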

// lookup a Node given a path
func (fsys *FS) lookupNode(path string) (node vfs.Node, errc int) {
	node, err := fsys.VFS.Stat(path)
	return node, translateError(err)
}

// lookup a Dir given a path
func (fsys *FS) lookupDir(path string) (dir *vfs.Dir, errc int) {
	node, errc := fsys.lookupNode(path)
	if errc != 0 {
		return nil, errc
	}
	dir, ok := node.(*vfs.Dir)
	if !ok {
		return nil, -fuse.ENOTDIR
	}
	return dir, 0
}

// lookup a parent Dir given a path returning the dir and the leaf
func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, errc int) {
	parentDir, leaf := path.Split(filePath)
	dir, errc = fsys.lookupDir(parentDir)
	return leaf, dir, errc
}

// lookup a File given a path
func (fsys *FS) lookupFile(path string) (file *vfs.File, errc int) {
	node, errc := fsys.lookupNode(path)
	if errc != 0 {
		return nil, errc
	}
	file, ok := node.(*vfs.File)
	if !ok {
		return nil, -fuse.EISDIR
	}
	return file, 0
}

// get a node and handle from the path or from the fh if not fhUnset
//
// handle may be nil
func (fsys *FS) getNode(path string, fh uint64) (node vfs.Node, handle vfs.Handle, errc int) {
	if fh == fhUnset {
		node, errc = fsys.lookupNode(path)
	} else {
		handle, errc = fsys.getHandle(fh)
		if errc == 0 {
			node = handle.Node()
		}
	}
	return
}

// stat fills up the stat block for Node
func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) {
	Size := uint64(node.Size())
	Blocks := (Size + 511) / 512
	modTime := node.ModTime()
	Mode := node.Mode().Perm()
	if node.IsDir() {
		Mode |= fuse.S_IFDIR
	} else {
		Mode |= fuse.S_IFREG
	}
	//stat.Dev = 1
	stat.Ino = node.Inode() // FIXME do we need to set the inode number?
	stat.Mode = uint32(Mode)
	stat.Nlink = 1
	stat.Uid = fsys.VFS.Opt.UID
	stat.Gid = fsys.VFS.Opt.GID
	//stat.Rdev
	stat.Size = int64(Size)
	t := fuse.NewTimespec(modTime)
	stat.Atim = t
	stat.Mtim = t
	stat.Ctim = t
	stat.Blksize = 512
	stat.Blocks = int64(Blocks)
	stat.Birthtim = t
	// fs.Debugf(nil, "stat = %+v", *stat)
	return 0
}

// Init is called after the filesystem is ready
func (fsys *FS) Init() {
	defer fs.Trace(fsys.f, "")("")
	close(fsys.ready)
}

// Destroy is called when it is unmounted (note that depending on how
// the file system is terminated the file system may not receive the
// Destroy call).
func (fsys *FS) Destroy() {
	defer fs.Trace(fsys.f, "")("")
}

// Getattr reads the attributes for path
func (fsys *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
	defer fs.Trace(path, "fh=0x%X", fh)("errc=%v", &errc)
	node, _, errc := fsys.getNode(path, fh)
	if errc == 0 {
		errc = fsys.stat(node, stat)
	}
	return
}

// Opendir opens path as a directory
func (fsys *FS) Opendir(path string) (errc int, fh uint64) {
	defer fs.Trace(path, "")("errc=%d, fh=0x%X", &errc, &fh)
	handle, err := fsys.VFS.OpenFile(path, os.O_RDONLY, 0777)
	if err != nil {
		return translateError(err), fhUnset
	}
	return 0, fsys.openHandle(handle)
}

// Readdir reads the directory at dirPath
func (fsys *FS) Readdir(dirPath string,
	fill func(name string, stat *fuse.Stat_t, ofst int64) bool,
	ofst int64,
	fh uint64) (errc int) {
	itemsRead := -1
	defer fs.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc)

	node, errc := fsys.getHandle(fh)
	if errc != 0 {
		return errc
	}

	items, err := node.Readdir(-1)
	if err != nil {
		return translateError(err)
	}

	// Optionally, create a struct stat that describes the file as
	// for getattr (but FUSE only looks at st_ino and the
	// file-type bits of st_mode).
	//
	// FIXME If you call host.SetCapReaddirPlus() then WinFsp will
	// use the full stat information - a useful optimization on
	// Windows.
	//
	// NB we are using the first mode for readdir: The readdir
	// implementation ignores the offset parameter, and passes
	// zero to the filler function's offset. The filler function
	// will not return '1' (unless an error happens), so the whole
	// directory is read in a single readdir operation.
	fill(".", nil, 0)
	fill("..", nil, 0)
	for _, item := range items {
		node, ok := item.(vfs.Node)
		if ok {
			fill(node.Name(), nil, 0)
		}
	}
	itemsRead = len(items)
	return 0
}

// Releasedir finished reading the directory
func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
	defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
	return fsys.closeHandle(fh)
}

// Statfs reads overall stats on the filesystem
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
	defer fs.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
	const blockSize = 4096
	fsBlocks := uint64(1 << 50)
	if runtime.GOOS == "windows" {
		fsBlocks = (1 << 43) - 1
	}
	stat.Blocks = fsBlocks  // Total data blocks in file system.
	stat.Bfree = fsBlocks   // Free blocks in file system.
	stat.Bavail = fsBlocks  // Free blocks in file system if you're not root.
	stat.Files = 1E9        // Total files in file system.
	stat.Ffree = 1E9        // Free files in file system.
	stat.Bsize = blockSize  // Block size
	stat.Namemax = 255      // Maximum file name length?
	stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
	return 0
}

// Open opens a file
func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
	defer fs.Trace(path, "flags=0x%X", flags)("errc=%d, fh=0x%X", &errc, &fh)

	// translate the fuse flags to os flags
	flags = translateOpenFlags(flags) | os.O_CREATE
	handle, err := fsys.VFS.OpenFile(path, flags, 0777)
	if err != nil {
		return translateError(err), fhUnset
	}

	return 0, fsys.openHandle(handle)
}

// Create creates and opens a file.
func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh uint64) {
	defer fs.Trace(filePath, "flags=0x%X, mode=0%o", flags, mode)("errc=%d, fh=0x%X", &errc, &fh)
	leaf, parentDir, errc := fsys.lookupParentDir(filePath)
	if errc != 0 {
		return errc, fhUnset
	}
	file, err := parentDir.Create(leaf)
	if err != nil {
		return translateError(err), fhUnset
	}
	// translate the fuse flags to os flags
	flags = translateOpenFlags(flags) | os.O_CREATE
	handle, err := file.Open(flags)
	if err != nil {
		return translateError(err), fhUnset
	}
	return 0, fsys.openHandle(handle)
}

// Truncate truncates a file to size
func (fsys *FS) Truncate(path string, size int64, fh uint64) (errc int) {
	defer fs.Trace(path, "size=%d, fh=0x%X", size, fh)("errc=%d", &errc)
	node, handle, errc := fsys.getNode(path, fh)
	if errc != 0 {
		return errc
	}
	var err error
	if handle != nil {
		err = handle.Truncate(size)
	} else {
		err = node.Truncate(size)
	}
	if err != nil {
		return translateError(err)
	}
	return 0
}

// Read data from file handle
func (fsys *FS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) {
	defer fs.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
	handle, errc := fsys.getHandle(fh)
	if errc != 0 {
		return errc
	}
	n, err := handle.ReadAt(buff, ofst)
	if err == io.EOF {
		err = nil
	} else if err != nil {
		return translateError(err)
	}
	return n
}

// Write data to file handle
func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
	defer fs.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
	handle, errc := fsys.getHandle(fh)
	if errc != 0 {
		return errc
	}
	n, err := handle.WriteAt(buff, ofst)
	if err != nil {
		return translateError(err)
	}
	return n
}

// Flush flushes an open file descriptor or path
func (fsys *FS) Flush(path string, fh uint64) (errc int) {
	defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
	handle, errc := fsys.getHandle(fh)
	if errc != 0 {
		return errc
	}
	return translateError(handle.Flush())
}

// Release closes the file if still open
func (fsys *FS) Release(path string, fh uint64) (errc int) {
	defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
	handle, errc := fsys.getHandle(fh)
	if errc != 0 {
		return errc
	}
	_ = fsys.closeHandle(fh)
	return translateError(handle.Release())
}

// Unlink removes a file.
func (fsys *FS) Unlink(filePath string) (errc int) {
	defer fs.Trace(filePath, "")("errc=%d", &errc)
	leaf, parentDir, errc := fsys.lookupParentDir(filePath)
	if errc != 0 {
		return errc
	}
	return translateError(parentDir.RemoveName(leaf))
}

// Mkdir creates a directory.
func (fsys *FS) Mkdir(dirPath string, mode uint32) (errc int) {
	defer fs.Trace(dirPath, "mode=0%o", mode)("errc=%d", &errc)
	leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
	if errc != 0 {
		return errc
	}
	_, err := parentDir.Mkdir(leaf)
	return translateError(err)
}

// Rmdir removes a directory
func (fsys *FS) Rmdir(dirPath string) (errc int) {
	defer fs.Trace(dirPath, "")("errc=%d", &errc)
	leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
	if errc != 0 {
		return errc
	}
	return translateError(parentDir.RemoveName(leaf))
}

// Rename renames a file.
func (fsys *FS) Rename(oldPath string, newPath string) (errc int) {
	defer fs.Trace(oldPath, "newPath=%q", newPath)("errc=%d", &errc)
	return translateError(fsys.VFS.Rename(oldPath, newPath))
}

// Utimens changes the access and modification times of a file.
func (fsys *FS) Utimens(path string, tmsp []fuse.Timespec) (errc int) {
	defer fs.Trace(path, "tmsp=%+v", tmsp)("errc=%d", &errc)
	node, errc := fsys.lookupNode(path)
	if errc != 0 {
		return errc
	}
	var t time.Time
	if tmsp == nil || len(tmsp) < 2 {
		t = time.Now()
	} else {
		t = tmsp[1].Time()
	}
	return translateError(node.SetModTime(t))
}

// Mknod creates a file node.
func (fsys *FS) Mknod(path string, mode uint32, dev uint64) (errc int) {
	defer fs.Trace(path, "mode=0x%X, dev=0x%X", mode, dev)("errc=%d", &errc)
	return -fuse.ENOSYS
}

// Fsync synchronizes file contents.
func (fsys *FS) Fsync(path string, datasync bool, fh uint64) (errc int) {
	defer fs.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
	// This is a no-op for rclone
	return 0
}

// Link creates a hard link to a file.
func (fsys *FS) Link(oldpath string, newpath string) (errc int) {
	defer fs.Trace(oldpath, "newpath=%q", newpath)("errc=%d", &errc)
	return -fuse.ENOSYS
}

// Symlink creates a symbolic link.
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
	defer fs.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
	return -fuse.ENOSYS
}

// Readlink reads the target of a symbolic link.
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
	defer fs.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
	return -fuse.ENOSYS, ""
}

// Chmod changes the permission bits of a file.
func (fsys *FS) Chmod(path string, mode uint32) (errc int) {
	defer fs.Trace(path, "mode=0%o", mode)("errc=%d", &errc)
	// This is a no-op for rclone
	return 0
}

// Chown changes the owner and group of a file.
func (fsys *FS) Chown(path string, uid uint32, gid uint32) (errc int) {
	defer fs.Trace(path, "uid=%d, gid=%d", uid, gid)("errc=%d", &errc)
	// This is a no-op for rclone
	return 0
}

// Access checks file access permissions.
func (fsys *FS) Access(path string, mask uint32) (errc int) {
	defer fs.Trace(path, "mask=0%o", mask)("errc=%d", &errc)
	// This is a no-op for rclone
	return 0
}

// Fsyncdir synchronizes directory contents.
func (fsys *FS) Fsyncdir(path string, datasync bool, fh uint64) (errc int) {
	defer fs.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
	// This is a no-op for rclone
	return 0
}

// Setxattr sets extended attributes.
func (fsys *FS) Setxattr(path string, name string, value []byte, flags int) (errc int) {
	return -fuse.ENOSYS
}

// Getxattr gets extended attributes.
func (fsys *FS) Getxattr(path string, name string) (errc int, value []byte) {
	return -fuse.ENOSYS, nil
}

// Removexattr removes extended attributes.
func (fsys *FS) Removexattr(path string, name string) (errc int) {
	return -fuse.ENOSYS
}

// Listxattr lists extended attributes.
func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) {
	return -fuse.ENOSYS
}

// Translate errors from mountlib
func translateError(err error) (errc int) {
	if err == nil {
		return 0
	}
	switch errors.Cause(err) {
	case vfs.OK:
		return 0
	case vfs.ENOENT:
		return -fuse.ENOENT
	case vfs.EEXIST:
		return -fuse.EEXIST
	case vfs.EPERM:
		return -fuse.EPERM
	case vfs.ECLOSED:
		return -fuse.EBADF
	case vfs.ENOTEMPTY:
		return -fuse.ENOTEMPTY
	case vfs.ESPIPE:
		return -fuse.ESPIPE
	case vfs.EBADF:
		return -fuse.EBADF
	case vfs.EROFS:
		return -fuse.EROFS
	case vfs.ENOSYS:
		return -fuse.ENOSYS
	}
	fs.Errorf(nil, "IO error: %v", err)
	return -fuse.EIO
}

// Translate Open Flags from FUSE to os (as used in the vfs layer)
func translateOpenFlags(inFlags int) (outFlags int) {
	switch inFlags & fuse.O_ACCMODE {
	case fuse.O_RDONLY:
		outFlags = os.O_RDONLY
	case fuse.O_WRONLY:
		outFlags = os.O_WRONLY
	case fuse.O_RDWR:
		outFlags = os.O_RDWR
	}
	if inFlags&fuse.O_APPEND != 0 {
		outFlags |= os.O_APPEND
	}
	if inFlags&fuse.O_CREAT != 0 {
		outFlags |= os.O_CREATE
	}
	if inFlags&fuse.O_EXCL != 0 {
		outFlags |= os.O_EXCL
	}
	if inFlags&fuse.O_TRUNC != 0 {
		outFlags |= os.O_TRUNC
	}
	// NB O_SYNC isn't defined by fuse
	return outFlags
}
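// A worked example of the translation above: a FUSE open with
// O_WRONLY|O_CREAT|O_TRUNC becomes the os package flags used by the vfs
// layer (values illustrative):
//
//	flags := translateOpenFlags(fuse.O_WRONLY | fuse.O_CREAT | fuse.O_TRUNC)
//	// flags == os.O_WRONLY|os.O_CREATE|os.O_TRUNC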
235
cmd/cmount/mount.go
Normal file
@@ -0,0 +1,235 @@
// Package cmount implements a FUSE mounting system for rclone remotes.
//
// This uses the cgo based cgofuse library

// +build cmount
// +build cgo
// +build linux darwin freebsd windows

package cmount

import (
	"fmt"
	"os"
	"os/signal"
	"runtime"
	"syscall"
	"time"

	"github.com/billziss-gh/cgofuse/fuse"
	"github.com/ncw/rclone/cmd/mountlib"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/vfs"
	"github.com/ncw/rclone/vfs/vfsflags"
	"github.com/okzk/sdnotify"
	"github.com/pkg/errors"
)

func init() {
	name := "cmount"
	if runtime.GOOS == "windows" {
		name = "mount"
	}
	mountlib.NewMountCommand(name, Mount)
}

// mountOptions configures the options from the command line flags
func mountOptions(device string, mountpoint string) (options []string) {
	// Options
	options = []string{
		"-o", "fsname=" + device,
		"-o", "subtype=rclone",
		"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
		// This causes FUSE to supply O_TRUNC with the Open
		// call which is more efficient for cmount. However
		// it does not work with cgofuse on Windows with
		// WinFSP so cmount must work with or without it.
		"-o", "atomic_o_trunc",
	}
	if mountlib.DebugFUSE {
		options = append(options, "-o", "debug")
	}

	// OSX options
	if runtime.GOOS == "darwin" {
		options = append(options, "-o", "volname="+device)
		options = append(options, "-o", "noappledouble")
		options = append(options, "-o", "noapplexattr")
	}

	// Windows options
	if runtime.GOOS == "windows" {
		// These cause WinFsp to use the current user
		options = append(options, "-o", "uid=-1")
		options = append(options, "-o", "gid=-1")
		options = append(options, "--FileSystemName=rclone")
	}

	if mountlib.AllowNonEmpty {
		options = append(options, "-o", "nonempty")
	}
	if mountlib.AllowOther {
		options = append(options, "-o", "allow_other")
	}
	if mountlib.AllowRoot {
		options = append(options, "-o", "allow_root")
	}
	if mountlib.DefaultPermissions {
		options = append(options, "-o", "default_permissions")
	}
	if vfsflags.Opt.ReadOnly {
		options = append(options, "-o", "ro")
	}
	if mountlib.WritebackCache {
		// FIXME? options = append(options, "-o", WritebackCache())
	}
	for _, option := range mountlib.ExtraOptions {
		options = append(options, "-o", option)
	}
	for _, option := range mountlib.ExtraFlags {
		options = append(options, option)
	}
	return options
}

// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
	const totalWait = 10 * time.Second
	const individualWait = 10 * time.Millisecond
	for i := 0; i < int(totalWait/individualWait); i++ {
		ok = fn()
		if ok {
			return ok
		}
		time.Sleep(individualWait)
	}
	return false
}
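// waitFor is a plain poll loop: up to 10 seconds in 10ms steps. It is used
// below to wait for the mountpoint to appear (or disappear) on Windows, e.g.:
//
//	ok := waitFor(func() bool {
//		_, err := os.Stat(mountpoint)
//		return err == nil
//	})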

// mount the file system
//
// The mount point will be ready when this returns.
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
	fs.Debugf(f, "Mounting on %q", mountpoint)

	// Check the mountpoint - in Windows the mountpoint mustn't exist before the mount
	if runtime.GOOS != "windows" {
		fi, err := os.Stat(mountpoint)
		if err != nil {
			return nil, nil, nil, errors.Wrap(err, "mountpoint")
		}
		if !fi.IsDir() {
			return nil, nil, nil, errors.New("mountpoint is not a directory")
		}
	}

	// Create underlying FS
	fsys := NewFS(f)
	host := fuse.NewFileSystemHost(fsys)

	// Create options
	options := mountOptions(f.Name()+":"+f.Root(), mountpoint)
	fs.Debugf(f, "Mounting with options: %q", options)

	// Serve the mount point in the background returning error to errChan
	errChan := make(chan error, 1)
	go func() {
		var err error
		ok := host.Mount(mountpoint, options)
		if !ok {
			err = errors.New("mount failed")
			fs.Errorf(f, "Mount failed")
		}
		errChan <- err
	}()

	// unmount
	unmount := func() error {
		// Shutdown the VFS
		fsys.VFS.Shutdown()
		fs.Debugf(nil, "Calling host.Unmount")
		if host.Unmount() {
			fs.Debugf(nil, "host.Unmount succeeded")
			if runtime.GOOS == "windows" {
				if !waitFor(func() bool {
					_, err := os.Stat(mountpoint)
					return err != nil
				}) {
					fs.Errorf(nil, "mountpoint %q didn't disappear after unmount - continuing anyway", mountpoint)
				}
			}
			return nil
		}
		fs.Debugf(nil, "host.Unmount failed")
		return errors.New("host unmount failed")
	}

	// Wait for the filesystem to become ready, checking the file
	// system didn't blow up before starting
	select {
	case err := <-errChan:
		err = errors.Wrap(err, "mount stopped before calling Init")
		return nil, nil, nil, err
	case <-fsys.ready:
	}

	// Wait for the mount point to be available on Windows
	// On Windows the Init signal comes slightly before the mount is ready
	if runtime.GOOS == "windows" {
		if !waitFor(func() bool {
			_, err := os.Stat(mountpoint)
			return err == nil
		}) {
			fs.Errorf(nil, "mountpoint %q didn't become available on mount - continuing anyway", mountpoint)
		}
	}

	return fsys.VFS, errChan, unmount, nil
}

// Mount mounts the remote at mountpoint.
func Mount(f fs.Fs, mountpoint string) error {
	// Mount it
	FS, errChan, _, err := mount(f, mountpoint)
	if err != nil {
		return errors.Wrap(err, "failed to mount FUSE fs")
	}

	// Note cgofuse unmounts the fs on SIGINT etc

	sigHup := make(chan os.Signal, 1)
	signal.Notify(sigHup, syscall.SIGHUP)

	if err := sdnotify.SdNotifyReady(); err != nil && err != sdnotify.SdNotifyNoSocket {
		return errors.Wrap(err, "failed to notify systemd")
	}

waitloop:
	for {
		select {
		// umount triggered outside the app
		case err = <-errChan:
			break waitloop
		// user sent SIGHUP to clear the cache
		case <-sigHup:
			root, err := FS.Root()
			if err != nil {
				fs.Errorf(f, "Error reading root: %v", err)
			} else {
				root.ForgetAll()
			}
		}
	}

	_ = sdnotify.SdNotifyStopping()
	if err != nil {
		return errors.Wrap(err, "failed to umount FUSE fs")
	}

	return nil
}
39
cmd/cmount/mount_test.go
Normal file
@@ -0,0 +1,39 @@
// +build cmount
// +build cgo
// +build linux darwin freebsd windows
// +build !race !windows

// FIXME this doesn't work with the race detector under Windows either
// hanging or producing lots of differences.

package cmount

import (
	"testing"

	"github.com/ncw/rclone/cmd/mountlib/mounttest"
)

func TestMain(m *testing.M)                       { mounttest.TestMain(m, mount) }
func TestDirLs(t *testing.T)                      { mounttest.TestDirLs(t) }
func TestDirCreateAndRemoveDir(t *testing.T)      { mounttest.TestDirCreateAndRemoveDir(t) }
func TestDirCreateAndRemoveFile(t *testing.T)     { mounttest.TestDirCreateAndRemoveFile(t) }
func TestDirRenameFile(t *testing.T)              { mounttest.TestDirRenameFile(t) }
func TestDirRenameEmptyDir(t *testing.T)          { mounttest.TestDirRenameEmptyDir(t) }
func TestDirRenameFullDir(t *testing.T)           { mounttest.TestDirRenameFullDir(t) }
func TestDirModTime(t *testing.T)                 { mounttest.TestDirModTime(t) }
func TestDirCacheFlush(t *testing.T)              { mounttest.TestDirCacheFlush(t) }
func TestDirCacheFlushOnDirRename(t *testing.T)   { mounttest.TestDirCacheFlushOnDirRename(t) }
func TestFileModTime(t *testing.T)                { mounttest.TestFileModTime(t) }
func TestFileModTimeWithOpenWriters(t *testing.T) {} // FIXME mounttest.TestFileModTimeWithOpenWriters(t)
func TestMount(t *testing.T)                      { mounttest.TestMount(t) }
func TestRoot(t *testing.T)                       { mounttest.TestRoot(t) }
func TestReadByByte(t *testing.T)                 { mounttest.TestReadByByte(t) }
func TestReadChecksum(t *testing.T)               { mounttest.TestReadChecksum(t) }
func TestReadFileDoubleClose(t *testing.T)        { mounttest.TestReadFileDoubleClose(t) }
func TestReadSeek(t *testing.T)                   { mounttest.TestReadSeek(t) }
func TestWriteFileNoWrite(t *testing.T)           { mounttest.TestWriteFileNoWrite(t) }
func TestWriteFileWrite(t *testing.T)             { mounttest.TestWriteFileWrite(t) }
func TestWriteFileOverwrite(t *testing.T)         { mounttest.TestWriteFileOverwrite(t) }
func TestWriteFileDoubleClose(t *testing.T)       { mounttest.TestWriteFileDoubleClose(t) }
func TestWriteFileFsync(t *testing.T)             { mounttest.TestWriteFileFsync(t) }
6
cmd/cmount/mount_unsupported.go
Normal file
@@ -0,0 +1,6 @@
// Build for cmount for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !linux,!darwin,!freebsd,!windows !cgo !cmount

package cmount
141
cmd/config/config.go
Normal file
@@ -0,0 +1,141 @@
package config

import (
	"github.com/ncw/rclone/cmd"
	"github.com/ncw/rclone/fs"
	"github.com/spf13/cobra"
)

func init() {
	cmd.Root.AddCommand(configCommand)
	configCommand.AddCommand(configEditCommand)
	configCommand.AddCommand(configFileCommand)
	configCommand.AddCommand(configShowCommand)
	configCommand.AddCommand(configDumpCommand)
	configCommand.AddCommand(configProvidersCommand)
	configCommand.AddCommand(configCreateCommand)
	configCommand.AddCommand(configUpdateCommand)
	configCommand.AddCommand(configDeleteCommand)
	configCommand.AddCommand(configPasswordCommand)
}

var configCommand = &cobra.Command{
	Use:   "config",
	Short: `Enter an interactive configuration session.`,
	Long: `Enter an interactive configuration session where you can setup new
remotes and manage existing ones. You may also set or remove a
password to protect your configuration.
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 0, command, args)
		fs.EditConfig()
	},
}

var configEditCommand = &cobra.Command{
	Use:   "edit",
	Short: configCommand.Short,
	Long:  configCommand.Long,
	Run:   configCommand.Run,
}

var configFileCommand = &cobra.Command{
	Use:   "file",
	Short: `Show path of configuration file in use.`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 0, command, args)
		fs.ShowConfigLocation()
	},
}

var configShowCommand = &cobra.Command{
	Use:   "show [<remote>]",
	Short: `Print (decrypted) config file, or the config for a single remote.`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 1, command, args)
		if len(args) == 0 {
			fs.ShowConfig()
		} else {
			fs.ShowRemote(args[0])
		}
	},
}

var configDumpCommand = &cobra.Command{
	Use:   "dump",
	Short: `Dump the config file as JSON.`,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(0, 0, command, args)
		return fs.ConfigDump()
	},
}

var configProvidersCommand = &cobra.Command{
	Use:   "providers",
	Short: `List in JSON format all the providers and options.`,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(0, 0, command, args)
		return fs.JSONListProviders()
	},
}

var configCreateCommand = &cobra.Command{
	Use:   "create <name> <type> [<key> <value>]*",
	Short: `Create a new remote with name, type and options.`,
	Long: `
Create a new remote of <name> with <type> and options. The options
should be passed in pairs of <key> <value>.

For example to make a swift remote of name myremote using auto config
you would do:

    rclone config create myremote swift env_auth true
`,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(2, 256, command, args)
		return fs.CreateRemote(args[0], args[1], args[2:])
	},
}

var configUpdateCommand = &cobra.Command{
	Use:   "update <name> [<key> <value>]+",
	Short: `Update options in an existing remote.`,
	Long: `
Update an existing remote's options. The options should be passed in
pairs of <key> <value>.

For example to update the env_auth field of a remote of name myremote you would do:

    rclone config update myremote env_auth true
`,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(3, 256, command, args)
		return fs.UpdateRemote(args[0], args[1:])
	},
}

var configDeleteCommand = &cobra.Command{
	Use:   "delete <name>",
	Short: `Delete an existing remote <name>.`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fs.DeleteRemote(args[0])
	},
}

var configPasswordCommand = &cobra.Command{
	Use:   "password <name> [<key> <value>]+",
	Short: `Update password in an existing remote.`,
	Long: `
Update an existing remote's password. The password
should be passed in pairs of <key> <value>.

For example to set password of a remote of name myremote you would do:

    rclone config password myremote fieldname mypassword
|
||||
`,
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(3, 256, command, args)
|
||||
return fs.PasswordRemote(args[0], args[1:])
|
||||
},
|
||||
}
|
||||
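Taken together, the subcommands above let configuration be fully scripted. A typical non-interactive round trip, using only the commands defined in this file (the remote name and values are illustrative):

    rclone config create myremote swift env_auth true
    rclone config show myremote
    rclone config update myremote env_auth false
    rclone config delete myremote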
63
cmd/copy/copy.go
Normal file
@@ -0,0 +1,63 @@
package copy

import (
    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:   "copy source:path dest:path",
    Short: `Copy files from source to dest, skipping already copied`,
    Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.

Note that it is always the contents of the directory that is synced,
not the directory itself, so when source:path is a directory, it's the
contents of source:path that are copied, not the directory name and
contents.

If dest:path doesn't exist, it is created and the source:path contents
go there.

For example

    rclone copy source:sourcepath dest:destpath

Let's say there are two files in sourcepath

    sourcepath/one.txt
    sourcepath/two.txt

This copies them to

    destpath/one.txt
    destpath/two.txt

Not to

    destpath/sourcepath/one.txt
    destpath/sourcepath/two.txt

If you are familiar with ` + "`rsync`" + `, rclone always works as if you had
written a trailing / - meaning "copy the contents of this directory".
This applies to all commands and whether you are talking about the
source or destination.

See the ` + "`--no-traverse`" + ` option for controlling whether rclone lists
the destination directory or not.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(2, 2, command, args)
        fsrc, fdst := cmd.NewFsSrcDst(args)
        cmd.Run(true, true, command, func() error {
            return fs.CopyDir(fdst, fsrc)
        })
    },
}
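Every command file in this commit follows the same skeleton: an init that registers with cmd.Root, and a cobra.Command whose Run checks arguments and hands the real work to cmd.Run for stats and retries. A minimal sketch of a hypothetical new command built on the same pattern (the package and command name example are invented for illustration; the cmd helpers are the ones shown above):

    package example

    import (
        "github.com/ncw/rclone/cmd"
        "github.com/spf13/cobra"
    )

    func init() {
        cmd.Root.AddCommand(commandDefinition)
    }

    var commandDefinition = &cobra.Command{
        Use:   "example remote:path",
        Short: `A hypothetical command showing the shared skeleton.`,
        Run: func(command *cobra.Command, args []string) {
            cmd.CheckArgs(1, 1, command, args)
            fsrc := cmd.NewFsSrc(args)
            cmd.Run(false, false, command, func() error {
                _ = fsrc // the real work on fsrc would go here
                return nil
            })
        },
    }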
53
cmd/copyto/copyto.go
Normal file
@@ -0,0 +1,53 @@
package copyto

import (
    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:   "copyto source:path dest:path",
    Short: `Copy files from source to dest, skipping already copied`,
    Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.

This can be used to upload single files to other than their current
name. If the source is a directory then it acts exactly like the copy
command.

So

    rclone copyto src dst

where src and dst are rclone paths, either remote:path or
/path/to/local or C:\windows\path\if\on\windows.

This will:

    if src is file
        copy it to dst, overwriting an existing file if it exists
    if src is directory
        copy it to dst, overwriting existing files if they exist
        see copy command for full details

This doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. It doesn't delete files from the
destination.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(2, 2, command, args)
        fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
        cmd.Run(true, true, command, func() error {
            if srcFileName == "" {
                return fs.CopyDir(fdst, fsrc)
            }
            return fs.CopyFile(fdst, fsrc, dstFileName, srcFileName)
        })
    },
}
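The practical difference from plain copy is the rename-on-upload case: when srcFileName is non-empty, fs.CopyFile uploads the single file under the destination name. For example, to upload a local file under a different name (paths illustrative):

    rclone copyto /tmp/report.txt remote:backup/report-final.txt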
102
cmd/cryptcheck/cryptcheck.go
Normal file
@@ -0,0 +1,102 @@
package cryptcheck

import (
    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/crypt"
    "github.com/ncw/rclone/fs"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:   "cryptcheck remote:path cryptedremote:path",
    Short: `Cryptcheck checks the integrity of a crypted remote.`,
    Long: `
rclone cryptcheck checks a remote against a crypted remote. This is
the equivalent of running rclone check, but able to check the
checksums of the crypted remote.

For it to work the underlying remote of the cryptedremote must support
some kind of checksum.

It works by reading the nonce from each file on the cryptedremote: and
using that to encrypt each file on the remote:. It then checks the
checksum of the underlying file on the cryptedremote: against the
checksum of the file it has just encrypted.

Use it like this

    rclone cryptcheck /path/to/files encryptedremote:path

You can use it like this also, but that will involve downloading all
the files in remote:path.

    rclone cryptcheck remote:path encryptedremote:path

After it has run it will log the status of the encryptedremote:.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(2, 2, command, args)
        fsrc, fdst := cmd.NewFsSrcDst(args)
        cmd.Run(false, true, command, func() error {
            return cryptCheck(fdst, fsrc)
        })
    },
}

// cryptCheck checks the integrity of a crypted remote
func cryptCheck(fdst, fsrc fs.Fs) error {
    // Check to see that fdst is a crypt
    fcrypt, ok := fdst.(*crypt.Fs)
    if !ok {
        return errors.Errorf("%s:%s is not a crypt remote", fdst.Name(), fdst.Root())
    }
    // Find a hash to use
    funderlying := fcrypt.UnWrap()
    hashType := funderlying.Hashes().GetOne()
    if hashType == fs.HashNone {
        return errors.Errorf("%s:%s does not support any hashes", funderlying.Name(), funderlying.Root())
    }
    fs.Infof(nil, "Using %v for hash comparisons", hashType)

    // checkIdentical checks to see if dst and src are identical
    //
    // it returns true if differences were found
    // it also returns whether it couldn't be hashed
    checkIdentical := func(dst, src fs.Object) (differ bool, noHash bool) {
        cryptDst := dst.(*crypt.Object)
        underlyingDst := cryptDst.UnWrap()
        underlyingHash, err := underlyingDst.Hash(hashType)
        if err != nil {
            fs.Stats.Error(err)
            fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
            return true, false
        }
        if underlyingHash == "" {
            return false, true
        }
        cryptHash, err := fcrypt.ComputeHash(cryptDst, src, hashType)
        if err != nil {
            fs.Stats.Error(err)
            fs.Errorf(dst, "Error computing hash: %v", err)
            return true, false
        }
        if cryptHash == "" {
            return false, true
        }
        if cryptHash != underlyingHash {
            err = errors.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
            fs.Stats.Error(err)
            fs.Errorf(src, err.Error())
            return true, false
        }
        fs.Debugf(src, "OK")
        return false, false
    }

    return fs.CheckFn(fcrypt, fsrc, checkIdentical)
}
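The checkIdentical closure above is just one strategy plugged into the generic fs.CheckFn driver. A sketch of a different comparison using the same (differ, noHash) callback shape, here size-only (checkBySize is illustrative, not part of this commit):

    // checkBySize drives fs.CheckFn with a size-only comparison,
    // reusing the callback shape cryptCheck passes in above.
    func checkBySize(fdst, fsrc fs.Fs) error {
        return fs.CheckFn(fdst, fsrc, func(dst, src fs.Object) (differ bool, noHash bool) {
            return dst.Size() != src.Size(), false
        })
    }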
59
cmd/cryptdecode/cryptdecode.go
Normal file
@@ -0,0 +1,59 @@
package cryptdecode

import (
    "fmt"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/crypt"
    "github.com/ncw/rclone/fs"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:   "cryptdecode encryptedremote: encryptedfilename",
    Short: `Cryptdecode returns unencrypted file names.`,
    Long: `
rclone cryptdecode returns unencrypted file names when provided with
a list of encrypted file names. List limit is 10 items.

Use it like this

    rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(2, 11, command, args)
        fsrc := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {
            return cryptDecode(fsrc, args[1:])
        })
    },
}

// cryptDecode returns the unencrypted file name
func cryptDecode(fsrc fs.Fs, args []string) error {
    // Check if fsrc is a crypt
    fcrypt, ok := fsrc.(*crypt.Fs)
    if !ok {
        return errors.Errorf("%s:%s is not a crypt remote", fsrc.Name(), fsrc.Root())
    }

    output := ""

    for _, encryptedFileName := range args {
        fileName, err := fcrypt.DecryptFileName(encryptedFileName)
        if err != nil {
            output += fmt.Sprintln(encryptedFileName, "\t", "Failed to decrypt")
        } else {
            output += fmt.Sprintln(encryptedFileName, "\t", fileName)
        }
    }

    // Print rather than Printf: output is data, not a format string
    fmt.Print(output)

    return nil
}
31
cmd/dbhashsum/dbhashsum.go
Normal file
@@ -0,0 +1,31 @@
package dbhashsum

import (
    "os"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:   "dbhashsum remote:path",
    Short: `Produces a Dropbox hash file for all the objects in the path.`,
    Long: `
Produces a Dropbox hash file for all the objects in the path. The
hashes are calculated according to [Dropbox content hash
rules](https://www.dropbox.com/developers/reference/content-hash).
The output is in the same format as md5sum and sha1sum.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {
            return fs.DropboxHashSum(fsrc, os.Stdout)
        })
    },
}
117
cmd/dedupe/dedupe.go
Normal file
@@ -0,0 +1,117 @@
package dedupe

import (
    "log"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

var (
    dedupeMode = fs.DeduplicateInteractive
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
    commandDefinition.Flags().VarP(&dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
}

var commandDefinition = &cobra.Command{
    Use:   "dedupe [mode] remote:path",
    Short: `Interactively find duplicate files and delete/rename them.`,
    Long: `
By default ` + "`" + `dedupe` + "`" + ` interactively finds duplicate files and offers to
delete all but one or rename them to be different. Only useful with
Google Drive which can have duplicate file names.

In the first pass it will merge directories with the same name. It
will do this iteratively until all the identical directories have been
merged.

The ` + "`" + `dedupe` + "`" + ` command will delete all but one of any identical (same
md5sum) files it finds without confirmation. This means that for most
duplicated files the ` + "`" + `dedupe` + "`" + ` command will not be interactive. You
can use ` + "`" + `--dry-run` + "`" + ` to see what would happen without doing anything.

Here is an example run.

Before - with duplicates

    $ rclone lsl drive:dupes
    6048320 2016-03-05 16:23:16.798000000 one.txt
    6048320 2016-03-05 16:23:11.775000000 one.txt
    564374 2016-03-05 16:23:06.731000000 one.txt
    6048320 2016-03-05 16:18:26.092000000 one.txt
    6048320 2016-03-05 16:22:46.185000000 two.txt
    1744073 2016-03-05 16:22:38.104000000 two.txt
    564374 2016-03-05 16:22:52.118000000 two.txt

Now the ` + "`" + `dedupe` + "`" + ` session

    $ rclone dedupe drive:dupes
    2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
    one.txt: Found 4 duplicates - deleting identical copies
    one.txt: Deleting 2/3 identical duplicates (md5sum "1eedaa9fe86fd4b8632e2ac549403b36")
    one.txt: 2 duplicates remain
    1: 6048320 bytes, 2016-03-05 16:23:16.798000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
    2: 564374 bytes, 2016-03-05 16:23:06.731000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
    s) Skip and do nothing
    k) Keep just one (choose which in next step)
    r) Rename all to be different (by changing file.jpg to file-1.jpg)
    s/k/r> k
    Enter the number of the file to keep> 1
    one.txt: Deleted 1 extra copies
    two.txt: Found 3 duplicates - deleting identical copies
    two.txt: 3 duplicates remain
    1: 564374 bytes, 2016-03-05 16:22:52.118000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
    2: 6048320 bytes, 2016-03-05 16:22:46.185000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
    3: 1744073 bytes, 2016-03-05 16:22:38.104000000, md5sum 851957f7fb6f0bc4ce76be966d336802
    s) Skip and do nothing
    k) Keep just one (choose which in next step)
    r) Rename all to be different (by changing file.jpg to file-1.jpg)
    s/k/r> r
    two-1.txt: renamed from: two.txt
    two-2.txt: renamed from: two.txt
    two-3.txt: renamed from: two.txt

The result being

    $ rclone lsl drive:dupes
    6048320 2016-03-05 16:23:16.798000000 one.txt
    564374 2016-03-05 16:22:52.118000000 two-1.txt
    6048320 2016-03-05 16:22:46.185000000 two-2.txt
    1744073 2016-03-05 16:22:38.104000000 two-3.txt

Dedupe can be run non-interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag or by using an extra parameter with the same value

* ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
* ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
* ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
* ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
* ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
* ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.

For example to rename all the identically named photos in your Google Photos directory, do

    rclone dedupe --dedupe-mode rename "drive:Google Photos"

Or

    rclone dedupe rename "drive:Google Photos"
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 2, command, args)
        if len(args) > 1 {
            err := dedupeMode.Set(args[0])
            if err != nil {
                log.Fatal(err)
            }
            args = args[1:]
        }
        fdst := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {
            return fs.Deduplicate(fdst, dedupeMode)
        })
    },
}
41
cmd/delete/delete.go
Normal file
@@ -0,0 +1,41 @@
package delete

import (
    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:   "delete remote:path",
    Short: `Remove the contents of path.`,
    Long: `
Remove the contents of path. Unlike ` + "`" + `purge` + "`" + ` it obeys include/exclude
filters so can be used to selectively delete files.

Eg to delete all files bigger than 100MBytes:

Check what would be deleted first (use either)

    rclone --min-size 100M lsl remote:path
    rclone --dry-run --min-size 100M delete remote:path

Then delete

    rclone --min-size 100M delete remote:path

That reads "delete everything with a minimum size of 100 MB", hence
delete all files bigger than 100MBytes.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)
        cmd.Run(true, false, command, func() error {
            return fs.Delete(fsrc)
        })
    },
}
19
cmd/genautocomplete/genautocomplete.go
Normal file
@@ -0,0 +1,19 @@
package genautocomplete

import (
    "github.com/ncw/rclone/cmd"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(completionDefinition)
}

var completionDefinition = &cobra.Command{
    Use:   "genautocomplete [shell]",
    Short: `Output completion script for a given shell.`,
    Long: `
Generates a shell completion script for rclone.
Run with --help to list the supported shells.
`,
}
44
cmd/genautocomplete/genautocomplete_bash.go
Normal file
@@ -0,0 +1,44 @@
package genautocomplete

import (
    "log"

    "github.com/ncw/rclone/cmd"
    "github.com/spf13/cobra"
)

func init() {
    completionDefinition.AddCommand(bashCommandDefinition)
}

var bashCommandDefinition = &cobra.Command{
    Use:   "bash [output_file]",
    Short: `Output bash completion script for rclone.`,
    Long: `
Generates a bash shell autocompletion script for rclone.

This writes to /etc/bash_completion.d/rclone by default so will
probably need to be run with sudo or as root, eg

    sudo rclone genautocomplete bash

Log out and log in again to use the autocompletion scripts, or source
them directly

    . /etc/bash_completion

If you supply a command line argument the script will be written
there.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 1, command, args)
        out := "/etc/bash_completion.d/rclone"
        if len(args) > 0 {
            out = args[0]
        }
        err := cmd.Root.GenBashCompletionFile(out)
        if err != nil {
            log.Fatal(err)
        }
    },
}
35
cmd/genautocomplete/genautocomplete_test.go
Normal file
@@ -0,0 +1,35 @@
package genautocomplete

import (
    "io/ioutil"
    "os"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestCompletionBash(t *testing.T) {
    tempFile, err := ioutil.TempFile("", "completion_bash")
    assert.NoError(t, err)
    defer func() { _ = tempFile.Close() }()
    defer func() { _ = os.Remove(tempFile.Name()) }()

    bashCommandDefinition.Run(bashCommandDefinition, []string{tempFile.Name()})

    bs, err := ioutil.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    assert.NotEmpty(t, string(bs))
}

func TestCompletionZsh(t *testing.T) {
    tempFile, err := ioutil.TempFile("", "completion_zsh")
    assert.NoError(t, err)
    defer func() { _ = tempFile.Close() }()
    defer func() { _ = os.Remove(tempFile.Name()) }()

    zshCommandDefinition.Run(zshCommandDefinition, []string{tempFile.Name()})

    bs, err := ioutil.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    assert.NotEmpty(t, string(bs))
}
50
cmd/genautocomplete/genautocomplete_zsh.go
Normal file
@@ -0,0 +1,50 @@
package genautocomplete

import (
    "log"
    "os"

    "github.com/ncw/rclone/cmd"
    "github.com/spf13/cobra"
)

func init() {
    completionDefinition.AddCommand(zshCommandDefinition)
}

var zshCommandDefinition = &cobra.Command{
    Use:   "zsh [output_file]",
    Short: `Output zsh completion script for rclone.`,
    Long: `
Generates a zsh autocompletion script for rclone.

This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
probably need to be run with sudo or as root, eg

    sudo rclone genautocomplete zsh

Log out and log in again to use the autocompletion scripts, or source
them directly

    autoload -U compinit && compinit

If you supply a command line argument the script will be written
there.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 1, command, args)
        out := "/usr/share/zsh/vendor-completions/_rclone"
        if len(args) > 0 {
            out = args[0]
        }
        outFile, err := os.Create(out)
        if err != nil {
            log.Fatal(err)
        }
        defer func() { _ = outFile.Close() }()
        err = cmd.Root.GenZshCompletion(outFile)
        if err != nil {
            log.Fatal(err)
        }
    },
}
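If you don't have root, both generators accept an output file, so the script can be written somewhere user-writable and sourced from there (path illustrative):

    rclone genautocomplete zsh ~/.zsh/completions/_rclone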
55
cmd/gendocs/gendocs.go
Normal file
@@ -0,0 +1,55 @@
package gendocs

import (
    "fmt"
    "os"
    "path"
    "path/filepath"
    "strings"
    "time"

    "github.com/ncw/rclone/cmd"
    "github.com/spf13/cobra"
    "github.com/spf13/cobra/doc"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

const gendocFrontmatterTemplate = `---
date: %s
title: "%s"
slug: %s
url: %s
---
`

var commandDefinition = &cobra.Command{
    Use:   "gendocs output_directory",
    Short: `Output markdown docs for rclone to the directory supplied.`,
    Long: `
This produces markdown docs for the rclone commands to the directory
supplied. These are in a format suitable for hugo to render into the
rclone.org website.`,
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 1, command, args)
        out := args[0]
        err := os.MkdirAll(out, 0777)
        if err != nil {
            return err
        }
        now := time.Now().Format(time.RFC3339)
        prepender := func(filename string) string {
            name := filepath.Base(filename)
            base := strings.TrimSuffix(name, path.Ext(name))
            url := "/commands/" + strings.ToLower(base) + "/"
            return fmt.Sprintf(gendocFrontmatterTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
        }
        linkHandler := func(name string) string {
            base := strings.TrimSuffix(name, path.Ext(name))
            return "/commands/" + strings.ToLower(base) + "/"
        }
        return doc.GenMarkdownTreeCustom(cmd.Root, out, prepender, linkHandler)
    },
}
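Working through the template and prepender above: for a generated file named rclone_copy.md, base is "rclone_copy", the title replaces underscores with spaces, and the url is the lowercased base. So the page would start with frontmatter along these lines (date illustrative):

    ---
    date: 2017-07-22T18:15:25+01:00
    title: "rclone copy"
    slug: rclone_copy
    url: /commands/rclone_copy/
    ---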
18
cmd/info/all.sh
Executable file
@@ -0,0 +1,18 @@
#!/bin/bash
exec rclone --check-normalization=true --check-control=true --check-length=true info \
    /tmp/testInfo \
    TestAmazonCloudDrive:testInfo \
    TestB2:testInfo \
    TestCryptDrive:testInfo \
    TestCryptSwift:testInfo \
    TestDrive:testInfo \
    TestDropbox:testInfo \
    TestGoogleCloudStorage:rclone-testinfo \
    TestOneDrive:testInfo \
    TestS3:rclone-testinfo \
    TestSftp:testInfo \
    TestSwift:testInfo \
    TestYandex:testInfo \
    TestFTP:testInfo

# TestHubic:testInfo \
267
cmd/info/info.go
Normal file
@@ -0,0 +1,267 @@
package info

// FIXME once translations are implemented will need a no-escape
// option for Put so we can make these tests work again

import (
    "bytes"
    "fmt"
    "io"
    "sort"
    "strings"
    "sync"
    "time"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fstest"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

var (
    checkNormalization bool
    checkControl       bool
    checkLength        bool
    checkStreaming     bool
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
    commandDefinition.Flags().BoolVarP(&checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
    commandDefinition.Flags().BoolVarP(&checkControl, "check-control", "", true, "Check control characters.")
    commandDefinition.Flags().BoolVarP(&checkLength, "check-length", "", true, "Check max filename length.")
    commandDefinition.Flags().BoolVarP(&checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}

var commandDefinition = &cobra.Command{
    Use:   "info [remote:path]+",
    Short: `Discovers file name or other limitations for paths.`,
    Long: `rclone info discovers what filenames and upload methods are possible
to write to the paths passed in and how long they can be. It can take some
time. It will write test files into the remote:path passed in. It outputs
a bit of go code for each one.
`,
    Hidden: true,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1E6, command, args)
        for i := range args {
            f := cmd.NewFsDst(args[i : i+1])
            cmd.Run(false, false, command, func() error {
                return readInfo(f)
            })
        }
    },
}

type results struct {
    f                    fs.Fs
    mu                   sync.Mutex
    charNeedsEscaping    map[rune]bool
    maxFileLength        int
    canWriteUnnormalized bool
    canReadUnnormalized  bool
    canReadRenormalized  bool
    canStream            bool
}

func newResults(f fs.Fs) *results {
    return &results{
        f:                 f,
        charNeedsEscaping: make(map[rune]bool),
    }
}

// Print the results to stdout
func (r *results) Print() {
    fmt.Printf("// %s\n", r.f.Name())
    if checkControl {
        escape := []string{}
        for c, needsEscape := range r.charNeedsEscaping {
            if needsEscape {
                escape = append(escape, fmt.Sprintf("0x%02X", c))
            }
        }
        sort.Strings(escape)
        fmt.Printf("charNeedsEscaping = []byte{\n")
        fmt.Printf("\t%s\n", strings.Join(escape, ", "))
        fmt.Printf("}\n")
    }
    if checkLength {
        fmt.Printf("maxFileLength = %d\n", r.maxFileLength)
    }
    if checkNormalization {
        fmt.Printf("canWriteUnnormalized = %v\n", r.canWriteUnnormalized)
        fmt.Printf("canReadUnnormalized = %v\n", r.canReadUnnormalized)
        fmt.Printf("canReadRenormalized = %v\n", r.canReadRenormalized)
    }
    if checkStreaming {
        fmt.Printf("canStream = %v\n", r.canStream)
    }
}

// writeFile writes a file with some random contents
func (r *results) writeFile(path string) (fs.Object, error) {
    contents := fstest.RandomString(50)
    src := fs.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
    return r.f.Put(bytes.NewBufferString(contents), src)
}

// check whether normalization is enforced and check whether it is
// done on the files anyway
func (r *results) checkUTF8Normalization() {
    unnormalized := "Héroique"
    normalized := "Héroique"
    _, err := r.writeFile(unnormalized)
    if err != nil {
        r.canWriteUnnormalized = false
        return
    }
    r.canWriteUnnormalized = true
    _, err = r.f.NewObject(unnormalized)
    if err == nil {
        r.canReadUnnormalized = true
    }
    _, err = r.f.NewObject(normalized)
    if err == nil {
        r.canReadRenormalized = true
    }
}

// check we can write file with the rune passed in
func (r *results) checkChar(c rune) {
    fs.Infof(r.f, "Writing file 0x%02X", c)
    path := fmt.Sprintf("0x%02X-%c-", c, c)
    _, err := r.writeFile(path)
    escape := false
    if err != nil {
        fs.Infof(r.f, "Couldn't write file 0x%02X", c)
        escape = true // the rune couldn't be written so needs escaping
    } else {
        fs.Infof(r.f, "OK writing file 0x%02X", c)
    }
    r.mu.Lock()
    r.charNeedsEscaping[c] = escape
    r.mu.Unlock()
}

// check we can write a file with the control chars
func (r *results) checkControls() {
    fs.Infof(r.f, "Trying to create control character file names")
    // Concurrency control
    tokens := make(chan struct{}, fs.Config.Checkers)
    for i := 0; i < fs.Config.Checkers; i++ {
        tokens <- struct{}{}
    }
    var wg sync.WaitGroup
    for i := rune(0); i < 128; i++ {
        if i == 0 || i == '/' {
            // We're not even going to check NULL or /
            r.charNeedsEscaping[i] = true
            continue
        }
        wg.Add(1)
        c := i
        go func() {
            defer wg.Done()
            token := <-tokens
            r.checkChar(c)
            tokens <- token
        }()
    }
    wg.Wait()
    fs.Infof(r.f, "Done trying to create control character file names")
}

// find the max file name size we can use
func (r *results) findMaxLength() {
    const maxLen = 16 * 1024
    name := make([]byte, maxLen)
    for i := range name {
        name[i] = 'a'
    }
    // Find the first size of filename we can't write
    i := sort.Search(len(name), func(i int) (fail bool) {
        defer func() {
            if err := recover(); err != nil {
                fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
                fail = true
            }
        }()

        path := string(name[:i])
        _, err := r.writeFile(path)
        if err != nil {
            fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
            return true
        }
        fs.Infof(r.f, "Wrote file with name length %d", i)
        return false
    })
    r.maxFileLength = i - 1
    fs.Infof(r.f, "Max file length is %d", r.maxFileLength)
}

func (r *results) checkStreaming() {
    putter := r.f.Put
    if r.f.Features().PutStream != nil {
        fs.Infof(r.f, "Given remote has specialized streaming function. Using that to test streaming.")
        putter = r.f.Features().PutStream
    }

    contents := "thinking of test strings is hard"
    buf := bytes.NewBufferString(contents)
    hashIn := fs.NewMultiHasher()
    in := io.TeeReader(buf, hashIn)

    objIn := fs.NewStaticObjectInfo("checkStreamingTest", time.Now(), -1, true, nil, r.f)
    objR, err := putter(in, objIn)
    if err != nil {
        fs.Infof(r.f, "Streamed file failed to upload (%v)", err)
        r.canStream = false
        return
    }

    hashes := hashIn.Sums()
    types := objR.Fs().Hashes().Array()
    for _, hash := range types {
        sum, err := objR.Hash(hash)
        if err != nil {
            fs.Infof(r.f, "Streamed file failed when getting hash %v (%v)", hash, err)
            r.canStream = false
            return
        }
        if !fs.HashEquals(hashes[hash], sum) {
            fs.Infof(r.f, "Streamed file has incorrect hash %v: expecting %q got %q", hash, hashes[hash], sum)
            r.canStream = false
            return
        }
    }
    if int64(len(contents)) != objR.Size() {
        fs.Infof(r.f, "Streamed file has incorrect file size: expecting %d got %d", len(contents), objR.Size())
        r.canStream = false
        return
    }
    r.canStream = true
}

func readInfo(f fs.Fs) error {
    err := f.Mkdir("")
    if err != nil {
        return errors.Wrap(err, "couldn't mkdir")
    }
    r := newResults(f)
    if checkControl {
        r.checkControls()
    }
    if checkLength {
        r.findMaxLength()
    }
    if checkNormalization {
        r.checkUTF8Normalization()
    }
    if checkStreaming {
        r.checkStreaming()
    }
    r.Print()
    return nil
}
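findMaxLength leans on the contract of sort.Search: it returns the smallest index in [0, n) for which the predicate is true, so the first failing name length comes back and maxFileLength is that minus one. A tiny self-contained illustration of the same contract (the threshold 6 is made up):

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        // Pretend name lengths above 6 fail to upload: Search returns
        // the first failing length, so the max usable length is i-1.
        i := sort.Search(10, func(i int) bool { return i > 6 })
        fmt.Println(i, "fails first; max usable is", i-1) // 7 fails first; max usable is 6
    }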
49
cmd/listremotes/listremotes.go
Normal file
@@ -0,0 +1,49 @@
package ls

import (
    "fmt"
    "sort"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

// Globals
var (
    listLong bool
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
    commandDefinition.Flags().BoolVarP(&listLong, "long", "l", listLong, "Show the type as well as names.")
}

var commandDefinition = &cobra.Command{
    Use:   "listremotes",
    Short: `List all the remotes in the config file.`,
    Long: `
rclone listremotes lists all the available remotes from the config file.

When used with the -l flag it lists the types too.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 0, command, args)
        remotes := fs.ConfigFileSections()
        sort.Strings(remotes)
        maxlen := 1
        for _, remote := range remotes {
            if len(remote) > maxlen {
                maxlen = len(remote)
            }
        }
        for _, remote := range remotes {
            if listLong {
                remoteType := fs.ConfigFileGet(remote, "type", "UNKNOWN")
                fmt.Printf("%-*s %s\n", maxlen+1, remote+":", remoteType)
            } else {
                fmt.Printf("%s:\n", remote)
            }
        }
    },
}
25
cmd/ls/ls.go
Normal file
@@ -0,0 +1,25 @@
package ls

import (
    "os"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:   "ls remote:path",
    Short: `List all the objects in the path with size and path.`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {
            return fs.List(fsrc, os.Stdout)
        })
    },
}
46
cmd/ls2/ls2.go
Normal file
@@ -0,0 +1,46 @@
package ls2

import (
    "fmt"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

var (
    recurse bool
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
    commandDefinition.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
}

var commandDefinition = &cobra.Command{
    Use:    "ls2 remote:path",
    Short:  `List directories and objects in the path.`,
    Hidden: true,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {
            return fs.Walk(fsrc, "", false, fs.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
                if err != nil {
                    fs.Stats.Error(err)
                    fs.Errorf(path, "error listing: %v", err)
                    return nil
                }
                for _, entry := range entries {
                    _, isDir := entry.(fs.Directory)
                    if isDir {
                        fmt.Println(entry.Remote() + "/")
                    } else {
                        fmt.Println(entry.Remote())
                    }
                }
                return nil
            })
        })
    },
}
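The fs.Walk call above is the generic listing driver; ls2 only decides how each entry is printed. A sketch reusing the same signature to count entries instead of printing them (countEntries is illustrative, not part of this commit):

    // countEntries walks fsrc the way ls2 does, but just counts
    // the entries it is handed instead of printing them.
    func countEntries(fsrc fs.Fs) (n int, err error) {
        err = fs.Walk(fsrc, "", false, fs.ConfigMaxDepth(true), func(path string, entries fs.DirEntries, err error) error {
            if err != nil {
                return err
            }
            n += len(entries)
            return nil
        })
        return n, err
    }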
25
cmd/lsd/lsd.go
Normal file
@@ -0,0 +1,25 @@
package lsd

import (
    "os"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:   "lsd remote:path",
    Short: `List all directories/containers/buckets in the path.`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {
            return fs.ListDir(fsrc, os.Stdout)
        })
    },
}
147
cmd/lsjson/lsjson.go
Normal file
@@ -0,0 +1,147 @@
package lsjson

import (
    "encoding/json"
    "fmt"
    "os"
    "path"
    "time"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

var (
    recurse   bool
    showHash  bool
    noModTime bool
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
    commandDefinition.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
    commandDefinition.Flags().BoolVarP(&showHash, "hash", "", false, "Include hashes in the output (may take longer).")
    commandDefinition.Flags().BoolVarP(&noModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
}

// lsJSON is the struct which gets marshalled for each line
type lsJSON struct {
    Path    string
    Name    string
    Size    int64
    ModTime Timestamp //`json:",omitempty"`
    IsDir   bool
    Hashes  map[string]string `json:",omitempty"`
}

// Timestamp is a time in RFC3339 format with nanosecond precision
type Timestamp time.Time

// MarshalJSON turns a Timestamp into JSON
func (t Timestamp) MarshalJSON() (out []byte, err error) {
    tt := time.Time(t)
    if tt.IsZero() {
        return []byte(`""`), nil
    }
    return []byte(`"` + tt.Format(time.RFC3339Nano) + `"`), nil
}

var commandDefinition = &cobra.Command{
    Use:   "lsjson remote:path",
    Short: `List directories and objects in the path in JSON format.`,
    Long: `List directories and objects in the path in JSON format.

The output is an array of Items, where each Item looks like this

    {
        "Hashes" : {
            "SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f",
            "MD5" : "b1946ac92492d2347c6235b4d2611184",
            "DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc"
        },
        "IsDir" : false,
        "ModTime" : "2017-05-31T16:15:57.034468261+01:00",
        "Name" : "file.txt",
        "Path" : "full/path/goes/here/file.txt",
        "Size" : 6
    }

If --hash is not specified the Hashes property won't be emitted.

If --no-modtime is specified then ModTime will be blank.

The time is in RFC3339 format with nanosecond precision.

The whole output can be processed as a JSON blob, or alternatively it
can be processed line by line as each item is written one per line.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {
            fmt.Println("[")
            first := true
            err := fs.Walk(fsrc, "", false, fs.ConfigMaxDepth(recurse), func(dirPath string, entries fs.DirEntries, err error) error {
                if err != nil {
                    fs.Stats.Error(err)
                    fs.Errorf(dirPath, "error listing: %v", err)
                    return nil
                }
                for _, entry := range entries {
                    item := lsJSON{
                        Path: entry.Remote(),
                        Name: path.Base(entry.Remote()),
                        Size: entry.Size(),
                    }
                    if !noModTime {
                        item.ModTime = Timestamp(entry.ModTime())
                    }
                    switch x := entry.(type) {
                    case fs.Directory:
                        item.IsDir = true
                    case fs.Object:
                        item.IsDir = false
                        if showHash {
                            item.Hashes = make(map[string]string)
                            for _, hashType := range x.Fs().Hashes().Array() {
                                hash, err := x.Hash(hashType)
                                if err != nil {
                                    fs.Errorf(x, "Failed to read hash: %v", err)
                                } else if hash != "" {
                                    item.Hashes[hashType.String()] = hash
                                }
                            }
                        }
                    default:
                        fs.Errorf(nil, "Unknown type %T in listing", entry)
                    }
                    out, err := json.Marshal(item)
                    if err != nil {
                        return errors.Wrap(err, "failed to marshal list object")
                    }
                    if first {
                        first = false
                    } else {
                        fmt.Print(",\n")
                    }
                    _, err = os.Stdout.Write(out)
                    if err != nil {
                        return errors.Wrap(err, "failed to write to output")
                    }
                }
                return nil
            })
            if err != nil {
                return errors.Wrap(err, "error listing JSON")
            }
            if !first {
                fmt.Println()
            }
            fmt.Println("]")
            return nil
        })
    },
}
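Because the opening [, one JSON object per line, and the closing ] are printed separately, consumers can either parse the whole output as one blob or read it line by line as it streams. Two entries would come out roughly like this (values illustrative; field order follows the struct above):

    [
    {"Path":"file.txt","Name":"file.txt","Size":6,"ModTime":"2017-05-31T16:15:57.034468261+01:00","IsDir":false},
    {"Path":"sub","Name":"sub","Size":0,"ModTime":"2017-05-31T16:15:57+01:00","IsDir":true}
    ]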
25
cmd/lsl/lsl.go
Normal file
@@ -0,0 +1,25 @@
package lsl

import (
    "os"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:   "lsl remote:path",
    Short: `List all the objects in the path with modification time, size and path.`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {
            return fs.ListLong(fsrc, os.Stdout)
        })
    },
}
29
cmd/md5sum/md5sum.go
Normal file
@@ -0,0 +1,29 @@
package md5sum

import (
    "os"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:   "md5sum remote:path",
    Short: `Produces an md5sum file for all the objects in the path.`,
    Long: `
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {
            return fs.Md5sum(fsrc, os.Stdout)
        })
    },
}
49
cmd/memtest/memtest.go
Normal file
@@ -0,0 +1,49 @@
package memtest

import (
    "runtime"
    "sync"

    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:    "memtest remote:path",
    Short:  `Load all the objects at remote:path and report memory stats.`,
    Hidden: true,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {
            objects, _, err := fs.Count(fsrc)
            if err != nil {
                return err
            }
            objs := make([]fs.Object, 0, objects)
            var before, after runtime.MemStats
            runtime.GC()
            runtime.ReadMemStats(&before)
            var mu sync.Mutex
            err = fs.ListFn(fsrc, func(o fs.Object) {
                mu.Lock()
                objs = append(objs, o)
                mu.Unlock()
            })
            if err != nil {
                return err
            }
            runtime.GC()
            runtime.ReadMemStats(&after)
            usedMemory := after.Alloc - before.Alloc
            fs.Logf(nil, "%d objects took %d bytes, %.1f bytes/object", len(objs), usedMemory, float64(usedMemory)/float64(len(objs)))
            fs.Logf(nil, "System memory changed from %d to %d bytes a change of %d bytes", before.Sys, after.Sys, after.Sys-before.Sys)
            return nil
        })
    },
}
23
cmd/mkdir/mkdir.go
Normal file
@@ -0,0 +1,23 @@
package mkdir

import (
    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/fs"
    "github.com/spf13/cobra"
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
    Use:   "mkdir remote:path",
    Short: `Make the path if it doesn't already exist.`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fdst := cmd.NewFsDst(args)
        cmd.Run(true, false, command, func() error {
            return fs.Mkdir(fdst, "")
        })
    },
}
182
cmd/mount/dir.go
Normal file
@@ -0,0 +1,182 @@
// +build linux darwin freebsd

package mount

import (
    "os"
    "time"

    "bazil.org/fuse"
    fusefs "bazil.org/fuse/fs"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/vfs"
    "github.com/pkg/errors"
    "golang.org/x/net/context"
)

// Dir represents a directory entry
type Dir struct {
    *vfs.Dir
}

// Check interface satisfied
var _ fusefs.Node = (*Dir)(nil)

// Attr updates the attributes of a directory
func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
    defer fs.Trace(d, "")("attr=%+v, err=%v", a, &err)
    a.Gid = d.VFS().Opt.GID
    a.Uid = d.VFS().Opt.UID
    a.Mode = os.ModeDir | d.VFS().Opt.DirPerms
    modTime := d.ModTime()
    a.Atime = modTime
    a.Mtime = modTime
    a.Ctime = modTime
    a.Crtime = modTime
    // FIXME include Valid so get some caching?
    // FIXME fs.Debugf(d.path, "Dir.Attr %+v", a)
    return nil
}

// Check interface satisfied
var _ fusefs.NodeSetattrer = (*Dir)(nil)

// Setattr handles attribute changes from FUSE. Currently supports ModTime only.
func (d *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
    defer fs.Trace(d, "stat=%+v", req)("err=%v", &err)
    if d.VFS().Opt.NoModTime {
        return nil
    }

    if req.Valid.MtimeNow() {
        err = d.SetModTime(time.Now())
    } else if req.Valid.Mtime() {
        err = d.SetModTime(req.Mtime)
    }

    return translateError(err)
}

// Check interface satisfied
var _ fusefs.NodeRequestLookuper = (*Dir)(nil)

// Lookup looks up a specific entry in the receiver.
//
// Lookup should return a Node corresponding to the entry. If the
// name does not exist in the directory, Lookup should return ENOENT.
//
// Lookup need not handle the names "." and "..".
func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fusefs.Node, err error) {
    defer fs.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
    mnode, err := d.Dir.Stat(req.Name)
    if err != nil {
        return nil, translateError(err)
    }
    switch x := mnode.(type) {
    case *vfs.File:
        return &File{x}, nil
    case *vfs.Dir:
        return &Dir{x}, nil
    }
    panic("bad type")
}

// Check interface satisfied
var _ fusefs.HandleReadDirAller = (*Dir)(nil)

// ReadDirAll reads the contents of the directory
func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) {
    itemsRead := -1
    defer fs.Trace(d, "")("item=%d, err=%v", &itemsRead, &err)
    items, err := d.Dir.ReadDirAll()
    if err != nil {
        return nil, translateError(err)
    }
    for _, node := range items {
        var dirent = fuse.Dirent{
            // Inode FIXME ???
            Type: fuse.DT_File,
            Name: node.Name(),
        }
        if node.IsDir() {
            dirent.Type = fuse.DT_Dir
        }
        dirents = append(dirents, dirent)
    }
    itemsRead = len(dirents)
    return dirents, nil
}

var _ fusefs.NodeCreater = (*Dir)(nil)

// Create makes a new file
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
    defer fs.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
    file, err := d.Dir.Create(req.Name)
    if err != nil {
        return nil, nil, translateError(err)
    }
    fh, err := file.Open(int(req.Flags) | os.O_CREATE)
    if err != nil {
        return nil, nil, translateError(err)
    }
    return &File{file}, &FileHandle{fh}, err
}

var _ fusefs.NodeMkdirer = (*Dir)(nil)

// Mkdir creates a new directory
func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.Node, err error) {
    defer fs.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
    dir, err := d.Dir.Mkdir(req.Name)
    if err != nil {
        return nil, translateError(err)
    }
    return &Dir{dir}, nil
}

var _ fusefs.NodeRemover = (*Dir)(nil)

// Remove removes the entry with the given name from
// the receiver, which must be a directory. The entry to be removed
// may correspond to a file (unlink) or to a directory (rmdir).
func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
    defer fs.Trace(d, "name=%q", req.Name)("err=%v", &err)
    err = d.Dir.RemoveName(req.Name)
    if err != nil {
        return translateError(err)
    }
    return nil
}

// Check interface satisfied
var _ fusefs.NodeRenamer = (*Dir)(nil)

// Rename the file
func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs.Node) (err error) {
    defer fs.Trace(d, "oldName=%q, newName=%q, newDir=%+v", req.OldName, req.NewName, newDir)("err=%v", &err)
    destDir, ok := newDir.(*Dir)
    if !ok {
        return errors.Errorf("Unknown Dir type %T", newDir)
    }

    err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
    if err != nil {
        return translateError(err)
    }

    return nil
}

// Check interface satisfied
var _ fusefs.NodeFsyncer = (*Dir)(nil)

// Fsync the directory
func (d *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
    defer fs.Trace(d, "")("err=%v", &err)
    err = d.Dir.Sync()
    if err != nil {
        return translateError(err)
    }
    return nil
}
96
cmd/mount/file.go
Normal file
96
cmd/mount/file.go
Normal file
@@ -0,0 +1,96 @@
// +build linux darwin freebsd

package mount

import (
	"os"
	"time"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/vfs"
	"golang.org/x/net/context"
)

// File represents a file
type File struct {
	*vfs.File
}

// Check interface satisfied
var _ fusefs.Node = (*File)(nil)

// Attr fills out the attributes for the file
func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
	defer fs.Trace(f, "")("a=%+v, err=%v", a, &err)
	modTime := f.File.ModTime()
	Size := uint64(f.File.Size())
	Blocks := (Size + 511) / 512
	a.Gid = f.VFS().Opt.GID
	a.Uid = f.VFS().Opt.UID
	a.Mode = f.VFS().Opt.FilePerms
	a.Size = Size
	a.Atime = modTime
	a.Mtime = modTime
	a.Ctime = modTime
	a.Crtime = modTime
	a.Blocks = Blocks
	return nil
}

// Check interface satisfied
var _ fusefs.NodeSetattrer = (*File)(nil)

// Setattr handles attribute changes from FUSE. Currently supports ModTime and Size only
func (f *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
	defer fs.Trace(f, "a=%+v", req)("err=%v", &err)
	if !f.VFS().Opt.NoModTime {
		if req.Valid.MtimeNow() {
			err = f.File.SetModTime(time.Now())
		}
		if req.Valid.Mtime() {
			err = f.File.SetModTime(req.Mtime)
		}
	}
	if req.Valid.Size() {
		err = f.File.Truncate(int64(req.Size))
	}
	return translateError(err)
}

// Check interface satisfied
var _ fusefs.NodeOpener = (*File)(nil)

// Open the file for read or write
func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fh fusefs.Handle, err error) {
	defer fs.Trace(f, "flags=%v", req.Flags)("fh=%v, err=%v", &fh, &err)

	// fuse flags are based off syscall flags as are os flags, so
	// should be compatible
	//
	// we seem to be missing O_CREATE here so add it in to allow
	// file creation
	handle, err := f.File.Open(int(req.Flags) | os.O_CREATE)
	if err != nil {
		return nil, translateError(err)
	}

	// See if seeking is supported and set FUSE hint accordingly
	if _, err = handle.Seek(0, 1); err != nil {
		resp.Flags |= fuse.OpenNonSeekable
	}

	return &FileHandle{handle}, nil
}

// Check interface satisfied
var _ fusefs.NodeFsyncer = (*File)(nil)

// Fsync the file
//
// Note that we don't do anything except return OK
func (f *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
	defer fs.Trace(f, "")("err=%v", &err)
	return nil
}
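File.Open above probes seekability with handle.Seek(0, 1): a zero-byte seek relative to the current position (whence value 1, i.e. io.SeekCurrent) changes nothing on a seekable handle but errors on a non-seekable one, which is what drives the fuse.OpenNonSeekable hint. A minimal sketch of the same probe against any io.Seeker, using only the standard library:

package main

import (
	"fmt"
	"io"
	"strings"
)

// seekable reports whether s supports seeking, using the same
// zero-byte relative seek trick as File.Open: it moves the offset by
// nothing, so it is side-effect free when it succeeds.
func seekable(s io.Seeker) bool {
	_, err := s.Seek(0, io.SeekCurrent) // io.SeekCurrent == 1
	return err == nil
}

func main() {
	r := strings.NewReader("hello")
	fmt.Println(seekable(r)) // true - strings.Reader supports Seek
}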
cmd/mount/fs.go (new file, 95 lines)
@@ -0,0 +1,95 @@
// FUSE main Fs

// +build linux darwin freebsd

package mount

import (
	"syscall"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/vfs"
	"github.com/ncw/rclone/vfs/vfsflags"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

// FS represents the top level filing system
type FS struct {
	*vfs.VFS
	f fs.Fs
}

// Check interface satisfied
var _ fusefs.FS = (*FS)(nil)

// NewFS makes a new FS
func NewFS(f fs.Fs) *FS {
	fsys := &FS{
		VFS: vfs.New(f, &vfsflags.Opt),
		f:   f,
	}
	return fsys
}

// Root returns the root node
func (f *FS) Root() (node fusefs.Node, err error) {
	defer fs.Trace("", "")("node=%+v, err=%v", &node, &err)
	root, err := f.VFS.Root()
	if err != nil {
		return nil, translateError(err)
	}
	return &Dir{root}, nil
}

// Check interface satisfied
var _ fusefs.FSStatfser = (*FS)(nil)

// Statfs is called to obtain file system metadata.
// It should write that data to resp.
func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) (err error) {
	defer fs.Trace("", "")("stat=%+v, err=%v", resp, &err)
	const blockSize = 4096
	const fsBlocks = (1 << 50) / blockSize
	resp.Blocks = fsBlocks  // Total data blocks in file system.
	resp.Bfree = fsBlocks   // Free blocks in file system.
	resp.Bavail = fsBlocks  // Free blocks in file system if you're not root.
	resp.Files = 1E9        // Total files in file system.
	resp.Ffree = 1E9        // Free files in file system.
	resp.Bsize = blockSize  // Block size
	resp.Namelen = 255      // Maximum file name length?
	resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
	return nil
}

// Translate errors from mountlib
func translateError(err error) error {
	if err == nil {
		return nil
	}
	switch errors.Cause(err) {
	case vfs.OK:
		return nil
	case vfs.ENOENT:
		return fuse.ENOENT
	case vfs.EEXIST:
		return fuse.EEXIST
	case vfs.EPERM:
		return fuse.EPERM
	case vfs.ECLOSED:
		return fuse.Errno(syscall.EBADF)
	case vfs.ENOTEMPTY:
		return fuse.Errno(syscall.ENOTEMPTY)
	case vfs.ESPIPE:
		return fuse.Errno(syscall.ESPIPE)
	case vfs.EBADF:
		return fuse.Errno(syscall.EBADF)
	case vfs.EROFS:
		return fuse.Errno(syscall.EROFS)
	case vfs.ENOSYS:
		return fuse.ENOSYS
	}
	return err
}
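translateError switches on errors.Cause(err) rather than on err itself, so sentinel errors from the vfs layer still match even after intermediate code has wrapped them with context. A minimal sketch of that unwrap behaviour using github.com/pkg/errors, with a hypothetical errNotFound standing in for a sentinel like vfs.ENOENT:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// errNotFound stands in for a sentinel such as vfs.ENOENT (hypothetical name).
var errNotFound = errors.New("not found")

func main() {
	// Wrap the sentinel with extra context, as intermediate layers do.
	err := errors.Wrap(errNotFound, "opening directory")

	// A plain comparison fails because err is now a wrapper...
	fmt.Println(err == errNotFound) // false

	// ...but errors.Cause recovers the original sentinel, which is
	// why translateError switches on errors.Cause(err).
	fmt.Println(errors.Cause(err) == errNotFound) // true
}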
cmd/mount/handle.go (new file, 84 lines)
@@ -0,0 +1,84 @@
// +build linux darwin freebsd

package mount

import (
	"io"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/vfs"
	"golang.org/x/net/context"
)

// FileHandle is an open file handle on a File
type FileHandle struct {
	vfs.Handle
}

// Check interface satisfied
var _ fusefs.HandleReader = (*FileHandle)(nil)

// Read from the file handle
func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) (err error) {
	var n int
	defer fs.Trace(fh, "len=%d, offset=%d", req.Size, req.Offset)("read=%d, err=%v", &n, &err)
	data := make([]byte, req.Size)
	n, err = fh.Handle.ReadAt(data, req.Offset)
	if err == io.EOF {
		err = nil
	} else if err != nil {
		return translateError(err)
	}
	resp.Data = data[:n]
	return nil
}

// Check interface satisfied
var _ fusefs.HandleWriter = (*FileHandle)(nil)

// Write data to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	defer fs.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
	n, err := fh.Handle.WriteAt(req.Data, req.Offset)
	if err != nil {
		return translateError(err)
	}
	resp.Size = int(n)
	return nil
}

// Check interface satisfied
var _ fusefs.HandleFlusher = (*FileHandle)(nil)

// Flush is called on each close() of a file descriptor. So if a
// filesystem wants to return write errors in close() and the file has
// cached dirty data, this is a good place to write back data and
// return any errors. Since many applications ignore close() errors
// this is not always useful.
//
// NOTE: The flush() method may be called more than once for each
// open(). This happens if more than one file descriptor refers to an
// opened file due to dup(), dup2() or fork() calls. It is not
// possible to determine if a flush is final, so each flush should be
// treated equally. Multiple write-flush sequences are relatively
// rare, so this shouldn't be a problem.
//
// Filesystems shouldn't assume that flush will always be called after
// some writes, or that it will be called at all.
func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) (err error) {
	defer fs.Trace(fh, "")("err=%v", &err)
	return translateError(fh.Handle.Flush())
}

var _ fusefs.HandleReleaser = (*FileHandle)(nil)

// Release is called when we are finished with the file handle
//
// It isn't called directly from userspace so the error is ignored by
// the kernel
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) (err error) {
	defer fs.Trace(fh, "")("err=%v", &err)
	return translateError(fh.Handle.Release())
}
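FileHandle.Read treats io.EOF as success and returns the short buffer, because ReadAt reports io.EOF together with whatever bytes it did manage to read at the end of a file; dropping those bytes would corrupt the last partial block. A minimal demonstration of that contract with the standard library only:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("hello")

	// Ask for more bytes than remain after offset 3: ReadAt returns
	// the partial data ("lo") together with io.EOF, so the caller
	// must keep the n bytes and treat io.EOF as a normal short read,
	// exactly as FileHandle.Read does.
	buf := make([]byte, 10)
	n, err := r.ReadAt(buf, 3)
	fmt.Printf("n=%d data=%q err=%v\n", n, buf[:n], err) // n=2 data="lo" err=EOF
	if err == io.EOF {
		err = nil // short read at end of file is not an error here
	}
	fmt.Println("final err:", err)
}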
cmd/mount/mount.go (new file, 163 lines)
@@ -0,0 +1,163 @@
// Package mount implements a FUSE mounting system for rclone remotes.

// +build linux darwin freebsd

package mount

import (
	"os"
	"os/signal"
	"syscall"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
	"github.com/ncw/rclone/cmd/mountlib"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/vfs"
	"github.com/ncw/rclone/vfs/vfsflags"
	"github.com/okzk/sdnotify"
	"github.com/pkg/errors"
)

func init() {
	mountlib.NewMountCommand("mount", Mount)
}

// mountOptions configures the options from the command line flags
func mountOptions(device string) (options []fuse.MountOption) {
	options = []fuse.MountOption{
		fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
		fuse.Subtype("rclone"),
		fuse.FSName(device), fuse.VolumeName(device),
		fuse.NoAppleDouble(),
		fuse.NoAppleXattr(),

		// Options from benchmarking in the fuse module
		//fuse.MaxReadahead(64 * 1024 * 1024),
		//fuse.AsyncRead(), - FIXME this causes
		// ReadFileHandle.Read error: read /home/files/ISOs/xubuntu-15.10-desktop-amd64.iso: bad file descriptor
		// which is probably related to errors people are having
		//fuse.WritebackCache(),
	}
	if mountlib.AllowNonEmpty {
		options = append(options, fuse.AllowNonEmptyMount())
	}
	if mountlib.AllowOther {
		options = append(options, fuse.AllowOther())
	}
	if mountlib.AllowRoot {
		options = append(options, fuse.AllowRoot())
	}
	if mountlib.DefaultPermissions {
		options = append(options, fuse.DefaultPermissions())
	}
	if vfsflags.Opt.ReadOnly {
		options = append(options, fuse.ReadOnly())
	}
	if mountlib.WritebackCache {
		options = append(options, fuse.WritebackCache())
	}
	if len(mountlib.ExtraOptions) > 0 {
		fs.Errorf(nil, "-o/--option not supported with this FUSE backend")
	}
	if len(mountlib.ExtraFlags) > 0 {
		fs.Errorf(nil, "--fuse-flag not supported with this FUSE backend")
	}
	return options
}

// mount the file system
//
// The mount point will be ready when this returns.
//
// returns a VFS, an error channel which the serve process uses to
// report an error when fusermount is called, an unmount function and
// an error.
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
	fs.Debugf(f, "Mounting on %q", mountpoint)
	c, err := fuse.Mount(mountpoint, mountOptions(f.Name()+":"+f.Root())...)
	if err != nil {
		return nil, nil, nil, err
	}

	filesys := NewFS(f)
	server := fusefs.New(c, nil)

	// Serve the mount point in the background returning error to errChan
	errChan := make(chan error, 1)
	go func() {
		err := server.Serve(filesys)
		closeErr := c.Close()
		if err == nil {
			err = closeErr
		}
		errChan <- err
	}()

	// check if the mount process has an error to report
	<-c.Ready
	if err := c.MountError; err != nil {
		return nil, nil, nil, err
	}

	unmount := func() error {
		// Shutdown the VFS
		filesys.VFS.Shutdown()
		return fuse.Unmount(mountpoint)
	}

	return filesys.VFS, errChan, unmount, nil
}

// Mount mounts the remote at mountpoint.
func Mount(f fs.Fs, mountpoint string) error {
	if mountlib.DebugFUSE {
		fuse.Debug = func(msg interface{}) {
			fs.Debugf("fuse", "%v", msg)
		}
	}

	// Mount it
	FS, errChan, unmount, err := mount(f, mountpoint)
	if err != nil {
		return errors.Wrap(err, "failed to mount FUSE fs")
	}

	sigInt := make(chan os.Signal, 1)
	signal.Notify(sigInt, syscall.SIGINT, syscall.SIGTERM)
	sigHup := make(chan os.Signal, 1)
	signal.Notify(sigHup, syscall.SIGHUP)

	if err := sdnotify.SdNotifyReady(); err != nil && err != sdnotify.SdNotifyNoSocket {
		return errors.Wrap(err, "failed to notify systemd")
	}

waitloop:
	for {
		select {
		// umount triggered outside the app
		case err = <-errChan:
			break waitloop
		// Program abort: umount
		case <-sigInt:
			err = unmount()
			break waitloop
		// user sent SIGHUP to clear the cache
		case <-sigHup:
			root, err := FS.Root()
			if err != nil {
				fs.Errorf(f, "Error reading root: %v", err)
			} else {
				root.ForgetAll()
			}
		}
	}

	_ = sdnotify.SdNotifyStopping()
	if err != nil {
		return errors.Wrap(err, "failed to umount FUSE fs")
	}

	return nil
}
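Mount waits in a labelled select loop so that a background serve error, an interrupt, and a SIGHUP are all handled from one place, with "break waitloop" escaping the enclosing for rather than just the select. A minimal standalone sketch of that pattern, with a hypothetical done channel standing in for errChan and only the standard library involved:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// done stands in for errChan: the background worker reports here.
	done := make(chan error, 1)
	go func() {
		time.Sleep(2 * time.Second) // pretend to serve
		done <- nil
	}()

	sigInt := make(chan os.Signal, 1)
	signal.Notify(sigInt, syscall.SIGINT, syscall.SIGTERM)
	sigHup := make(chan os.Signal, 1)
	signal.Notify(sigHup, syscall.SIGHUP)

	var err error
waitloop:
	for {
		select {
		case err = <-done: // worker finished (or failed)
			break waitloop
		case <-sigInt: // Ctrl-C: clean up and stop
			fmt.Println("interrupted, cleaning up")
			break waitloop
		case <-sigHup: // SIGHUP: do a side action, keep looping
			fmt.Println("got SIGHUP, flushing caches")
		}
	}
	fmt.Println("exit err:", err)
}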
cmd/mount/mount_test.go (new file, 33 lines)
@@ -0,0 +1,33 @@
// +build linux darwin freebsd

package mount

import (
	"testing"

	"github.com/ncw/rclone/cmd/mountlib/mounttest"
)

func TestMain(m *testing.M) { mounttest.TestMain(m, mount) }
func TestDirLs(t *testing.T) { mounttest.TestDirLs(t) }
func TestDirCreateAndRemoveDir(t *testing.T) { mounttest.TestDirCreateAndRemoveDir(t) }
func TestDirCreateAndRemoveFile(t *testing.T) { mounttest.TestDirCreateAndRemoveFile(t) }
func TestDirRenameFile(t *testing.T) { mounttest.TestDirRenameFile(t) }
func TestDirRenameEmptyDir(t *testing.T) { mounttest.TestDirRenameEmptyDir(t) }
func TestDirRenameFullDir(t *testing.T) { mounttest.TestDirRenameFullDir(t) }
func TestDirModTime(t *testing.T) { mounttest.TestDirModTime(t) }
func TestDirCacheFlush(t *testing.T) { mounttest.TestDirCacheFlush(t) }
func TestDirCacheFlushOnDirRename(t *testing.T) { mounttest.TestDirCacheFlushOnDirRename(t) }
func TestFileModTime(t *testing.T) { mounttest.TestFileModTime(t) }
func TestFileModTimeWithOpenWriters(t *testing.T) { mounttest.TestFileModTimeWithOpenWriters(t) }
func TestMount(t *testing.T) { mounttest.TestMount(t) }
func TestRoot(t *testing.T) { mounttest.TestRoot(t) }
func TestReadByByte(t *testing.T) { mounttest.TestReadByByte(t) }
func TestReadChecksum(t *testing.T) { mounttest.TestReadChecksum(t) }
func TestReadFileDoubleClose(t *testing.T) { mounttest.TestReadFileDoubleClose(t) }
func TestReadSeek(t *testing.T) { mounttest.TestReadSeek(t) }
func TestWriteFileNoWrite(t *testing.T) { mounttest.TestWriteFileNoWrite(t) }
func TestWriteFileWrite(t *testing.T) { mounttest.TestWriteFileWrite(t) }
func TestWriteFileOverwrite(t *testing.T) { mounttest.TestWriteFileOverwrite(t) }
func TestWriteFileDoubleClose(t *testing.T) { mounttest.TestWriteFileDoubleClose(t) }
func TestWriteFileFsync(t *testing.T) { mounttest.TestWriteFileFsync(t) }
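This file reuses the whole shared mounttest suite by injecting its own mount function through mounttest.TestMain; each FUSE backend only needs these thin one-line wrappers. A minimal sketch of the injection pattern in a standalone _test.go file, with hypothetical names (MountFn here mirrors only the shape of mounttest's hook, and the sketch assumes it sits next to a real package it is testing):

package mypkg_test

import (
	"os"
	"testing"
)

// MountFn is the hook the shared suite calls (hypothetical name).
type MountFn func(target string) error

// mountFn is injected once by TestMain and used by every test below.
var mountFn MountFn

// sharedTestMain stands in for mounttest.TestMain: it stores the
// backend-specific function, then runs the whole suite against it.
func sharedTestMain(m *testing.M, fn MountFn) {
	mountFn = fn
	os.Exit(m.Run())
}

// fakeMount is a stand-in for this package's real mount function.
func fakeMount(target string) error { return nil }

func TestMain(m *testing.M) { sharedTestMain(m, fakeMount) }

func TestMountWorks(t *testing.T) {
	if err := mountFn("/tmp/example"); err != nil {
		t.Fatal(err)
	}
}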
cmd/mount/mount_unsupported.go (new file, 6 lines)
@@ -0,0 +1,6 @@
// Build for mount for unsupported platforms to stop go complaining
// about "no buildable Go source files"

// +build !linux,!darwin,!freebsd

package mount
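The two build tags in this diff combine their terms differently: space-separated terms OR together, while comma-separated terms AND together. A minimal sketch assuming the pre-Go-1.17 "// +build" syntax used throughout these files (note the mandatory blank line after the constraint):

// +build !linux,!darwin,!freebsd

// Comma-separated terms AND together, so this file builds only on
// targets that are not linux AND not darwin AND not freebsd. The
// space-separated form "// +build linux darwin freebsd" used by the
// other files ORs its terms instead, selecting exactly the opposite
// set of platforms - together the two tags cover every GOOS once.

package mount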
cmd/mount/test/seek_speed.go (new file, 72 lines)
@@ -0,0 +1,72 @@
// +build ignore

// Read blocks out of a single file to time the seeking code
package main

import (
	"flag"
	"io"
	"log"
	"math/rand"
	"os"
	"time"
)

var (
	// Flags
	iterations   = flag.Int("n", 25, "Iterations to try")
	maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
	randSeed     = flag.Int64("seed", 1, "Seed for the random number generator")
)

func randomSeekTest(size int64, in *os.File, name string) {
	start := rand.Int63n(size)
	blockSize := rand.Intn(*maxBlockSize)
	if int64(blockSize) > size-start {
		blockSize = int(size - start)
	}
	log.Printf("Reading %d from %d", blockSize, start)

	_, err := in.Seek(start, 0)
	if err != nil {
		log.Fatalf("Seek failed on %q: %v", name, err)
	}

	buf := make([]byte, blockSize)
	_, err = io.ReadFull(in, buf)
	if err != nil {
		log.Fatalf("Read failed on %q: %v", name, err)
	}
}

func main() {
	flag.Parse()
	args := flag.Args()
	if len(args) != 1 {
		log.Fatalf("Require 1 file as argument")
	}
	rand.Seed(*randSeed)

	name := args[0]
	in, err := os.Open(name)
	if err != nil {
		log.Fatalf("Couldn't open %q: %v", name, err)
	}

	fi, err := in.Stat()
	if err != nil {
		log.Fatalf("Couldn't stat %q: %v", name, err)
	}

	start := time.Now()
	for i := 0; i < *iterations; i++ {
		randomSeekTest(fi.Size(), in, name)
	}
	dt := time.Since(start)
	log.Printf("That took %v for %d iterations, %v per iteration", dt, *iterations, dt/time.Duration(*iterations))

	err = in.Close()
	if err != nil {
		log.Fatalf("Error closing %q: %v", name, err)
	}
}
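seek_speed.go times the whole loop with time.Since and divides by the iteration count, converting the count to a time.Duration so the division stays in duration units. A minimal sketch of that timing pattern, with a hypothetical work function standing in for one randomSeekTest call:

package main

import (
	"fmt"
	"time"
)

// work stands in for a single randomSeekTest call (hypothetical).
func work() { time.Sleep(time.Millisecond) }

func main() {
	const iterations = 25
	start := time.Now()
	for i := 0; i < iterations; i++ {
		work()
	}
	dt := time.Since(start)
	// Converting the count to time.Duration keeps the division in
	// duration units, exactly as seek_speed.go reports per-iteration cost.
	fmt.Printf("%v total, %v per iteration\n", dt, dt/time.Duration(iterations))
}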
cmd/mount/test/seeker.go (new file, 115 lines)
@@ -0,0 +1,115 @@
// +build ignore

// Read two files with lots of seeking to stress test the seek code
package main

import (
	"bytes"
	"flag"
	"io"
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"time"
)

var (
	// Flags
	iterations   = flag.Int("n", 1E6, "Iterations to try")
	maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

func randomSeekTest(size int64, in1, in2 *os.File, file1, file2 string) {
	start := rand.Int63n(size)
	blockSize := rand.Intn(*maxBlockSize)
	if int64(blockSize) > size-start {
		blockSize = int(size - start)
	}
	log.Printf("Reading %d from %d", blockSize, start)

	_, err := in1.Seek(start, 0)
	if err != nil {
		log.Fatalf("Seek failed on %q: %v", file1, err)
	}
	_, err = in2.Seek(start, 0)
	if err != nil {
		log.Fatalf("Seek failed on %q: %v", file2, err)
	}

	buf1 := make([]byte, blockSize)
	n1, err := io.ReadFull(in1, buf1)
	if err != nil {
		log.Fatalf("Read failed on %q: %v", file1, err)
	}

	buf2 := make([]byte, blockSize)
	n2, err := io.ReadFull(in2, buf2)
	if err != nil {
		log.Fatalf("Read failed on %q: %v", file2, err)
	}

	if n1 != n2 {
		log.Fatalf("Read different lengths %d (%q) != %d (%q)", n1, file1, n2, file2)
	}

	if !bytes.Equal(buf1, buf2) {
		log.Printf("Dumping different blocks")
		err = ioutil.WriteFile("/tmp/z1", buf1, 0777)
		if err != nil {
			log.Fatalf("Failed to write /tmp/z1: %v", err)
		}
		err = ioutil.WriteFile("/tmp/z2", buf2, 0777)
		if err != nil {
			log.Fatalf("Failed to write /tmp/z2: %v", err)
		}
		log.Fatalf("Read different contents - saved in /tmp/z1 and /tmp/z2")
	}
}

func main() {
	flag.Parse()
	args := flag.Args()
	if len(args) != 2 {
		log.Fatalf("Require 2 files as argument")
	}
	file1, file2 := args[0], args[1]
	in1, err := os.Open(file1)
	if err != nil {
		log.Fatalf("Couldn't open %q: %v", file1, err)
	}
	in2, err := os.Open(file2)
	if err != nil {
		log.Fatalf("Couldn't open %q: %v", file2, err)
	}

	fi1, err := in1.Stat()
	if err != nil {
		log.Fatalf("Couldn't stat %q: %v", file1, err)
	}
	fi2, err := in2.Stat()
	if err != nil {
		log.Fatalf("Couldn't stat %q: %v", file2, err)
	}

	if fi1.Size() != fi2.Size() {
		log.Fatalf("Files not the same size")
	}

	for i := 0; i < *iterations; i++ {
		randomSeekTest(fi1.Size(), in1, in2, file1, file2)
	}

	err = in1.Close()
	if err != nil {
		log.Fatalf("Error closing %q: %v", file1, err)
	}
	err = in2.Close()
	if err != nil {
		log.Fatalf("Error closing %q: %v", file2, err)
	}
}
cmd/mount/test/seekers.go (new file, 129 lines)
@@ -0,0 +1,129 @@
// +build ignore

// Read lots of files with lots of simultaneous seeking to stress test the seek code
package main

import (
	"flag"
	"io"
	"log"
	"math/rand"
	"os"
	"path/filepath"
	"sort"
	"sync"
	"time"
)

var (
	// Flags
	iterations   = flag.Int("n", 1E6, "Iterations to try")
	maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
	simultaneous = flag.Int("transfers", 16, "Number of simultaneous files to open")
	seeksPerFile = flag.Int("seeks", 8, "Seeks per file")
	mask         = flag.Int64("mask", 0, "mask for seek, eg 0x7fff")
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

func seekTest(n int, file string) {
	in, err := os.Open(file)
	if err != nil {
		log.Fatalf("Couldn't open %q: %v", file, err)
	}
	fi, err := in.Stat()
	if err != nil {
		log.Fatalf("Couldn't stat %q: %v", file, err)
	}
	size := fi.Size()

	// FIXME make sure we try start and end

	maxBlockSize := *maxBlockSize
	if int64(maxBlockSize) > size {
		maxBlockSize = int(size)
	}
	for i := 0; i < n; i++ {
		start := rand.Int63n(size)
		if *mask != 0 {
			start &^= *mask
		}
		blockSize := rand.Intn(maxBlockSize)
		beyondEnd := false
		switch rand.Intn(10) {
		case 0:
			start = 0
		case 1:
			start = size - int64(blockSize)
		case 2:
			// seek beyond the end
			start = size + int64(blockSize)
			beyondEnd = true
		default:
		}
		if !beyondEnd && int64(blockSize) > size-start {
			blockSize = int(size - start)
		}
		log.Printf("%s: Reading %d from %d", file, blockSize, start)

		_, err = in.Seek(start, 0)
		if err != nil {
			log.Fatalf("Seek failed on %q: %v", file, err)
		}

		buf := make([]byte, blockSize)
		n, err := io.ReadFull(in, buf)
		if beyondEnd && err == io.EOF {
			// OK
		} else if err != nil {
			log.Fatalf("Read failed on %q: %v (%d)", file, err, n)
		}
	}

	err = in.Close()
	if err != nil {
		log.Fatalf("Error closing %q: %v", file, err)
	}
}

// Find all the files in dir
func findFiles(dir string) (files []string) {
	filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if info.Mode().IsRegular() && info.Size() > 0 {
			files = append(files, path)
		}
		return nil
	})
	sort.Strings(files)
	return files
}

func main() {
	flag.Parse()
	args := flag.Args()
	if len(args) != 1 {
		log.Fatalf("Require a directory as argument")
	}
	dir := args[0]
	files := findFiles(dir)
	jobs := make(chan string, *simultaneous)
	var wg sync.WaitGroup
	wg.Add(*simultaneous)
	for i := 0; i < *simultaneous; i++ {
		go func() {
			defer wg.Done()
			for file := range jobs {
				seekTest(*seeksPerFile, file)
			}
		}()
	}
	for i := 0; i < *iterations; i++ {
		i := rand.Intn(len(files))
		jobs <- files[i]
		//jobs <- files[i]
	}
	close(jobs)
	wg.Wait()
}
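seekers.go fans the work out with a buffered jobs channel and a sync.WaitGroup: a fixed pool of goroutines drains the channel, and closing it is what lets the pool shut down. A minimal standalone sketch of the same bounded-worker pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4
	jobs := make(chan string, workers)

	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			// Each worker drains jobs until the channel is closed,
			// mirroring the goroutines in seekers.go main().
			for job := range jobs {
				fmt.Println("processing", job)
			}
		}()
	}

	for _, job := range []string{"a", "b", "c", "d", "e"} {
		jobs <- job
	}
	close(jobs) // lets the range loops finish
	wg.Wait()   // waits for all workers to drain
}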
cmd/mountlib/mount.go (new file, 201 lines)
@@ -0,0 +1,201 @@
package mountlib

import (
	"io"
	"log"
	"os"
	"runtime"

	"github.com/ncw/rclone/cmd"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/vfs"
	"github.com/ncw/rclone/vfs/vfsflags"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// Options set by command line flags
var (
	DebugFUSE          = false
	AllowNonEmpty      = false
	AllowRoot          = false
	AllowOther         = false
	DefaultPermissions = false
	WritebackCache     = false
	MaxReadAhead       fs.SizeSuffix = 128 * 1024
	ExtraOptions       []string
	ExtraFlags         []string
)

// Check if the folder is empty
func checkMountEmpty(mountpoint string) error {
	fp, fpErr := os.Open(mountpoint)
	if fpErr != nil {
		return errors.Wrap(fpErr, "Can not open: "+mountpoint)
	}
	defer fs.CheckClose(fp, &fpErr)

	_, fpErr = fp.Readdirnames(1)

	// directory is not empty
	if fpErr != io.EOF {
		var e error
		var errorMsg = "Directory is not empty: " + mountpoint + " If you want to mount it anyway use: --allow-non-empty option"
		if fpErr == nil {
			e = errors.New(errorMsg)
		} else {
			e = errors.Wrap(fpErr, errorMsg)
		}
		return e
	}
	return nil
}

// NewMountCommand makes a mount command with the given name and Mount function
func NewMountCommand(commandName string, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
	var commandDefinition = &cobra.Command{
		Use:   commandName + " remote:path /path/to/mountpoint",
		Short: `Mount the remote as a mountpoint. **EXPERIMENTAL**`,
		Long: `
rclone ` + commandName + ` allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with
FUSE.

This is **EXPERIMENTAL** - use with care.

First set up your remote using ` + "`rclone config`" + `. Check it works with ` + "`rclone ls`" + ` etc.

Start the mount like this

    rclone ` + commandName + ` remote:path/to/files /path/to/local/mount

Or on Windows like this where X: is an unused drive letter

    rclone ` + commandName + ` remote:path/to/files X:

When the program ends, either via Ctrl+C or receiving a SIGINT or SIGTERM signal,
the mount is automatically stopped.

The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually with

    # Linux
    fusermount -u /path/to/local/mount
    # OS X
    umount /path/to/local/mount

### Installing on Windows ###

To run rclone ` + commandName + ` on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/).

WinFsp is an [open source](https://github.com/billziss-gh/winfsp)
Windows File System Proxy which makes it easy to write user space file
systems for Windows. It provides a FUSE emulation layer which rclone
uses in combination with
[cgofuse](https://github.com/billziss-gh/cgofuse). Both of these
packages are by Bill Zissimopoulos who was very helpful during the
implementation of rclone ` + commandName + ` for Windows.

#### Windows caveats ####

Note that drives created as Administrator are not visible to other
accounts (including the account that was elevated as
Administrator). So if you start a Windows drive from an Administrative
Command Prompt and then try to access the same drive from Explorer
(which does not run as Administrator), you will not be able to see the
new drive.

The easiest way around this is to start the drive from a normal
command prompt. It is also possible to start a drive from the SYSTEM
account (using [the WinFsp.Launcher
infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture))
which creates drives accessible for everyone on the system.

### Limitations ###

This can only write files sequentially, it can only seek when reading.
This means that many applications won't work with their files on an
rclone mount.

The bucket based remotes (eg Swift, S3, Google Cloud Storage, B2,
Hubic) won't work from the root - you will need to specify a bucket,
or a path within the bucket. So ` + "`swift:`" + ` won't work whereas
` + "`swift:bucket`" + ` will as will ` + "`swift:bucket/path`" + `.
None of these support the concept of directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.

Only supported on Linux, FreeBSD, OS X and Windows at the moment.

### rclone ` + commandName + ` vs rclone sync/copy ###

File systems expect things to be 100% reliable, whereas cloud storage
systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However rclone ` + commandName + `
can't use retries in the same way without making local copies of the
uploads. This might happen in the future, but for the moment rclone
` + commandName + ` won't do that, so it will be less reliable than the rclone command.

### Filters ###

Note that all the rclone filters can be used to select a subset of the
files to be visible in the mount.

### systemd ###

When running rclone ` + commandName + ` as a systemd service, it is possible
to use Type=notify. In this case the service will enter the started state
after the mountpoint has been successfully set up.
Units having the rclone ` + commandName + ` service specified as a requirement
will see all files and folders immediately in this mode.
` + vfs.Help,
		Run: func(command *cobra.Command, args []string) {
			cmd.CheckArgs(2, 2, command, args)
			fdst := cmd.NewFsDst(args)

			// Show stats if the user has specifically requested them
			if cmd.ShowStats() {
				stopStats := cmd.StartStats()
				defer close(stopStats)
			}

			// Skip checkMountEmpty if --allow-non-empty flag is used or if
			// the Operating System is Windows
			if !AllowNonEmpty && runtime.GOOS != "windows" {
				err := checkMountEmpty(args[1])
				if err != nil {
					log.Fatalf("Fatal error: %v", err)
				}
			}

			err := Mount(fdst, args[1])
			if err != nil {
				log.Fatalf("Fatal error: %v", err)
			}
		},
	}

	// Register the command
	cmd.Root.AddCommand(commandDefinition)

	// Add flags
	flags := commandDefinition.Flags()
	fs.BoolVarP(flags, &DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
	// mount options
	fs.BoolVarP(flags, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
	fs.BoolVarP(flags, &AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
	fs.BoolVarP(flags, &AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
	fs.BoolVarP(flags, &DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
	fs.BoolVarP(flags, &WritebackCache, "write-back-cache", "", WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
	fs.FlagsVarP(flags, &MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
	fs.StringArrayVarP(flags, &ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
	fs.StringArrayVarP(flags, &ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
	//fs.BoolVarP(flags, &foreground, "foreground", "", foreground, "Do not detach.")

	// Add in the generic flags
	vfsflags.AddFlags(flags)

	return commandDefinition
}
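checkMountEmpty decides whether the mountpoint is empty by asking Readdirnames for a single name: io.EOF means the directory has no entries, while any name (or other error) means it is not safe to mount over. A minimal sketch of that emptiness check, standard library only, with a hypothetical isEmptyDir helper:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
)

// isEmptyDir reports whether dir contains no entries, using the same
// Readdirnames(1) trick as checkMountEmpty: reading one name returns
// io.EOF if and only if the directory is empty.
func isEmptyDir(dir string) (bool, error) {
	fp, err := os.Open(dir)
	if err != nil {
		return false, err
	}
	defer fp.Close()
	_, err = fp.Readdirnames(1)
	if err == io.EOF {
		return true, nil
	}
	return false, err
}

func main() {
	dir, err := ioutil.TempDir("", "emptycheck")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)
	empty, err := isEmptyDir(dir)
	fmt.Println(empty, err) // true <nil>
}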
cmd/mountlib/mounttest/dir.go (new file, 227 lines)
@@ -0,0 +1,227 @@
package mounttest

import (
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestDirLs checks directory listing
func TestDirLs(t *testing.T) {
	run.skipIfNoFUSE(t)

	run.checkDir(t, "")

	run.mkdir(t, "a directory")
	run.createFile(t, "a file", "hello")

	run.checkDir(t, "a directory/|a file 5")

	run.rmdir(t, "a directory")
	run.rm(t, "a file")

	run.checkDir(t, "")
}

// TestDirCreateAndRemoveDir tests creating and removing a directory
func TestDirCreateAndRemoveDir(t *testing.T) {
	run.skipIfNoFUSE(t)

	run.mkdir(t, "dir")
	run.mkdir(t, "dir/subdir")
	run.checkDir(t, "dir/|dir/subdir/")

	// Check we can't delete a directory with stuff in
	err := os.Remove(run.path("dir"))
	assert.Error(t, err, "file exists")

	// Now delete subdir then dir - should produce no errors
	run.rmdir(t, "dir/subdir")
	run.checkDir(t, "dir/")
	run.rmdir(t, "dir")
	run.checkDir(t, "")
}

// TestDirCreateAndRemoveFile tests creating and removing a file
func TestDirCreateAndRemoveFile(t *testing.T) {
	run.skipIfNoFUSE(t)

	run.mkdir(t, "dir")
	run.createFile(t, "dir/file", "potato")
	run.checkDir(t, "dir/|dir/file 6")

	// Check we can't delete a directory with stuff in
	err := os.Remove(run.path("dir"))
	assert.Error(t, err, "file exists")

	// Now delete file
	run.rm(t, "dir/file")

	run.checkDir(t, "dir/")
	run.rmdir(t, "dir")
	run.checkDir(t, "")
}

// TestDirRenameFile tests renaming a file
func TestDirRenameFile(t *testing.T) {
	run.skipIfNoFUSE(t)

	run.mkdir(t, "dir")
	run.createFile(t, "file", "potato")
	run.checkDir(t, "dir/|file 6")

	err := os.Rename(run.path("file"), run.path("file2"))
	require.NoError(t, err)
	run.checkDir(t, "dir/|file2 6")

	data := run.readFile(t, "file2")
	assert.Equal(t, "potato", data)

	err = os.Rename(run.path("file2"), run.path("dir/file3"))
	require.NoError(t, err)
	run.checkDir(t, "dir/|dir/file3 6")

	data = run.readFile(t, "dir/file3")
	require.NoError(t, err)
	assert.Equal(t, "potato", data)

	run.rm(t, "dir/file3")
	run.rmdir(t, "dir")
	run.checkDir(t, "")
}

// TestDirRenameEmptyDir tests renaming an empty directory
func TestDirRenameEmptyDir(t *testing.T) {
	run.skipIfNoFUSE(t)

	run.mkdir(t, "dir")
	run.mkdir(t, "dir1")
	run.checkDir(t, "dir/|dir1/")

	err := os.Rename(run.path("dir1"), run.path("dir/dir2"))
	require.NoError(t, err)
	run.checkDir(t, "dir/|dir/dir2/")

	err = os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
	require.NoError(t, err)
	run.checkDir(t, "dir/|dir/dir3/")

	run.rmdir(t, "dir/dir3")
	run.rmdir(t, "dir")
	run.checkDir(t, "")
}

// TestDirRenameFullDir tests renaming a full directory
func TestDirRenameFullDir(t *testing.T) {
	run.skipIfNoFUSE(t)

	run.mkdir(t, "dir")
	run.mkdir(t, "dir1")
	run.createFile(t, "dir1/potato.txt", "maris piper")
	run.checkDir(t, "dir/|dir1/|dir1/potato.txt 11")

	err := os.Rename(run.path("dir1"), run.path("dir/dir2"))
	require.NoError(t, err)
	run.checkDir(t, "dir/|dir/dir2/|dir/dir2/potato.txt 11")

	err = os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
	require.NoError(t, err)
	run.checkDir(t, "dir/|dir/dir3/|dir/dir3/potato.txt 11")

	run.rm(t, "dir/dir3/potato.txt")
	run.rmdir(t, "dir/dir3")
	run.rmdir(t, "dir")
	run.checkDir(t, "")
}

// TestDirModTime tests mod times
func TestDirModTime(t *testing.T) {
	run.skipIfNoFUSE(t)

	run.mkdir(t, "dir")
	mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
	err := os.Chtimes(run.path("dir"), mtime, mtime)
	require.NoError(t, err)

	info, err := os.Stat(run.path("dir"))
	require.NoError(t, err)

	// avoid errors because of timezone differences
	assert.Equal(t, info.ModTime().Unix(), mtime.Unix())

	run.rmdir(t, "dir")
}

// TestDirCacheFlush tests flushing the dir cache
func TestDirCacheFlush(t *testing.T) {
	run.skipIfNoFUSE(t)

	run.checkDir(t, "")

	run.mkdir(t, "dir")
	run.mkdir(t, "otherdir")
	run.createFile(t, "dir/file", "1")
	run.createFile(t, "otherdir/file", "1")

	dm := newDirMap("otherdir/|otherdir/file 1|dir/|dir/file 1")
	localDm := make(dirMap)
	run.readLocal(t, localDm, "")
	assert.Equal(t, dm, localDm, "expected vs fuse mount")

	err := run.fremote.Mkdir("dir/subdir")
	require.NoError(t, err)

	root, err := run.vfs.Root()
	require.NoError(t, err)

	// expect newly created "subdir" on remote to not show up
	root.ForgetPath("otherdir")
	run.readLocal(t, localDm, "")
	assert.Equal(t, dm, localDm, "expected vs fuse mount")

	root.ForgetPath("dir")
	dm = newDirMap("otherdir/|otherdir/file 1|dir/|dir/file 1|dir/subdir/")
	run.readLocal(t, localDm, "")
	assert.Equal(t, dm, localDm, "expected vs fuse mount")

	run.rm(t, "otherdir/file")
	run.rmdir(t, "otherdir")
	run.rm(t, "dir/file")
	run.rmdir(t, "dir/subdir")
	run.rmdir(t, "dir")
	run.checkDir(t, "")
}

// TestDirCacheFlushOnDirRename tests flushing the dir cache on rename
func TestDirCacheFlushOnDirRename(t *testing.T) {
	run.skipIfNoFUSE(t)
	run.mkdir(t, "dir")
	run.createFile(t, "dir/file", "1")

	dm := newDirMap("dir/|dir/file 1")
	localDm := make(dirMap)
	run.readLocal(t, localDm, "")
	assert.Equal(t, dm, localDm, "expected vs fuse mount")

	// expect remotely created directory to not show up
	err := run.fremote.Mkdir("dir/subdir")
	require.NoError(t, err)
	run.readLocal(t, localDm, "")
	assert.Equal(t, dm, localDm, "expected vs fuse mount")

	err = os.Rename(run.path("dir"), run.path("rid"))
	require.NoError(t, err)

	dm = newDirMap("rid/|rid/subdir/|rid/file 1")
	localDm = make(dirMap)
	run.readLocal(t, localDm, "")
	assert.Equal(t, dm, localDm, "expected vs fuse mount")

	run.rm(t, "rid/file")
	run.rmdir(t, "rid/subdir")
	run.rmdir(t, "rid")
	run.checkDir(t, "")
}
cmd/mountlib/mounttest/file.go (new file, 64 lines)
@@ -0,0 +1,64 @@
package mounttest

import (
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestFileModTime tests mod times on files
func TestFileModTime(t *testing.T) {
	run.skipIfNoFUSE(t)

	run.createFile(t, "file", "123")

	mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
	err := os.Chtimes(run.path("file"), mtime, mtime)
	require.NoError(t, err)

	info, err := os.Stat(run.path("file"))
	require.NoError(t, err)

	// avoid errors because of timezone differences
	assert.Equal(t, info.ModTime().Unix(), mtime.Unix())

	run.rm(t, "file")
}

// osCreate is like os.Create but doesn't open the file for reading too
func osCreate(name string) (*os.File, error) {
	return os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
}

// TestFileModTimeWithOpenWriters tests mod time on open files
func TestFileModTimeWithOpenWriters(t *testing.T) {
	run.skipIfNoFUSE(t)

	mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
	filepath := run.path("cp-archive-test")

	f, err := osCreate(filepath)
	require.NoError(t, err)

	_, err = f.Write([]byte{104, 105})
	require.NoError(t, err)

	err = os.Chtimes(filepath, mtime, mtime)
	require.NoError(t, err)

	err = f.Close()
	require.NoError(t, err)

	run.waitForWriters()

	info, err := os.Stat(filepath)
	require.NoError(t, err)

	// avoid errors because of timezone differences
	assert.Equal(t, info.ModTime().Unix(), mtime.Unix())

	run.rm(t, "cp-archive-test")
}
cmd/mountlib/mounttest/fs.go (new file, 381 lines)
@@ -0,0 +1,381 @@
// Test suite for rclonefs

package mounttest

import (
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"testing"
	"time"

	"github.com/ncw/rclone/fs"
	_ "github.com/ncw/rclone/fs/all" // import all the file systems
	"github.com/ncw/rclone/fstest"
	"github.com/ncw/rclone/vfs"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

type (
	// UnmountFn is called to unmount the file system
	UnmountFn func() error
	// MountFn is called to mount the file system
	MountFn func(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error)
)

var (
	mountFn MountFn
)

// TestMain drives the tests
func TestMain(m *testing.M, fn MountFn) {
	mountFn = fn
	flag.Parse()
	var rc int
	cacheModes := []vfs.CacheMode{
		vfs.CacheModeOff,
		vfs.CacheModeMinimal,
		vfs.CacheModeWrites,
		vfs.CacheModeFull,
	}
	run = newRun()
	for _, cacheMode := range cacheModes {
		run.cacheMode(cacheMode)
		log.Printf("Starting test run with cache mode %v", cacheMode)
		rc = m.Run()
		log.Printf("Finished test run with cache mode %v", cacheMode)
		if rc != 0 {
			break
		}
	}
	run.Finalise()
	os.Exit(rc)
}

// Run holds the remotes for a test run
type Run struct {
	vfs          *vfs.VFS
	mountPath    string
	fremote      fs.Fs
	fremoteName  string
	cleanRemote  func()
	umountResult <-chan error
	umountFn     UnmountFn
	skip         bool
}

// run holds the master Run data
var run *Run

// newRun initialises the remote mount for testing and returns a run
// object.
//
// r.fremote is an empty remote Fs
//
// Finalise() will tidy them away when done.
func newRun() *Run {
	r := &Run{
		umountResult: make(chan error, 1),
	}

	fstest.Initialise()

	var err error
	r.fremote, r.fremoteName, r.cleanRemote, err = fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
	if err != nil {
		log.Fatalf("Failed to open remote %q: %v", *fstest.RemoteName, err)
	}

	err = r.fremote.Mkdir("")
	if err != nil {
		log.Fatalf("Failed to mkdir %q: %v", *fstest.RemoteName, err)
	}

	if runtime.GOOS != "windows" {
		r.mountPath, err = ioutil.TempDir("", "rclonefs-mount")
		if err != nil {
			log.Fatalf("Failed to create mount dir: %v", err)
		}
	} else {
		// Find a free drive letter
		drive := ""
		for letter := 'E'; letter <= 'Z'; letter++ {
			drive = string(letter) + ":"
			_, err := os.Stat(drive + "\\")
			if os.IsNotExist(err) {
				goto found
			}
		}
		log.Fatalf("Couldn't find free drive letter for test")
	found:
		r.mountPath = drive
	}

	// Mount it up
	r.mount()

	return r
}

func (r *Run) mount() {
	log.Printf("mount %q %q", r.fremote, r.mountPath)
	var err error
	r.vfs, r.umountResult, r.umountFn, err = mountFn(r.fremote, r.mountPath)
	if err != nil {
		log.Printf("mount FAILED: %v", err)
		r.skip = true
	} else {
		log.Printf("mount OK")
	}
}

func (r *Run) umount() {
	if r.skip {
		log.Printf("FUSE not found so skipping umount")
		return
	}
	/*
		log.Printf("Calling fusermount -u %q", r.mountPath)
		err := exec.Command("fusermount", "-u", r.mountPath).Run()
		if err != nil {
			log.Printf("fusermount failed: %v", err)
		}
	*/
	log.Printf("Unmounting %q", r.mountPath)
	err := r.umountFn()
	if err != nil {
		log.Printf("signal to umount failed - retrying: %v", err)
		time.Sleep(3 * time.Second)
		err = r.umountFn()
	}
	if err != nil {
		log.Fatalf("signal to umount failed: %v", err)
	}
	log.Printf("Waiting for umount")
	err = <-r.umountResult
	if err != nil {
		log.Fatalf("umount failed: %v", err)
	}

	// Cleanup the VFS cache - umount has called Shutdown
	err = r.vfs.CleanUp()
	if err != nil {
		log.Printf("Failed to cleanup the VFS cache: %v", err)
	}
}

// cacheMode flushes the VFS and changes the CacheMode
func (r *Run) cacheMode(cacheMode vfs.CacheMode) {
	if r.skip {
		log.Printf("FUSE not found so skipping cacheMode")
		return
	}
	// Wait for writers to finish
	r.vfs.WaitForWriters(30 * time.Second)
	// Empty and remake the remote
	r.cleanRemote()
	err := r.fremote.Mkdir("")
	if err != nil {
		log.Fatalf("Failed to mkdir %q: %v", *fstest.RemoteName, err)
	}
	// Empty the cache
	err = r.vfs.CleanUp()
	if err != nil {
		log.Printf("Failed to cleanup the VFS cache: %v", err)
	}
	// Reset the cache mode
	r.vfs.Opt.CacheMode = cacheMode
	// Flush the directory cache
	r.vfs.FlushDirCache()
}

func (r *Run) skipIfNoFUSE(t *testing.T) {
	if r.skip {
		t.Skip("FUSE not found so skipping test")
	}
}

// Finalise cleans the remote and unmounts
func (r *Run) Finalise() {
	r.umount()
	r.cleanRemote()
	err := os.RemoveAll(r.mountPath)
	if err != nil {
		log.Printf("Failed to clean mountPath %q: %v", r.mountPath, err)
	}
}

// path returns an OS local path for filePath
func (r *Run) path(filePath string) string {
	// return windows drive letter root as E:\
	if filePath == "" && runtime.GOOS == "windows" {
		return run.mountPath + `\`
	}
	return filepath.Join(run.mountPath, filepath.FromSlash(filePath))
}

type dirMap map[string]struct{}

// Create a dirMap from a string
func newDirMap(dirString string) (dm dirMap) {
	dm = make(dirMap)
	for _, entry := range strings.Split(dirString, "|") {
		if entry != "" {
			dm[entry] = struct{}{}
		}
	}
	return dm
}

// filesOnly returns a dirMap with only the files in it
func (dm dirMap) filesOnly() dirMap {
	newDm := make(dirMap)
	for name := range dm {
		if !strings.HasSuffix(name, "/") {
			newDm[name] = struct{}{}
		}
	}
	return newDm
}

// readLocal reads the local tree into dir
func (r *Run) readLocal(t *testing.T, dir dirMap, filePath string) {
	realPath := r.path(filePath)
	files, err := ioutil.ReadDir(realPath)
	require.NoError(t, err)
	for _, fi := range files {
		name := path.Join(filePath, fi.Name())
		if fi.IsDir() {
			dir[name+"/"] = struct{}{}
			r.readLocal(t, dir, name)
			assert.Equal(t, run.vfs.Opt.DirPerms&os.ModePerm, fi.Mode().Perm())
		} else {
			dir[fmt.Sprintf("%s %d", name, fi.Size())] = struct{}{}
			assert.Equal(t, run.vfs.Opt.FilePerms&os.ModePerm, fi.Mode().Perm())
		}
	}
}

// readRemote reads the remote tree into dir
func (r *Run) readRemote(t *testing.T, dir dirMap, filepath string) {
	objs, dirs, err := fs.WalkGetAll(r.fremote, filepath, true, 1)
	if err == fs.ErrorDirNotFound {
		return
	}
	require.NoError(t, err)
	for _, obj := range objs {
		dir[fmt.Sprintf("%s %d", obj.Remote(), obj.Size())] = struct{}{}
	}
	for _, d := range dirs {
		name := d.Remote()
		dir[name+"/"] = struct{}{}
		r.readRemote(t, dir, name)
	}
}

// checkDir checks the local and remote against the string passed in
func (r *Run) checkDir(t *testing.T, dirString string) {
	var retries = *fstest.ListRetries
	sleep := time.Second / 5
	var remoteOK, fuseOK bool
	var dm, localDm, remoteDm dirMap
	for i := 1; i <= retries; i++ {
		dm = newDirMap(dirString)
		localDm = make(dirMap)
		r.readLocal(t, localDm, "")
		remoteDm = make(dirMap)
		r.readRemote(t, remoteDm, "")
		// Ignore directories for remote compare
		remoteOK = reflect.DeepEqual(dm.filesOnly(), remoteDm.filesOnly())
		fuseOK = reflect.DeepEqual(dm, localDm)
		if remoteOK && fuseOK {
			return
		}
		sleep *= 2
		t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
		time.Sleep(sleep)
	}
	assert.Equal(t, dm.filesOnly(), remoteDm.filesOnly(), "expected vs remote")
	assert.Equal(t, dm, localDm, "expected vs fuse mount")
}

// waitForWriters waits for any files being written to be released by fuse
func (r *Run) waitForWriters() {
	run.vfs.WaitForWriters(10 * time.Second)
}

func (r *Run) createFile(t *testing.T, filepath string, contents string) {
	filepath = r.path(filepath)
	err := ioutil.WriteFile(filepath, []byte(contents), 0600)
	require.NoError(t, err)
	r.waitForWriters()
}

func (r *Run) readFile(t *testing.T, filepath string) string {
	filepath = r.path(filepath)
	result, err := ioutil.ReadFile(filepath)
	require.NoError(t, err)
	time.Sleep(100 * time.Millisecond) // FIXME wait for Release
	return string(result)
}

func (r *Run) mkdir(t *testing.T, filepath string) {
	filepath = r.path(filepath)
	err := os.Mkdir(filepath, 0700)
	require.NoError(t, err)
}

func (r *Run) rm(t *testing.T, filepath string) {
	filepath = r.path(filepath)
	err := os.Remove(filepath)
	require.NoError(t, err)

	// Wait for file to disappear from listing
	for i := 0; i < 10; i++ {
		_, err := os.Stat(filepath)
		if os.IsNotExist(err) {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	assert.Fail(t, "failed to delete file", filepath)
}

func (r *Run) rmdir(t *testing.T, filepath string) {
	filepath = r.path(filepath)
	err := os.Remove(filepath)
	require.NoError(t, err)
}

// TestMount checks that the Fs is mounted by seeing if the mountpoint
// is in the mount output
func TestMount(t *testing.T) {
	run.skipIfNoFUSE(t)
	if runtime.GOOS == "windows" {
		t.Skip("not running on windows")
	}

	out, err := exec.Command("mount").Output()
	require.NoError(t, err)
	assert.Contains(t, string(out), run.mountPath)
}

// TestRoot checks root directory is present and correct
func TestRoot(t *testing.T) {
	run.skipIfNoFUSE(t)

	fi, err := os.Lstat(run.mountPath)
	require.NoError(t, err)
	assert.True(t, fi.IsDir())
	assert.Equal(t, run.vfs.Opt.DirPerms&os.ModePerm, fi.Mode().Perm())
}
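checkDir copes with eventually consistent remote listings by retrying the comparison with a doubling sleep between attempts, only failing the test once the retries are exhausted. A minimal standalone sketch of that retry-with-backoff shape, with a hypothetical check callback standing in for the listing comparison:

package main

import (
	"fmt"
	"time"
)

// retryWithBackoff polls check up to retries times, doubling the
// sleep between attempts - the same shape as checkDir's wait for
// list eventual consistency.
func retryWithBackoff(retries int, check func() bool) bool {
	sleep := time.Second / 5
	for i := 1; i <= retries; i++ {
		if check() {
			return true
		}
		sleep *= 2
		fmt.Printf("retry %d/%d, sleeping %v\n", i, retries, sleep)
		time.Sleep(sleep)
	}
	return false
}

func main() {
	attempts := 0
	ok := retryWithBackoff(5, func() bool {
		attempts++
		return attempts >= 3 // pretend the listing converges on try 3
	})
	fmt.Println("converged:", ok)
}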